index (int64, 0–100k) | blob_id (string, length 40) | code (string, 7–7.27M chars) | steps (list, 1–1.25k items) | error (bool, 2 classes)
---|---|---|---|---
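Each record holds a 40-character blob_id, the raw source in code, a steps list (in the rows shown below, successive rewrites of the code in which parts are replaced by markers such as `<import token>`), and an error flag. Below is a minimal sketch of inspecting such a split, assuming the rows are exposed through the `datasets` library; the dataset identifier and split name are placeholders, not the real ones.

```python
# Sketch under assumptions: the table is one split of a dataset exposing the
# columns listed above; "your-org/your-dataset" is a hypothetical identifier.
from datasets import load_dataset

ds = load_dataset("your-org/your-dataset", split="train")  # placeholder path

row = ds[0]                # any record
print(row["blob_id"])      # 40-character blob hash
print(len(row["code"]))    # raw source size, 7 to ~7.27M characters
print(len(row["steps"]))   # entries in the steps list, 1 to ~1.25k
print(row["error"])        # boolean error flag (2 classes)
```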
98,800 | fda3d128195e858f1ed6a5996758624e2708f458 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('password', models.CharField(verbose_name='password', max_length=128)),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('email', models.EmailField(unique=True, max_length=75)),
('username', models.CharField(verbose_name='Username', unique=True, max_length=40)),
('first_name', models.CharField(verbose_name='First name', blank=True, max_length=40)),
('last_name', models.CharField(verbose_name='Last name', blank=True, max_length=40)),
('is_premium', models.BooleanField(default=False)),
('premium_expires', models.DateTimeField(null=True)),
('is_admin', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('title', models.CharField(unique=True, max_length=40)),
('parent', models.ForeignKey(blank=True, null=True, to='searchsystem.Category')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('adress', models.TextField()),
('id_google', models.CharField(max_length=255)),
('title', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PlaceCategory',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('category', models.ForeignKey(to='searchsystem.Category')),
('place', models.ForeignKey(to='searchsystem.Place')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('place', models.ForeignKey(to='searchsystem.Place')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='UserAdd',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('place_id', models.ForeignKey(to='searchsystem.Place')),
('user_id', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='place',
name='author',
field=models.ManyToManyField(through='searchsystem.UserAdd', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='place',
name='categories_places',
field=models.ManyToManyField(through='searchsystem.PlaceCategory', to='searchsystem.Category'),
preserve_default=True,
),
]
|
[
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Account',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('password', models.CharField(verbose_name='password', max_length=128)),\n ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),\n ('email', models.EmailField(unique=True, max_length=75)),\n ('username', models.CharField(verbose_name='Username', unique=True, max_length=40)),\n ('first_name', models.CharField(verbose_name='First name', blank=True, max_length=40)),\n ('last_name', models.CharField(verbose_name='Last name', blank=True, max_length=40)),\n ('is_premium', models.BooleanField(default=False)),\n ('premium_expires', models.DateTimeField(null=True)),\n ('is_admin', models.BooleanField(default=False)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(unique=True, max_length=40)),\n ('parent', models.ForeignKey(blank=True, null=True, to='searchsystem.Category')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Place',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('adress', models.TextField()),\n ('id_google', models.CharField(max_length=255)),\n ('title', models.CharField(max_length=255)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='PlaceCategory',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('category', models.ForeignKey(to='searchsystem.Category')),\n ('place', models.ForeignKey(to='searchsystem.Place')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Review',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('content', models.TextField()),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('place', models.ForeignKey(to='searchsystem.Place')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='UserAdd',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('place_id', models.ForeignKey(to='searchsystem.Place')),\n ('user_id', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='place',\n name='author',\n field=models.ManyToManyField(through='searchsystem.UserAdd', to=settings.AUTH_USER_MODEL),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='place',\n name='categories_places',\n field=models.ManyToManyField(through='searchsystem.PlaceCategory', to='searchsystem.Category'),\n preserve_default=True,\n ),\n ]\n",
"from __future__ import unicode_literals\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Account', fields=[('id',\n models.AutoField(auto_created=True, serialize=False, primary_key=\n True, verbose_name='ID')), ('password', models.CharField(\n verbose_name='password', max_length=128)), ('last_login', models.\n DateTimeField(default=django.utils.timezone.now, verbose_name=\n 'last login')), ('email', models.EmailField(unique=True, max_length\n =75)), ('username', models.CharField(verbose_name='Username',\n unique=True, max_length=40)), ('first_name', models.CharField(\n verbose_name='First name', blank=True, max_length=40)), (\n 'last_name', models.CharField(verbose_name='Last name', blank=True,\n max_length=40)), ('is_premium', models.BooleanField(default=False)),\n ('premium_expires', models.DateTimeField(null=True)), ('is_admin',\n models.BooleanField(default=False)), ('created_at', models.\n DateTimeField(auto_now_add=True)), ('updated_at', models.\n DateTimeField(auto_now=True))], options={'abstract': False}, bases=\n (models.Model,)), migrations.CreateModel(name='Category', fields=[(\n 'id', models.AutoField(auto_created=True, serialize=False,\n primary_key=True, verbose_name='ID')), ('title', models.CharField(\n unique=True, max_length=40)), ('parent', models.ForeignKey(blank=\n True, null=True, to='searchsystem.Category'))], options={}, bases=(\n models.Model,)), migrations.CreateModel(name='Place', fields=[('id',\n models.AutoField(auto_created=True, serialize=False, primary_key=\n True, verbose_name='ID')), ('adress', models.TextField()), (\n 'id_google', models.CharField(max_length=255)), ('title', models.\n CharField(max_length=255)), ('created_at', models.DateTimeField(\n auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=\n True))], options={}, bases=(models.Model,)), migrations.CreateModel\n (name='PlaceCategory', fields=[('id', models.AutoField(auto_created\n =True, serialize=False, primary_key=True, verbose_name='ID')), (\n 'category', models.ForeignKey(to='searchsystem.Category')), (\n 'place', models.ForeignKey(to='searchsystem.Place'))], options={},\n bases=(models.Model,)), migrations.CreateModel(name='Review',\n fields=[('id', models.AutoField(auto_created=True, serialize=False,\n primary_key=True, verbose_name='ID')), ('content', models.TextField\n ()), ('created_at', models.DateTimeField(auto_now_add=True)), (\n 'updated_at', models.DateTimeField(auto_now=True)), ('place',\n models.ForeignKey(to='searchsystem.Place'))], options={}, bases=(\n models.Model,)), migrations.CreateModel(name='UserAdd', fields=[(\n 'id', models.AutoField(auto_created=True, serialize=False,\n primary_key=True, verbose_name='ID')), ('place_id', models.\n ForeignKey(to='searchsystem.Place')), ('user_id', models.ForeignKey\n (to=settings.AUTH_USER_MODEL))], options={}, bases=(models.Model,)),\n migrations.AddField(model_name='place', name='author', field=models\n .ManyToManyField(through='searchsystem.UserAdd', to=settings.\n AUTH_USER_MODEL), preserve_default=True), migrations.AddField(\n model_name='place', name='categories_places', field=models.\n ManyToManyField(through='searchsystem.PlaceCategory', to=\n 'searchsystem.Category'), preserve_default=True)]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Account', fields=[('id',\n models.AutoField(auto_created=True, serialize=False, primary_key=\n True, verbose_name='ID')), ('password', models.CharField(\n verbose_name='password', max_length=128)), ('last_login', models.\n DateTimeField(default=django.utils.timezone.now, verbose_name=\n 'last login')), ('email', models.EmailField(unique=True, max_length\n =75)), ('username', models.CharField(verbose_name='Username',\n unique=True, max_length=40)), ('first_name', models.CharField(\n verbose_name='First name', blank=True, max_length=40)), (\n 'last_name', models.CharField(verbose_name='Last name', blank=True,\n max_length=40)), ('is_premium', models.BooleanField(default=False)),\n ('premium_expires', models.DateTimeField(null=True)), ('is_admin',\n models.BooleanField(default=False)), ('created_at', models.\n DateTimeField(auto_now_add=True)), ('updated_at', models.\n DateTimeField(auto_now=True))], options={'abstract': False}, bases=\n (models.Model,)), migrations.CreateModel(name='Category', fields=[(\n 'id', models.AutoField(auto_created=True, serialize=False,\n primary_key=True, verbose_name='ID')), ('title', models.CharField(\n unique=True, max_length=40)), ('parent', models.ForeignKey(blank=\n True, null=True, to='searchsystem.Category'))], options={}, bases=(\n models.Model,)), migrations.CreateModel(name='Place', fields=[('id',\n models.AutoField(auto_created=True, serialize=False, primary_key=\n True, verbose_name='ID')), ('adress', models.TextField()), (\n 'id_google', models.CharField(max_length=255)), ('title', models.\n CharField(max_length=255)), ('created_at', models.DateTimeField(\n auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=\n True))], options={}, bases=(models.Model,)), migrations.CreateModel\n (name='PlaceCategory', fields=[('id', models.AutoField(auto_created\n =True, serialize=False, primary_key=True, verbose_name='ID')), (\n 'category', models.ForeignKey(to='searchsystem.Category')), (\n 'place', models.ForeignKey(to='searchsystem.Place'))], options={},\n bases=(models.Model,)), migrations.CreateModel(name='Review',\n fields=[('id', models.AutoField(auto_created=True, serialize=False,\n primary_key=True, verbose_name='ID')), ('content', models.TextField\n ()), ('created_at', models.DateTimeField(auto_now_add=True)), (\n 'updated_at', models.DateTimeField(auto_now=True)), ('place',\n models.ForeignKey(to='searchsystem.Place'))], options={}, bases=(\n models.Model,)), migrations.CreateModel(name='UserAdd', fields=[(\n 'id', models.AutoField(auto_created=True, serialize=False,\n primary_key=True, verbose_name='ID')), ('place_id', models.\n ForeignKey(to='searchsystem.Place')), ('user_id', models.ForeignKey\n (to=settings.AUTH_USER_MODEL))], options={}, bases=(models.Model,)),\n migrations.AddField(model_name='place', name='author', field=models\n .ManyToManyField(through='searchsystem.UserAdd', to=settings.\n AUTH_USER_MODEL), preserve_default=True), migrations.AddField(\n model_name='place', name='categories_places', field=models.\n ManyToManyField(through='searchsystem.PlaceCategory', to=\n 'searchsystem.Category'), preserve_default=True)]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,801 | 594c923fabe64bb29a1a3aea25466cf370ff5377 |
import os
import math
import numpy as np
print(math.floor(2000.543531512354))
|
[
"import os\nimport math\nimport numpy as np\nprint(math.floor(2000.543531512354))",
"import os\nimport math\nimport numpy as np\nprint(math.floor(2000.543531512354))\n",
"<import token>\nprint(math.floor(2000.543531512354))\n",
"<import token>\n<code token>\n"
] | false |
98,802 | 941bf3737775f7a207d5447b0cf74c6d35003148 |
from automatic_plot_helper import load_settings
from automatic_plot_helper import load_top_isings
from automatic_plot_helper import load_top_isings_attr
from automatic_plot_helper import load_isings_from_list
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from os import makedirs, path
import pickle
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import os
class SmallIsing:
def __init__(self, avg_energy, time_steps_gen):
self.avg_energy = avg_energy
self.time_steps_gen = time_steps_gen
self.norm_avg_energy = avg_energy / time_steps_gen
def all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand, sim_name_rand, only_top_isings=20,
load_previous=False):
save_folder = 'save/plots_for_anna/'
if not os.path.exists(save_folder):
os.makedirs(save_folder)
matplotlib.rcParams.update({'font.size': 30})
alpha = 0.3
s = 25
colour_b1 = 'darkorange'
colour_b10 = 'royalblue'
if not load_previous:
attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)
attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)
attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand, only_top_isings)
attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)
loaded_plot_attrs = {
'attrs_gen_b1_fix': attrs_gen_b1_fix,
'attrs_gen_b10_fix': attrs_gen_b10_fix,
'attrs_gen_b10_rand': attrs_gen_b10_rand,
'attrs_gen_b1_rand': attrs_gen_b1_rand
}
try:
pickle_out = open('{}loaded_plot_attrs.pickle'.format(save_folder), 'wb')
pickle.dump(loaded_plot_attrs, pickle_out)
pickle_out.close()
except Exception:
print('Could not save pickle file')
else:
file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')
loaded_plot_attrs = pickle.load(file)
file.close()
attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']
attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']
attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']
attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']
# Increasing scale
# attrs_gen_b10_fix = list(map(lambda x: x*1000, attrs_gen_b10_fix))
# attrs_gen_b1_fix = list(map(lambda x: x*1000, attrs_gen_b1_fix))
# attrs_gen_b10_rand = list(map(lambda x: x*1000, attrs_gen_b10_rand))
# attrs_gen_b1_rand = list(map(lambda x: x*1000, attrs_gen_b1_rand))
ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder, 'fixed_time_steps_b10', alpha, s,
get_axis=True)
labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder, 'fixed_time_steps_b1', alpha, s, get_axis=False,
ylim=ylim, return_labels=True)
plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder, 'random_time_steps_b10', alpha, s, get_axis=False,
ylim=ylim)
plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder, 'random_time_steps_b1', alpha, s, get_axis=False,
ylim=ylim, set_labels=None)
plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10, save_folder,
'Overlap_fixed_time_steps', alpha, s, ylim)
plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1, colour_b10, save_folder,
'Overlap_random_time_steps', alpha, s, ylim)
def load_ising_stuff(sim_name, only_top_isings):
isings_avg_energy_list = load_top_isings_attr(sim_name, only_top_isings, 'avg_energy')
# Load this in order to have something to compute the number of time steps of current generation with
# TODO Always fit this to current data format... only in latest version time steps of current generation are saved as attributes in isings
#Getting number of time steps for each generation:
try:
# Get rid of double list (usually several individuals are in there but now only one is in there, which is why we can remove one nesting)
time_steps_first_ind = load_top_isings_attr(sim_name, 1, 'time_steps')
time_steps_each_gen = [time_steps[0] for time_steps in time_steps_first_ind]
except Exception:
energies_first_ind = load_top_isings_attr(sim_name, 1, 'energies')
energies_first_ind = [energies[0] for energies in energies_first_ind]
time_steps_each_gen = list(map(lambda x: len(x), energies_first_ind))
settings = load_settings(sim_name)
settings['pop_size'] = only_top_isings
small_isings_list = create_small_isings(isings_avg_energy_list, time_steps_each_gen)
mean_attrs_generational = create_generational_avg(small_isings_list, 'norm_avg_energy')
return mean_attrs_generational
def create_generational_avg(isings_list, attr_name):
mean_attrs_generational = []
for isings in isings_list:
attrs = []
for I in isings:
exec('attrs.append(I.{})'.format(attr_name))
mean_attrs_generational.append(np.mean(attrs))
return mean_attrs_generational
def plot_generational_avg(y_axis, colour, save_folder, add_save_name, alpha, s, get_axis=True, ylim=None,
return_labels=False, set_labels=None):
x_axis = np.arange(len(y_axis))
#matplotlib.use('GTK3Cairo')
plt.figure(figsize=(19, 10))
ax = plt.scatter(x_axis, y_axis, alpha=alpha, c=colour, s=s)
# Replace ticks with larger numbers
locs, labels = plt.yticks()
if set_labels is not None:
labels = set_labels
for label in labels[::2]:
label.set_visible(False)
legend_elements = [
Line2D([0], [0], marker='o', color='w', label='Critical', markerfacecolor='darkorange',
markersize=25, alpha=0.75),
Line2D([0], [0], marker='o', color='w', label='Sub-critical', markerfacecolor='royalblue',
markersize=25, alpha=0.75)
]
plt.legend(loc="lower right", bbox_to_anchor=(0.95, 0.05), handles=legend_elements)
plt.xlabel('Generation')
plt.ylabel('Performance')
#plt.yticks([])
if get_axis:
ylim = plt.ylim()
else:
plt.ylim(ylim)
if not path.exists(save_folder):
makedirs(save_folder)
save_name = '{}.png'.format(add_save_name)
plt.savefig(save_folder + save_name, dpi=300) #bbox_inches='tight'
plt.show()
if get_axis:
return ylim
if return_labels:
return labels
def plot_overlap(y_axis_b1, y_axis_b10, colour_b1, colour_b10, save_folder, add_save_name, alpha, s, ylim):
x_axis_b1 = np.arange(len(y_axis_b1))
x_axis_b10 = np.arange(len(y_axis_b10))
plt.figure(figsize=(19, 10))
plt.scatter(x_axis_b1, y_axis_b1, alpha=alpha, c=colour_b1, s=s)
plot1 = plt.scatter(x_axis_b10, y_axis_b10, alpha=alpha, c=colour_b10, s=s)
plt.ylim(ylim)
locs, labels = plt.yticks()
for label in labels[::2]:
label.set_visible(False)
plt.xlabel('Generation')
plt.ylabel('Performance')
#plt.yticks([])
legend_elements = [
Line2D([0], [0], marker='o', color='w', label='Critical', markerfacecolor=colour_b1,
markersize=25, alpha=0.75),
Line2D([0], [0], marker='o', color='w', label='Sub-critical', markerfacecolor=colour_b10,
markersize=25, alpha=0.75)
]
plt.legend(loc="lower right", bbox_to_anchor=(0.95, 0.05), handles=legend_elements)
plt.savefig(save_folder+add_save_name, dpi=300)
plt.show()
def create_small_isings(isings_avg_energy_list, time_steps_each_gen):
small_isings_list = []
for avg_energies, time_steps_gen in zip(isings_avg_energy_list, time_steps_each_gen):
small_isings = []
for avg_energy in avg_energies:
I_small = SmallIsing(avg_energy, time_steps_gen)
small_isings.append(I_small)
small_isings_list.append(small_isings)
return small_isings_list
if __name__ == '__main__':
# sim_name_b10_fix = 'sim-20200604-235433-g_2000_-t_2000_-b_10_-dream_c_0_-nat_c_0_-ref_0_-rec_c_0_-n_energies_velocities_saved'
# sim_name_b1_fix = 'sim-20200604-235424-g_2000_-t_2000_-b_1_-dream_c_0_-nat_c_0_-ref_0_-rec_c_0_-n_energies_velocities_saved'
# # sim_name_b10_rand = 'sim-20200621-130735-g_2001_-ref_0_-noplt_-b_10_-dream_c_500_-c_4_-a_1990_1999_--nomutb_-n_random_time_steps_save_energies_nomutb' #'sim-20200619-173340-g_2001_-ref_0_-noplt_-b_10_-dream_c_500_-c_4_-a_1995_1996_1997_1998_1999_-n_random_time_steps_save_energies_4'
# # sim_name_b1_rand = 'sim-20200619-173349-g_2001_-ref_0_-noplt_-b_1_-dream_c_500_-c_4_-a_1995_1996_1997_1998_1999_-n_random_time_steps_save_energies_4'
# sim_name_b10_rand = 'sim-20200702-113213-g_10000_-rand_ts_-rand_ts_lim_100_8000_-b_10_-noplt_-n_huge_random_ts_run_ts_saved'
# sim_name_b1_rand = 'sim-20200702-113206-g_10000_-rand_ts_-rand_ts_lim_100_8000_-b_1_-noplt_-n_huge_random_ts_run_ts_saved'
sim_name_b1_fix = 'sim-20200715-151540-g_4000_-t_2000_-b_1_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'
sim_name_b10_fix = 'sim-20200715-151426-g_4000_-t_2000_-b_10_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'
sim_name_b1_rand = 'sim-20200715-151519-g_4000_-rand_ts_-b_1_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'
sim_name_b10_rand = 'sim-20200715-151458-g_4000_-rand_ts_-b_10_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'
#pre_folder = 'Energies_Velocities_saved_during_2d_sim_random_time_steps_cut_off_animations/'
pre_folder = ''
# sim_name_b10_rand = pre_folder + sim_name_b10_rand
# sim_name_b1_rand = pre_folder + sim_name_b1_rand
sim_name_b10_fix = pre_folder + sim_name_b10_fix
sim_name_b1_fix = pre_folder + sim_name_b1_fix
all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand, sim_name_b10_rand)
|
[
"from automatic_plot_helper import load_settings\nfrom automatic_plot_helper import load_top_isings\nfrom automatic_plot_helper import load_top_isings_attr\nfrom automatic_plot_helper import load_isings_from_list\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom os import makedirs, path\nimport pickle\nfrom matplotlib.patches import Patch\nfrom matplotlib.lines import Line2D\nimport os\n\nclass SmallIsing:\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\ndef all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand, sim_name_rand, only_top_isings=20,\n load_previous=False):\n\n\n save_folder = 'save/plots_for_anna/'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n matplotlib.rcParams.update({'font.size': 30})\n alpha = 0.3\n s = 25\n colour_b1 = 'darkorange'\n colour_b10 = 'royalblue'\n\n if not load_previous:\n attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)\n attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)\n attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand, only_top_isings)\n attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)\n\n loaded_plot_attrs = {\n 'attrs_gen_b1_fix': attrs_gen_b1_fix,\n 'attrs_gen_b10_fix': attrs_gen_b10_fix,\n 'attrs_gen_b10_rand': attrs_gen_b10_rand,\n 'attrs_gen_b1_rand': attrs_gen_b1_rand\n }\n try:\n pickle_out = open('{}loaded_plot_attrs.pickle'.format(save_folder), 'wb')\n pickle.dump(loaded_plot_attrs, pickle_out)\n pickle_out.close()\n except Exception:\n print('Could not save pickle file')\n\n else:\n\n\n\n file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')\n loaded_plot_attrs = pickle.load(file)\n file.close()\n\n attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']\n attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']\n attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']\n attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']\n\n # Increasing scale\n # attrs_gen_b10_fix = list(map(lambda x: x*1000, attrs_gen_b10_fix))\n # attrs_gen_b1_fix = list(map(lambda x: x*1000, attrs_gen_b1_fix))\n # attrs_gen_b10_rand = list(map(lambda x: x*1000, attrs_gen_b10_rand))\n # attrs_gen_b1_rand = list(map(lambda x: x*1000, attrs_gen_b1_rand))\n\n ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder, 'fixed_time_steps_b10', alpha, s,\n get_axis=True)\n labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder, 'fixed_time_steps_b1', alpha, s, get_axis=False,\n ylim=ylim, return_labels=True)\n plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder, 'random_time_steps_b10', alpha, s, get_axis=False,\n ylim=ylim)\n plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder, 'random_time_steps_b1', alpha, s, get_axis=False,\n ylim=ylim, set_labels=None)\n\n plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10, save_folder,\n 'Overlap_fixed_time_steps', alpha, s, ylim)\n plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1, colour_b10, save_folder,\n 'Overlap_random_time_steps', alpha, s, ylim)\n\n\n\ndef load_ising_stuff(sim_name, only_top_isings):\n isings_avg_energy_list = load_top_isings_attr(sim_name, only_top_isings, 'avg_energy')\n\n # Load this in order to have something to compute the number of time steps of current generation with\n\n # TODO Always fit this to current data format... 
only in latest version time steps of current generation are saved as attributes in isings\n #Getting number of time steps for each generation:\n try:\n # Get rid of double list (usually several individuals are in there but now only one is in there, which is why we can remove one nesting)\n time_steps_first_ind = load_top_isings_attr(sim_name, 1, 'time_steps')\n time_steps_each_gen = [time_steps[0] for time_steps in time_steps_first_ind]\n except Exception:\n energies_first_ind = load_top_isings_attr(sim_name, 1, 'energies')\n energies_first_ind = [energies[0] for energies in energies_first_ind]\n time_steps_each_gen = list(map(lambda x: len(x), energies_first_ind))\n\n\n settings = load_settings(sim_name)\n settings['pop_size'] = only_top_isings\n small_isings_list = create_small_isings(isings_avg_energy_list, time_steps_each_gen)\n mean_attrs_generational = create_generational_avg(small_isings_list, 'norm_avg_energy')\n return mean_attrs_generational\n\n\n\ndef create_generational_avg(isings_list, attr_name):\n mean_attrs_generational = []\n for isings in isings_list:\n attrs = []\n for I in isings:\n exec('attrs.append(I.{})'.format(attr_name))\n mean_attrs_generational.append(np.mean(attrs))\n return mean_attrs_generational\n\n\ndef plot_generational_avg(y_axis, colour, save_folder, add_save_name, alpha, s, get_axis=True, ylim=None,\n return_labels=False, set_labels=None):\n x_axis = np.arange(len(y_axis))\n #matplotlib.use('GTK3Cairo')\n plt.figure(figsize=(19, 10))\n ax = plt.scatter(x_axis, y_axis, alpha=alpha, c=colour, s=s)\n\n # Replace ticks with larger numbers\n locs, labels = plt.yticks()\n if set_labels is not None:\n labels = set_labels\n\n for label in labels[::2]:\n label.set_visible(False)\n\n legend_elements = [\n Line2D([0], [0], marker='o', color='w', label='Critical', markerfacecolor='darkorange',\n markersize=25, alpha=0.75),\n Line2D([0], [0], marker='o', color='w', label='Sub-critical', markerfacecolor='royalblue',\n markersize=25, alpha=0.75)\n ]\n\n plt.legend(loc=\"lower right\", bbox_to_anchor=(0.95, 0.05), handles=legend_elements)\n\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n #plt.yticks([])\n if get_axis:\n ylim = plt.ylim()\n else:\n plt.ylim(ylim)\n\n if not path.exists(save_folder):\n makedirs(save_folder)\n save_name = '{}.png'.format(add_save_name)\n\n plt.savefig(save_folder + save_name, dpi=300) #bbox_inches='tight'\n plt.show()\n if get_axis:\n return ylim\n if return_labels:\n return labels\n\ndef plot_overlap(y_axis_b1, y_axis_b10, colour_b1, colour_b10, save_folder, add_save_name, alpha, s, ylim):\n x_axis_b1 = np.arange(len(y_axis_b1))\n x_axis_b10 = np.arange(len(y_axis_b10))\n plt.figure(figsize=(19, 10))\n plt.scatter(x_axis_b1, y_axis_b1, alpha=alpha, c=colour_b1, s=s)\n plot1 = plt.scatter(x_axis_b10, y_axis_b10, alpha=alpha, c=colour_b10, s=s)\n plt.ylim(ylim)\n locs, labels = plt.yticks()\n for label in labels[::2]:\n label.set_visible(False)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n #plt.yticks([])\n legend_elements = [\n Line2D([0], [0], marker='o', color='w', label='Critical', markerfacecolor=colour_b1,\n markersize=25, alpha=0.75),\n Line2D([0], [0], marker='o', color='w', label='Sub-critical', markerfacecolor=colour_b10,\n markersize=25, alpha=0.75)\n ]\n\n plt.legend(loc=\"lower right\", bbox_to_anchor=(0.95, 0.05), handles=legend_elements)\n plt.savefig(save_folder+add_save_name, dpi=300)\n plt.show()\n\n\ndef create_small_isings(isings_avg_energy_list, time_steps_each_gen):\n small_isings_list = []\n for 
avg_energies, time_steps_gen in zip(isings_avg_energy_list, time_steps_each_gen):\n small_isings = []\n for avg_energy in avg_energies:\n I_small = SmallIsing(avg_energy, time_steps_gen)\n small_isings.append(I_small)\n small_isings_list.append(small_isings)\n return small_isings_list\n\n\nif __name__ == '__main__':\n # sim_name_b10_fix = 'sim-20200604-235433-g_2000_-t_2000_-b_10_-dream_c_0_-nat_c_0_-ref_0_-rec_c_0_-n_energies_velocities_saved'\n # sim_name_b1_fix = 'sim-20200604-235424-g_2000_-t_2000_-b_1_-dream_c_0_-nat_c_0_-ref_0_-rec_c_0_-n_energies_velocities_saved'\n # # sim_name_b10_rand = 'sim-20200621-130735-g_2001_-ref_0_-noplt_-b_10_-dream_c_500_-c_4_-a_1990_1999_--nomutb_-n_random_time_steps_save_energies_nomutb' #'sim-20200619-173340-g_2001_-ref_0_-noplt_-b_10_-dream_c_500_-c_4_-a_1995_1996_1997_1998_1999_-n_random_time_steps_save_energies_4'\n # # sim_name_b1_rand = 'sim-20200619-173349-g_2001_-ref_0_-noplt_-b_1_-dream_c_500_-c_4_-a_1995_1996_1997_1998_1999_-n_random_time_steps_save_energies_4'\n # sim_name_b10_rand = 'sim-20200702-113213-g_10000_-rand_ts_-rand_ts_lim_100_8000_-b_10_-noplt_-n_huge_random_ts_run_ts_saved'\n # sim_name_b1_rand = 'sim-20200702-113206-g_10000_-rand_ts_-rand_ts_lim_100_8000_-b_1_-noplt_-n_huge_random_ts_run_ts_saved'\n\n sim_name_b1_fix = 'sim-20200715-151540-g_4000_-t_2000_-b_1_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n sim_name_b10_fix = 'sim-20200715-151426-g_4000_-t_2000_-b_10_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n sim_name_b1_rand = 'sim-20200715-151519-g_4000_-rand_ts_-b_1_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n sim_name_b10_rand = 'sim-20200715-151458-g_4000_-rand_ts_-b_10_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n\n #pre_folder = 'Energies_Velocities_saved_during_2d_sim_random_time_steps_cut_off_animations/'\n pre_folder = ''\n\n # sim_name_b10_rand = pre_folder + sim_name_b10_rand\n # sim_name_b1_rand = pre_folder + sim_name_b1_rand\n sim_name_b10_fix = pre_folder + sim_name_b10_fix\n sim_name_b1_fix = pre_folder + sim_name_b1_fix\n all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand, sim_name_b10_rand)\n",
"from automatic_plot_helper import load_settings\nfrom automatic_plot_helper import load_top_isings\nfrom automatic_plot_helper import load_top_isings_attr\nfrom automatic_plot_helper import load_isings_from_list\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom os import makedirs, path\nimport pickle\nfrom matplotlib.patches import Patch\nfrom matplotlib.lines import Line2D\nimport os\n\n\nclass SmallIsing:\n\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\ndef all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_rand, only_top_isings=20, load_previous=False):\n save_folder = 'save/plots_for_anna/'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n matplotlib.rcParams.update({'font.size': 30})\n alpha = 0.3\n s = 25\n colour_b1 = 'darkorange'\n colour_b10 = 'royalblue'\n if not load_previous:\n attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)\n attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)\n attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand,\n only_top_isings)\n attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)\n loaded_plot_attrs = {'attrs_gen_b1_fix': attrs_gen_b1_fix,\n 'attrs_gen_b10_fix': attrs_gen_b10_fix, 'attrs_gen_b10_rand':\n attrs_gen_b10_rand, 'attrs_gen_b1_rand': attrs_gen_b1_rand}\n try:\n pickle_out = open('{}loaded_plot_attrs.pickle'.format(\n save_folder), 'wb')\n pickle.dump(loaded_plot_attrs, pickle_out)\n pickle_out.close()\n except Exception:\n print('Could not save pickle file')\n else:\n file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')\n loaded_plot_attrs = pickle.load(file)\n file.close()\n attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']\n attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']\n attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']\n attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']\n ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder,\n 'fixed_time_steps_b10', alpha, s, get_axis=True)\n labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder,\n 'fixed_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n return_labels=True)\n plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder,\n 'random_time_steps_b10', alpha, s, get_axis=False, ylim=ylim)\n plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder,\n 'random_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n set_labels=None)\n plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10,\n save_folder, 'Overlap_fixed_time_steps', alpha, s, ylim)\n plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1,\n colour_b10, save_folder, 'Overlap_random_time_steps', alpha, s, ylim)\n\n\ndef load_ising_stuff(sim_name, only_top_isings):\n isings_avg_energy_list = load_top_isings_attr(sim_name, only_top_isings,\n 'avg_energy')\n try:\n time_steps_first_ind = load_top_isings_attr(sim_name, 1, 'time_steps')\n time_steps_each_gen = [time_steps[0] for time_steps in\n time_steps_first_ind]\n except Exception:\n energies_first_ind = load_top_isings_attr(sim_name, 1, 'energies')\n energies_first_ind = [energies[0] for energies in energies_first_ind]\n time_steps_each_gen = list(map(lambda x: len(x), energies_first_ind))\n settings = load_settings(sim_name)\n settings['pop_size'] = only_top_isings\n small_isings_list = 
create_small_isings(isings_avg_energy_list,\n time_steps_each_gen)\n mean_attrs_generational = create_generational_avg(small_isings_list,\n 'norm_avg_energy')\n return mean_attrs_generational\n\n\ndef create_generational_avg(isings_list, attr_name):\n mean_attrs_generational = []\n for isings in isings_list:\n attrs = []\n for I in isings:\n exec('attrs.append(I.{})'.format(attr_name))\n mean_attrs_generational.append(np.mean(attrs))\n return mean_attrs_generational\n\n\ndef plot_generational_avg(y_axis, colour, save_folder, add_save_name, alpha,\n s, get_axis=True, ylim=None, return_labels=False, set_labels=None):\n x_axis = np.arange(len(y_axis))\n plt.figure(figsize=(19, 10))\n ax = plt.scatter(x_axis, y_axis, alpha=alpha, c=colour, s=s)\n locs, labels = plt.yticks()\n if set_labels is not None:\n labels = set_labels\n for label in labels[::2]:\n label.set_visible(False)\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor='darkorange', markersize=25, alpha=0.75\n ), Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor='royalblue', markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n if get_axis:\n ylim = plt.ylim()\n else:\n plt.ylim(ylim)\n if not path.exists(save_folder):\n makedirs(save_folder)\n save_name = '{}.png'.format(add_save_name)\n plt.savefig(save_folder + save_name, dpi=300)\n plt.show()\n if get_axis:\n return ylim\n if return_labels:\n return labels\n\n\ndef plot_overlap(y_axis_b1, y_axis_b10, colour_b1, colour_b10, save_folder,\n add_save_name, alpha, s, ylim):\n x_axis_b1 = np.arange(len(y_axis_b1))\n x_axis_b10 = np.arange(len(y_axis_b10))\n plt.figure(figsize=(19, 10))\n plt.scatter(x_axis_b1, y_axis_b1, alpha=alpha, c=colour_b1, s=s)\n plot1 = plt.scatter(x_axis_b10, y_axis_b10, alpha=alpha, c=colour_b10, s=s)\n plt.ylim(ylim)\n locs, labels = plt.yticks()\n for label in labels[::2]:\n label.set_visible(False)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor=colour_b1, markersize=25, alpha=0.75),\n Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor=colour_b10, markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.savefig(save_folder + add_save_name, dpi=300)\n plt.show()\n\n\ndef create_small_isings(isings_avg_energy_list, time_steps_each_gen):\n small_isings_list = []\n for avg_energies, time_steps_gen in zip(isings_avg_energy_list,\n time_steps_each_gen):\n small_isings = []\n for avg_energy in avg_energies:\n I_small = SmallIsing(avg_energy, time_steps_gen)\n small_isings.append(I_small)\n small_isings_list.append(small_isings)\n return small_isings_list\n\n\nif __name__ == '__main__':\n sim_name_b1_fix = (\n 'sim-20200715-151540-g_4000_-t_2000_-b_1_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n )\n sim_name_b10_fix = (\n 'sim-20200715-151426-g_4000_-t_2000_-b_10_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n )\n sim_name_b1_rand = (\n 'sim-20200715-151519-g_4000_-rand_ts_-b_1_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n )\n sim_name_b10_rand = (\n 'sim-20200715-151458-g_4000_-rand_ts_-b_10_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n )\n pre_folder = ''\n sim_name_b10_fix = pre_folder + 
sim_name_b10_fix\n sim_name_b1_fix = pre_folder + sim_name_b1_fix\n all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_b10_rand)\n",
"<import token>\n\n\nclass SmallIsing:\n\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\ndef all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_rand, only_top_isings=20, load_previous=False):\n save_folder = 'save/plots_for_anna/'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n matplotlib.rcParams.update({'font.size': 30})\n alpha = 0.3\n s = 25\n colour_b1 = 'darkorange'\n colour_b10 = 'royalblue'\n if not load_previous:\n attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)\n attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)\n attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand,\n only_top_isings)\n attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)\n loaded_plot_attrs = {'attrs_gen_b1_fix': attrs_gen_b1_fix,\n 'attrs_gen_b10_fix': attrs_gen_b10_fix, 'attrs_gen_b10_rand':\n attrs_gen_b10_rand, 'attrs_gen_b1_rand': attrs_gen_b1_rand}\n try:\n pickle_out = open('{}loaded_plot_attrs.pickle'.format(\n save_folder), 'wb')\n pickle.dump(loaded_plot_attrs, pickle_out)\n pickle_out.close()\n except Exception:\n print('Could not save pickle file')\n else:\n file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')\n loaded_plot_attrs = pickle.load(file)\n file.close()\n attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']\n attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']\n attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']\n attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']\n ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder,\n 'fixed_time_steps_b10', alpha, s, get_axis=True)\n labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder,\n 'fixed_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n return_labels=True)\n plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder,\n 'random_time_steps_b10', alpha, s, get_axis=False, ylim=ylim)\n plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder,\n 'random_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n set_labels=None)\n plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10,\n save_folder, 'Overlap_fixed_time_steps', alpha, s, ylim)\n plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1,\n colour_b10, save_folder, 'Overlap_random_time_steps', alpha, s, ylim)\n\n\ndef load_ising_stuff(sim_name, only_top_isings):\n isings_avg_energy_list = load_top_isings_attr(sim_name, only_top_isings,\n 'avg_energy')\n try:\n time_steps_first_ind = load_top_isings_attr(sim_name, 1, 'time_steps')\n time_steps_each_gen = [time_steps[0] for time_steps in\n time_steps_first_ind]\n except Exception:\n energies_first_ind = load_top_isings_attr(sim_name, 1, 'energies')\n energies_first_ind = [energies[0] for energies in energies_first_ind]\n time_steps_each_gen = list(map(lambda x: len(x), energies_first_ind))\n settings = load_settings(sim_name)\n settings['pop_size'] = only_top_isings\n small_isings_list = create_small_isings(isings_avg_energy_list,\n time_steps_each_gen)\n mean_attrs_generational = create_generational_avg(small_isings_list,\n 'norm_avg_energy')\n return mean_attrs_generational\n\n\ndef create_generational_avg(isings_list, attr_name):\n mean_attrs_generational = []\n for isings in isings_list:\n attrs = []\n for I in isings:\n exec('attrs.append(I.{})'.format(attr_name))\n 
mean_attrs_generational.append(np.mean(attrs))\n return mean_attrs_generational\n\n\ndef plot_generational_avg(y_axis, colour, save_folder, add_save_name, alpha,\n s, get_axis=True, ylim=None, return_labels=False, set_labels=None):\n x_axis = np.arange(len(y_axis))\n plt.figure(figsize=(19, 10))\n ax = plt.scatter(x_axis, y_axis, alpha=alpha, c=colour, s=s)\n locs, labels = plt.yticks()\n if set_labels is not None:\n labels = set_labels\n for label in labels[::2]:\n label.set_visible(False)\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor='darkorange', markersize=25, alpha=0.75\n ), Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor='royalblue', markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n if get_axis:\n ylim = plt.ylim()\n else:\n plt.ylim(ylim)\n if not path.exists(save_folder):\n makedirs(save_folder)\n save_name = '{}.png'.format(add_save_name)\n plt.savefig(save_folder + save_name, dpi=300)\n plt.show()\n if get_axis:\n return ylim\n if return_labels:\n return labels\n\n\ndef plot_overlap(y_axis_b1, y_axis_b10, colour_b1, colour_b10, save_folder,\n add_save_name, alpha, s, ylim):\n x_axis_b1 = np.arange(len(y_axis_b1))\n x_axis_b10 = np.arange(len(y_axis_b10))\n plt.figure(figsize=(19, 10))\n plt.scatter(x_axis_b1, y_axis_b1, alpha=alpha, c=colour_b1, s=s)\n plot1 = plt.scatter(x_axis_b10, y_axis_b10, alpha=alpha, c=colour_b10, s=s)\n plt.ylim(ylim)\n locs, labels = plt.yticks()\n for label in labels[::2]:\n label.set_visible(False)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor=colour_b1, markersize=25, alpha=0.75),\n Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor=colour_b10, markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.savefig(save_folder + add_save_name, dpi=300)\n plt.show()\n\n\ndef create_small_isings(isings_avg_energy_list, time_steps_each_gen):\n small_isings_list = []\n for avg_energies, time_steps_gen in zip(isings_avg_energy_list,\n time_steps_each_gen):\n small_isings = []\n for avg_energy in avg_energies:\n I_small = SmallIsing(avg_energy, time_steps_gen)\n small_isings.append(I_small)\n small_isings_list.append(small_isings)\n return small_isings_list\n\n\nif __name__ == '__main__':\n sim_name_b1_fix = (\n 'sim-20200715-151540-g_4000_-t_2000_-b_1_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n )\n sim_name_b10_fix = (\n 'sim-20200715-151426-g_4000_-t_2000_-b_10_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n )\n sim_name_b1_rand = (\n 'sim-20200715-151519-g_4000_-rand_ts_-b_1_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n )\n sim_name_b10_rand = (\n 'sim-20200715-151458-g_4000_-rand_ts_-b_10_-ref_500_-rec_c_500_-n_beta_uniform_mutations_added_normal_run'\n )\n pre_folder = ''\n sim_name_b10_fix = pre_folder + sim_name_b10_fix\n sim_name_b1_fix = pre_folder + sim_name_b1_fix\n all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_b10_rand)\n",
"<import token>\n\n\nclass SmallIsing:\n\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\ndef all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_rand, only_top_isings=20, load_previous=False):\n save_folder = 'save/plots_for_anna/'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n matplotlib.rcParams.update({'font.size': 30})\n alpha = 0.3\n s = 25\n colour_b1 = 'darkorange'\n colour_b10 = 'royalblue'\n if not load_previous:\n attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)\n attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)\n attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand,\n only_top_isings)\n attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)\n loaded_plot_attrs = {'attrs_gen_b1_fix': attrs_gen_b1_fix,\n 'attrs_gen_b10_fix': attrs_gen_b10_fix, 'attrs_gen_b10_rand':\n attrs_gen_b10_rand, 'attrs_gen_b1_rand': attrs_gen_b1_rand}\n try:\n pickle_out = open('{}loaded_plot_attrs.pickle'.format(\n save_folder), 'wb')\n pickle.dump(loaded_plot_attrs, pickle_out)\n pickle_out.close()\n except Exception:\n print('Could not save pickle file')\n else:\n file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')\n loaded_plot_attrs = pickle.load(file)\n file.close()\n attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']\n attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']\n attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']\n attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']\n ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder,\n 'fixed_time_steps_b10', alpha, s, get_axis=True)\n labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder,\n 'fixed_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n return_labels=True)\n plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder,\n 'random_time_steps_b10', alpha, s, get_axis=False, ylim=ylim)\n plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder,\n 'random_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n set_labels=None)\n plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10,\n save_folder, 'Overlap_fixed_time_steps', alpha, s, ylim)\n plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1,\n colour_b10, save_folder, 'Overlap_random_time_steps', alpha, s, ylim)\n\n\ndef load_ising_stuff(sim_name, only_top_isings):\n isings_avg_energy_list = load_top_isings_attr(sim_name, only_top_isings,\n 'avg_energy')\n try:\n time_steps_first_ind = load_top_isings_attr(sim_name, 1, 'time_steps')\n time_steps_each_gen = [time_steps[0] for time_steps in\n time_steps_first_ind]\n except Exception:\n energies_first_ind = load_top_isings_attr(sim_name, 1, 'energies')\n energies_first_ind = [energies[0] for energies in energies_first_ind]\n time_steps_each_gen = list(map(lambda x: len(x), energies_first_ind))\n settings = load_settings(sim_name)\n settings['pop_size'] = only_top_isings\n small_isings_list = create_small_isings(isings_avg_energy_list,\n time_steps_each_gen)\n mean_attrs_generational = create_generational_avg(small_isings_list,\n 'norm_avg_energy')\n return mean_attrs_generational\n\n\ndef create_generational_avg(isings_list, attr_name):\n mean_attrs_generational = []\n for isings in isings_list:\n attrs = []\n for I in isings:\n exec('attrs.append(I.{})'.format(attr_name))\n 
mean_attrs_generational.append(np.mean(attrs))\n return mean_attrs_generational\n\n\ndef plot_generational_avg(y_axis, colour, save_folder, add_save_name, alpha,\n s, get_axis=True, ylim=None, return_labels=False, set_labels=None):\n x_axis = np.arange(len(y_axis))\n plt.figure(figsize=(19, 10))\n ax = plt.scatter(x_axis, y_axis, alpha=alpha, c=colour, s=s)\n locs, labels = plt.yticks()\n if set_labels is not None:\n labels = set_labels\n for label in labels[::2]:\n label.set_visible(False)\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor='darkorange', markersize=25, alpha=0.75\n ), Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor='royalblue', markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n if get_axis:\n ylim = plt.ylim()\n else:\n plt.ylim(ylim)\n if not path.exists(save_folder):\n makedirs(save_folder)\n save_name = '{}.png'.format(add_save_name)\n plt.savefig(save_folder + save_name, dpi=300)\n plt.show()\n if get_axis:\n return ylim\n if return_labels:\n return labels\n\n\ndef plot_overlap(y_axis_b1, y_axis_b10, colour_b1, colour_b10, save_folder,\n add_save_name, alpha, s, ylim):\n x_axis_b1 = np.arange(len(y_axis_b1))\n x_axis_b10 = np.arange(len(y_axis_b10))\n plt.figure(figsize=(19, 10))\n plt.scatter(x_axis_b1, y_axis_b1, alpha=alpha, c=colour_b1, s=s)\n plot1 = plt.scatter(x_axis_b10, y_axis_b10, alpha=alpha, c=colour_b10, s=s)\n plt.ylim(ylim)\n locs, labels = plt.yticks()\n for label in labels[::2]:\n label.set_visible(False)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor=colour_b1, markersize=25, alpha=0.75),\n Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor=colour_b10, markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.savefig(save_folder + add_save_name, dpi=300)\n plt.show()\n\n\ndef create_small_isings(isings_avg_energy_list, time_steps_each_gen):\n small_isings_list = []\n for avg_energies, time_steps_gen in zip(isings_avg_energy_list,\n time_steps_each_gen):\n small_isings = []\n for avg_energy in avg_energies:\n I_small = SmallIsing(avg_energy, time_steps_gen)\n small_isings.append(I_small)\n small_isings_list.append(small_isings)\n return small_isings_list\n\n\n<code token>\n",
"<import token>\n\n\nclass SmallIsing:\n\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\ndef all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_rand, only_top_isings=20, load_previous=False):\n save_folder = 'save/plots_for_anna/'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n matplotlib.rcParams.update({'font.size': 30})\n alpha = 0.3\n s = 25\n colour_b1 = 'darkorange'\n colour_b10 = 'royalblue'\n if not load_previous:\n attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)\n attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)\n attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand,\n only_top_isings)\n attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)\n loaded_plot_attrs = {'attrs_gen_b1_fix': attrs_gen_b1_fix,\n 'attrs_gen_b10_fix': attrs_gen_b10_fix, 'attrs_gen_b10_rand':\n attrs_gen_b10_rand, 'attrs_gen_b1_rand': attrs_gen_b1_rand}\n try:\n pickle_out = open('{}loaded_plot_attrs.pickle'.format(\n save_folder), 'wb')\n pickle.dump(loaded_plot_attrs, pickle_out)\n pickle_out.close()\n except Exception:\n print('Could not save pickle file')\n else:\n file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')\n loaded_plot_attrs = pickle.load(file)\n file.close()\n attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']\n attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']\n attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']\n attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']\n ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder,\n 'fixed_time_steps_b10', alpha, s, get_axis=True)\n labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder,\n 'fixed_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n return_labels=True)\n plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder,\n 'random_time_steps_b10', alpha, s, get_axis=False, ylim=ylim)\n plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder,\n 'random_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n set_labels=None)\n plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10,\n save_folder, 'Overlap_fixed_time_steps', alpha, s, ylim)\n plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1,\n colour_b10, save_folder, 'Overlap_random_time_steps', alpha, s, ylim)\n\n\ndef load_ising_stuff(sim_name, only_top_isings):\n isings_avg_energy_list = load_top_isings_attr(sim_name, only_top_isings,\n 'avg_energy')\n try:\n time_steps_first_ind = load_top_isings_attr(sim_name, 1, 'time_steps')\n time_steps_each_gen = [time_steps[0] for time_steps in\n time_steps_first_ind]\n except Exception:\n energies_first_ind = load_top_isings_attr(sim_name, 1, 'energies')\n energies_first_ind = [energies[0] for energies in energies_first_ind]\n time_steps_each_gen = list(map(lambda x: len(x), energies_first_ind))\n settings = load_settings(sim_name)\n settings['pop_size'] = only_top_isings\n small_isings_list = create_small_isings(isings_avg_energy_list,\n time_steps_each_gen)\n mean_attrs_generational = create_generational_avg(small_isings_list,\n 'norm_avg_energy')\n return mean_attrs_generational\n\n\ndef create_generational_avg(isings_list, attr_name):\n mean_attrs_generational = []\n for isings in isings_list:\n attrs = []\n for I in isings:\n exec('attrs.append(I.{})'.format(attr_name))\n 
mean_attrs_generational.append(np.mean(attrs))\n return mean_attrs_generational\n\n\ndef plot_generational_avg(y_axis, colour, save_folder, add_save_name, alpha,\n s, get_axis=True, ylim=None, return_labels=False, set_labels=None):\n x_axis = np.arange(len(y_axis))\n plt.figure(figsize=(19, 10))\n ax = plt.scatter(x_axis, y_axis, alpha=alpha, c=colour, s=s)\n locs, labels = plt.yticks()\n if set_labels is not None:\n labels = set_labels\n for label in labels[::2]:\n label.set_visible(False)\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor='darkorange', markersize=25, alpha=0.75\n ), Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor='royalblue', markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n if get_axis:\n ylim = plt.ylim()\n else:\n plt.ylim(ylim)\n if not path.exists(save_folder):\n makedirs(save_folder)\n save_name = '{}.png'.format(add_save_name)\n plt.savefig(save_folder + save_name, dpi=300)\n plt.show()\n if get_axis:\n return ylim\n if return_labels:\n return labels\n\n\ndef plot_overlap(y_axis_b1, y_axis_b10, colour_b1, colour_b10, save_folder,\n add_save_name, alpha, s, ylim):\n x_axis_b1 = np.arange(len(y_axis_b1))\n x_axis_b10 = np.arange(len(y_axis_b10))\n plt.figure(figsize=(19, 10))\n plt.scatter(x_axis_b1, y_axis_b1, alpha=alpha, c=colour_b1, s=s)\n plot1 = plt.scatter(x_axis_b10, y_axis_b10, alpha=alpha, c=colour_b10, s=s)\n plt.ylim(ylim)\n locs, labels = plt.yticks()\n for label in labels[::2]:\n label.set_visible(False)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor=colour_b1, markersize=25, alpha=0.75),\n Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor=colour_b10, markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.savefig(save_folder + add_save_name, dpi=300)\n plt.show()\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass SmallIsing:\n\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\ndef all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_rand, only_top_isings=20, load_previous=False):\n save_folder = 'save/plots_for_anna/'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n matplotlib.rcParams.update({'font.size': 30})\n alpha = 0.3\n s = 25\n colour_b1 = 'darkorange'\n colour_b10 = 'royalblue'\n if not load_previous:\n attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)\n attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)\n attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand,\n only_top_isings)\n attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)\n loaded_plot_attrs = {'attrs_gen_b1_fix': attrs_gen_b1_fix,\n 'attrs_gen_b10_fix': attrs_gen_b10_fix, 'attrs_gen_b10_rand':\n attrs_gen_b10_rand, 'attrs_gen_b1_rand': attrs_gen_b1_rand}\n try:\n pickle_out = open('{}loaded_plot_attrs.pickle'.format(\n save_folder), 'wb')\n pickle.dump(loaded_plot_attrs, pickle_out)\n pickle_out.close()\n except Exception:\n print('Could not save pickle file')\n else:\n file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')\n loaded_plot_attrs = pickle.load(file)\n file.close()\n attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']\n attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']\n attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']\n attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']\n ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder,\n 'fixed_time_steps_b10', alpha, s, get_axis=True)\n labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder,\n 'fixed_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n return_labels=True)\n plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder,\n 'random_time_steps_b10', alpha, s, get_axis=False, ylim=ylim)\n plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder,\n 'random_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n set_labels=None)\n plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10,\n save_folder, 'Overlap_fixed_time_steps', alpha, s, ylim)\n plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1,\n colour_b10, save_folder, 'Overlap_random_time_steps', alpha, s, ylim)\n\n\n<function token>\n\n\ndef create_generational_avg(isings_list, attr_name):\n mean_attrs_generational = []\n for isings in isings_list:\n attrs = []\n for I in isings:\n exec('attrs.append(I.{})'.format(attr_name))\n mean_attrs_generational.append(np.mean(attrs))\n return mean_attrs_generational\n\n\ndef plot_generational_avg(y_axis, colour, save_folder, add_save_name, alpha,\n s, get_axis=True, ylim=None, return_labels=False, set_labels=None):\n x_axis = np.arange(len(y_axis))\n plt.figure(figsize=(19, 10))\n ax = plt.scatter(x_axis, y_axis, alpha=alpha, c=colour, s=s)\n locs, labels = plt.yticks()\n if set_labels is not None:\n labels = set_labels\n for label in labels[::2]:\n label.set_visible(False)\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor='darkorange', markersize=25, alpha=0.75\n ), Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor='royalblue', markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), 
handles=\n legend_elements)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n if get_axis:\n ylim = plt.ylim()\n else:\n plt.ylim(ylim)\n if not path.exists(save_folder):\n makedirs(save_folder)\n save_name = '{}.png'.format(add_save_name)\n plt.savefig(save_folder + save_name, dpi=300)\n plt.show()\n if get_axis:\n return ylim\n if return_labels:\n return labels\n\n\ndef plot_overlap(y_axis_b1, y_axis_b10, colour_b1, colour_b10, save_folder,\n add_save_name, alpha, s, ylim):\n x_axis_b1 = np.arange(len(y_axis_b1))\n x_axis_b10 = np.arange(len(y_axis_b10))\n plt.figure(figsize=(19, 10))\n plt.scatter(x_axis_b1, y_axis_b1, alpha=alpha, c=colour_b1, s=s)\n plot1 = plt.scatter(x_axis_b10, y_axis_b10, alpha=alpha, c=colour_b10, s=s)\n plt.ylim(ylim)\n locs, labels = plt.yticks()\n for label in labels[::2]:\n label.set_visible(False)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor=colour_b1, markersize=25, alpha=0.75),\n Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor=colour_b10, markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.savefig(save_folder + add_save_name, dpi=300)\n plt.show()\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass SmallIsing:\n\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\ndef all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_rand, only_top_isings=20, load_previous=False):\n save_folder = 'save/plots_for_anna/'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n matplotlib.rcParams.update({'font.size': 30})\n alpha = 0.3\n s = 25\n colour_b1 = 'darkorange'\n colour_b10 = 'royalblue'\n if not load_previous:\n attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)\n attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)\n attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand,\n only_top_isings)\n attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)\n loaded_plot_attrs = {'attrs_gen_b1_fix': attrs_gen_b1_fix,\n 'attrs_gen_b10_fix': attrs_gen_b10_fix, 'attrs_gen_b10_rand':\n attrs_gen_b10_rand, 'attrs_gen_b1_rand': attrs_gen_b1_rand}\n try:\n pickle_out = open('{}loaded_plot_attrs.pickle'.format(\n save_folder), 'wb')\n pickle.dump(loaded_plot_attrs, pickle_out)\n pickle_out.close()\n except Exception:\n print('Could not save pickle file')\n else:\n file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')\n loaded_plot_attrs = pickle.load(file)\n file.close()\n attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']\n attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']\n attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']\n attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']\n ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder,\n 'fixed_time_steps_b10', alpha, s, get_axis=True)\n labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder,\n 'fixed_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n return_labels=True)\n plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder,\n 'random_time_steps_b10', alpha, s, get_axis=False, ylim=ylim)\n plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder,\n 'random_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n set_labels=None)\n plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10,\n save_folder, 'Overlap_fixed_time_steps', alpha, s, ylim)\n plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1,\n colour_b10, save_folder, 'Overlap_random_time_steps', alpha, s, ylim)\n\n\n<function token>\n<function token>\n\n\ndef plot_generational_avg(y_axis, colour, save_folder, add_save_name, alpha,\n s, get_axis=True, ylim=None, return_labels=False, set_labels=None):\n x_axis = np.arange(len(y_axis))\n plt.figure(figsize=(19, 10))\n ax = plt.scatter(x_axis, y_axis, alpha=alpha, c=colour, s=s)\n locs, labels = plt.yticks()\n if set_labels is not None:\n labels = set_labels\n for label in labels[::2]:\n label.set_visible(False)\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor='darkorange', markersize=25, alpha=0.75\n ), Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor='royalblue', markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n if get_axis:\n ylim = plt.ylim()\n else:\n plt.ylim(ylim)\n if not path.exists(save_folder):\n makedirs(save_folder)\n save_name = '{}.png'.format(add_save_name)\n 
plt.savefig(save_folder + save_name, dpi=300)\n plt.show()\n if get_axis:\n return ylim\n if return_labels:\n return labels\n\n\ndef plot_overlap(y_axis_b1, y_axis_b10, colour_b1, colour_b10, save_folder,\n add_save_name, alpha, s, ylim):\n x_axis_b1 = np.arange(len(y_axis_b1))\n x_axis_b10 = np.arange(len(y_axis_b10))\n plt.figure(figsize=(19, 10))\n plt.scatter(x_axis_b1, y_axis_b1, alpha=alpha, c=colour_b1, s=s)\n plot1 = plt.scatter(x_axis_b10, y_axis_b10, alpha=alpha, c=colour_b10, s=s)\n plt.ylim(ylim)\n locs, labels = plt.yticks()\n for label in labels[::2]:\n label.set_visible(False)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor=colour_b1, markersize=25, alpha=0.75),\n Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor=colour_b10, markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.savefig(save_folder + add_save_name, dpi=300)\n plt.show()\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass SmallIsing:\n\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\ndef all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_rand, only_top_isings=20, load_previous=False):\n save_folder = 'save/plots_for_anna/'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n matplotlib.rcParams.update({'font.size': 30})\n alpha = 0.3\n s = 25\n colour_b1 = 'darkorange'\n colour_b10 = 'royalblue'\n if not load_previous:\n attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)\n attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)\n attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand,\n only_top_isings)\n attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)\n loaded_plot_attrs = {'attrs_gen_b1_fix': attrs_gen_b1_fix,\n 'attrs_gen_b10_fix': attrs_gen_b10_fix, 'attrs_gen_b10_rand':\n attrs_gen_b10_rand, 'attrs_gen_b1_rand': attrs_gen_b1_rand}\n try:\n pickle_out = open('{}loaded_plot_attrs.pickle'.format(\n save_folder), 'wb')\n pickle.dump(loaded_plot_attrs, pickle_out)\n pickle_out.close()\n except Exception:\n print('Could not save pickle file')\n else:\n file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')\n loaded_plot_attrs = pickle.load(file)\n file.close()\n attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']\n attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']\n attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']\n attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']\n ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder,\n 'fixed_time_steps_b10', alpha, s, get_axis=True)\n labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder,\n 'fixed_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n return_labels=True)\n plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder,\n 'random_time_steps_b10', alpha, s, get_axis=False, ylim=ylim)\n plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder,\n 'random_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n set_labels=None)\n plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10,\n save_folder, 'Overlap_fixed_time_steps', alpha, s, ylim)\n plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1,\n colour_b10, save_folder, 'Overlap_random_time_steps', alpha, s, ylim)\n\n\n<function token>\n<function token>\n\n\ndef plot_generational_avg(y_axis, colour, save_folder, add_save_name, alpha,\n s, get_axis=True, ylim=None, return_labels=False, set_labels=None):\n x_axis = np.arange(len(y_axis))\n plt.figure(figsize=(19, 10))\n ax = plt.scatter(x_axis, y_axis, alpha=alpha, c=colour, s=s)\n locs, labels = plt.yticks()\n if set_labels is not None:\n labels = set_labels\n for label in labels[::2]:\n label.set_visible(False)\n legend_elements = [Line2D([0], [0], marker='o', color='w', label=\n 'Critical', markerfacecolor='darkorange', markersize=25, alpha=0.75\n ), Line2D([0], [0], marker='o', color='w', label='Sub-critical',\n markerfacecolor='royalblue', markersize=25, alpha=0.75)]\n plt.legend(loc='lower right', bbox_to_anchor=(0.95, 0.05), handles=\n legend_elements)\n plt.xlabel('Generation')\n plt.ylabel('Performance')\n if get_axis:\n ylim = plt.ylim()\n else:\n plt.ylim(ylim)\n if not path.exists(save_folder):\n makedirs(save_folder)\n save_name = '{}.png'.format(add_save_name)\n 
plt.savefig(save_folder + save_name, dpi=300)\n plt.show()\n if get_axis:\n return ylim\n if return_labels:\n return labels\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass SmallIsing:\n\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\ndef all_plots(sim_name_b1_fix, sim_name_b10_fix, sim_name_b1_rand,\n sim_name_rand, only_top_isings=20, load_previous=False):\n save_folder = 'save/plots_for_anna/'\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n matplotlib.rcParams.update({'font.size': 30})\n alpha = 0.3\n s = 25\n colour_b1 = 'darkorange'\n colour_b10 = 'royalblue'\n if not load_previous:\n attrs_gen_b10_fix = load_ising_stuff(sim_name_b10_fix, only_top_isings)\n attrs_gen_b1_fix = load_ising_stuff(sim_name_b1_fix, only_top_isings)\n attrs_gen_b10_rand = load_ising_stuff(sim_name_b10_rand,\n only_top_isings)\n attrs_gen_b1_rand = load_ising_stuff(sim_name_b1_rand, only_top_isings)\n loaded_plot_attrs = {'attrs_gen_b1_fix': attrs_gen_b1_fix,\n 'attrs_gen_b10_fix': attrs_gen_b10_fix, 'attrs_gen_b10_rand':\n attrs_gen_b10_rand, 'attrs_gen_b1_rand': attrs_gen_b1_rand}\n try:\n pickle_out = open('{}loaded_plot_attrs.pickle'.format(\n save_folder), 'wb')\n pickle.dump(loaded_plot_attrs, pickle_out)\n pickle_out.close()\n except Exception:\n print('Could not save pickle file')\n else:\n file = open('{}/loaded_plot_attrs.pickle'.format(save_folder), 'rb')\n loaded_plot_attrs = pickle.load(file)\n file.close()\n attrs_gen_b10_fix = loaded_plot_attrs['attrs_gen_b10_fix']\n attrs_gen_b1_fix = loaded_plot_attrs['attrs_gen_b1_fix']\n attrs_gen_b10_rand = loaded_plot_attrs['attrs_gen_b10_rand']\n attrs_gen_b1_rand = loaded_plot_attrs['attrs_gen_b1_rand']\n ylim = plot_generational_avg(attrs_gen_b10_fix, colour_b10, save_folder,\n 'fixed_time_steps_b10', alpha, s, get_axis=True)\n labels = plot_generational_avg(attrs_gen_b1_fix, colour_b1, save_folder,\n 'fixed_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n return_labels=True)\n plot_generational_avg(attrs_gen_b10_rand, colour_b10, save_folder,\n 'random_time_steps_b10', alpha, s, get_axis=False, ylim=ylim)\n plot_generational_avg(attrs_gen_b1_rand, colour_b1, save_folder,\n 'random_time_steps_b1', alpha, s, get_axis=False, ylim=ylim,\n set_labels=None)\n plot_overlap(attrs_gen_b1_fix, attrs_gen_b10_fix, colour_b1, colour_b10,\n save_folder, 'Overlap_fixed_time_steps', alpha, s, ylim)\n plot_overlap(attrs_gen_b1_rand, attrs_gen_b10_rand, colour_b1,\n colour_b10, save_folder, 'Overlap_random_time_steps', alpha, s, ylim)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass SmallIsing:\n\n def __init__(self, avg_energy, time_steps_gen):\n self.avg_energy = avg_energy\n self.time_steps_gen = time_steps_gen\n self.norm_avg_energy = avg_energy / time_steps_gen\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass SmallIsing:\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,803 |
5cfc18ad52fa8a07914c7b57ff975301481efffc
|
from tensorflow.keras.layers import Layer
import numpy as np
import pandas as pd
import tensorflow
# class ts_stddev(Layer):
# def __init__(self, **kwargs):
# self.window = 10
# self.stride = 10
# self.features_num = 8
# self.backward_len = 30
# self.logging = False
# self.input_data_shape = (None, self.features_num, self.backward_len, 1)
# super(ts_stddev, self).__init__(**kwargs)
#
# def call(self, inputs, **kwargs):
# assert inputs.shape[1:] == self.input_data_shape[1:]
# arr = inputs.numpy()
# arr_r10 = np.roll(arr, shift=self.window, axis=2)
#
# temp_dict = dict()
# for num in range(int(self.backward_len / self.stride)):
# arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.stride, :]
# arr_std10 = np.std(arr_trim, axis=2)
# arr_std10_re = np.reshape(arr_std10, (arr_std10.shape[0], arr_std10.shape[1], 1, arr_std10.shape[2]))
# if self.logging:
# print(num)
# print(arr_trim.shape)
# print(arr_std10.shape)
# print(arr_std10_re.shape)
# print(arr_trim[0, :, :, 0].shape)
# print(pd.DataFrame(arr_trim[0, :, :, 0]))
# print(np.std(arr_trim[0, :, :, 0], axis=1))
# temp_dict[num] = arr_std10_re
#
# total_num = int(self.backward_len / self.stride)
# temp_list = [temp_dict[num] for num in range(1, total_num)]
# temp_list.append(temp_dict[0])
#
# result = np.concatenate(tuple(temp_list), axis=2)
# return result
#
# def compute_output_shape(self, input_shape):
# return input_shape[0], input_shape[1], int(input_shape[2] / self.stride), input_shape[3]
class ts_zscore(Layer):
def __init__(self, **kwargs):
self.window = 10
self.stride = 10
self.features_num = 8
self.backward_len = 30
self.logging = False
self.input_data_shape = (None, self.features_num, self.backward_len, 1)
super(ts_zscore, self).__init__(**kwargs)
def np_func(self,inputs):
assert inputs.shape[1:] == self.input_data_shape[1:]
arr = inputs.numpy()
arr_r10 = np.roll(arr, shift=self.window, axis=2)
temp_dict = dict()
for num in range(int(self.backward_len / self.stride)):
arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.stride, :]
arr_mean10 = np.mean(arr_trim, axis=2)
arr_std10 = np.std(arr_trim, axis=2)
arr_zscore10 = arr_mean10 / arr_std10
arr_zscore10_re = np.reshape(arr_zscore10,
(arr_zscore10.shape[0], arr_zscore10.shape[1], 1, arr_zscore10.shape[2]))
if self.logging:
print(num)
print(arr_trim.shape)
print(arr_std10.shape)
print(arr_zscore10_re.shape)
print(arr_trim[0, :, :, 0].shape)
print(pd.DataFrame(arr_trim[0, :, :, 0]))
print(np.std(arr_trim[0, :, :, 0], axis=1))
temp_dict[num] = arr_zscore10_re
total_num = int(self.backward_len / self.stride)
temp_list = [temp_dict[num] for num in range(1, total_num)]
temp_list.append(temp_dict[0])
result = np.concatenate(tuple(temp_list), axis=2)
return result
def call(self, inputs, **kwargs):
return self.np_func(inputs)
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[1], int(input_shape[2] / self.stride), input_shape[3]
# class ts_return(Layer):
# def __init__(self, **kwargs):
# self.window = 10
# self.stride = 10
# self.features_num = 8
# self.backward_len = 30
# self.logging = False
# self.input_data_shape = (None, self.features_num, self.backward_len, 1)
# super(ts_return, self).__init__(**kwargs)
#
# def call(self, inputs, **kwargs):
# assert inputs.shape[1:] == self.input_data_shape[1:]
#
# arr = inputs.numpy()
# arr_r10 = np.roll(arr, shift=self.window, axis=2)
#
# temp_dict = dict()
# for num in range(int(self.backward_len / self.stride)):
# arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.stride, :]
# arr_head10 = arr_trim[:, :, [0], :]
# arr_tail10 = arr_trim[:, :, [-1], :]
# arr_ret10 = arr_tail10 / arr_head10 - 1
# # arr_zscore10_re = np.reshape(arr_ret10,
# # (arr_ret10.shape[0], arr_ret10.shape[1], 1, arr_ret10.shape[2]))
#
# arr_ret10_re = arr_ret10
# if self.logging:
# print(num)
# print(arr_trim.shape)
# print(arr_ret10_re.shape)
# print(arr_trim[0, :, :, 0].shape)
# print(pd.DataFrame(arr_trim[0, :, :, 0]))
# print(np.std(arr_trim[0, :, :, 0], axis=1))
# temp_dict[num] = arr_ret10_re
#
# total_num = int(self.backward_len / self.stride)
# temp_list = [temp_dict[num] for num in range(1, total_num)]
# temp_list.append(temp_dict[0])
#
# result = np.concatenate(tuple(temp_list), axis=2)
# return result
#
# def compute_output_shape(self, input_shape):
# return input_shape[0], input_shape[1], int(input_shape[2] / self.stride), input_shape[3]
# class ts_decaylinear(Layer):
# def __init__(self, **kwargs):
# self.window = 10
# self.stride = 10
# self.features_num = 8
# self.backward_len = 30
# self.logging = False
# self.input_data_shape = (None, self.features_num, self.backward_len, 1)
# super(ts_decaylinear, self).__init__(**kwargs)
#
# def call(self, inputs, **kwargs):
# assert inputs.shape[1:] == self.input_data_shape[1:]
#
# arr = inputs.numpy()
# arr_r10 = np.roll(arr, shift=self.window, axis=2)
#         # Generate a weight vector of length 30
# weight_arr = np.array(range(1, 1 + self.stride))
# weight_arr = weight_arr / weight_arr.sum()
# weight_arr2d = np.expand_dims(weight_arr, axis=0)
# weight_arr2d = np.repeat(weight_arr2d, repeats=self.features_num, axis=0)
# weight_arr3d = np.expand_dims(weight_arr2d, axis=0)
# weight_arr3d = np.repeat(weight_arr3d, repeats=inputs.shape[0], axis=0)
# weight_arr4d = np.reshape(weight_arr3d,
# newshape=(inputs.shape[0], weight_arr3d.shape[1], weight_arr3d.shape[2], 1))
#
# temp_dict = dict()
# for num in range(int(self.backward_len / self.stride)):
# arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.stride, :]
# assert arr_trim.shape == weight_arr4d.shape
# arr_weight = arr_trim * weight_arr4d
# arr_wsum = arr_weight.sum(axis=2)
#
# arr_ret10_re = np.reshape(arr_wsum, newshape=(arr_wsum.shape[0], arr_wsum.shape[1], arr_wsum.shape[2], 1))
# if self.logging:
# print(num)
# print(arr_trim.shape)
# print(arr_ret10_re.shape)
# print(arr_trim[0, :, :, 0].shape)
# print(pd.DataFrame(arr_trim[0, :, :, 0]))
# print(np.std(arr_trim[0, :, :, 0], axis=1))
# temp_dict[num] = arr_ret10_re
#
# total_num = int(self.backward_len / self.stride)
# temp_list = [temp_dict[num] for num in range(1, total_num)]
# temp_list.append(temp_dict[0])
#
# result = np.concatenate(tuple(temp_list), axis=2)
# return result
#
# def compute_output_shape(self, input_shape):
# return input_shape[0], input_shape[1], int(input_shape[2] / self.stride), input_shape[3]
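#
# Usage note (a minimal sketch; the batch size of 4 is an arbitrary demo
# value, the other dimensions follow input_data_shape = (None, 8, 30, 1)).
# Because call() relies on inputs.numpy(), the layer only works on eager
# tensors, e.g. when it is called directly or inside a model compiled with
# run_eagerly=True:
#
# x = tensorflow.constant(np.random.rand(4, 8, 30, 1).astype(np.float32))
# layer = ts_zscore()
# y = layer(x)      # each stride-10 window is reduced to its mean/std ratio
# print(y.shape)    # expected (4, 8, 3, 1), i.e. backward_len / stride = 3 slices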
|
[
"from tensorflow.keras.layers import Layer\nimport numpy as np\nimport pandas as pd\nimport tensorflow\n\n# class ts_stddev(Layer):\n# def __init__(self, **kwargs):\n# self.window = 10\n# self.stride = 10\n# self.features_num = 8\n# self.backward_len = 30\n# self.logging = False\n# self.input_data_shape = (None, self.features_num, self.backward_len, 1)\n# super(ts_stddev, self).__init__(**kwargs)\n#\n# def call(self, inputs, **kwargs):\n# assert inputs.shape[1:] == self.input_data_shape[1:]\n# arr = inputs.numpy()\n# arr_r10 = np.roll(arr, shift=self.window, axis=2)\n#\n# temp_dict = dict()\n# for num in range(int(self.backward_len / self.stride)):\n# arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.stride, :]\n# arr_std10 = np.std(arr_trim, axis=2)\n# arr_std10_re = np.reshape(arr_std10, (arr_std10.shape[0], arr_std10.shape[1], 1, arr_std10.shape[2]))\n# if self.logging:\n# print(num)\n# print(arr_trim.shape)\n# print(arr_std10.shape)\n# print(arr_std10_re.shape)\n# print(arr_trim[0, :, :, 0].shape)\n# print(pd.DataFrame(arr_trim[0, :, :, 0]))\n# print(np.std(arr_trim[0, :, :, 0], axis=1))\n# temp_dict[num] = arr_std10_re\n#\n# total_num = int(self.backward_len / self.stride)\n# temp_list = [temp_dict[num] for num in range(1, total_num)]\n# temp_list.append(temp_dict[0])\n#\n# result = np.concatenate(tuple(temp_list), axis=2)\n# return result\n#\n# def compute_output_shape(self, input_shape):\n# return input_shape[0], input_shape[1], int(input_shape[2] / self.stride), input_shape[3]\n\n\nclass ts_zscore(Layer):\n def __init__(self, **kwargs):\n self.window = 10\n self.stride = 10\n self.features_num = 8\n self.backward_len = 30\n self.logging = False\n self.input_data_shape = (None, self.features_num, self.backward_len, 1)\n super(ts_zscore, self).__init__(**kwargs)\n\n def np_func(self,inputs):\n assert inputs.shape[1:] == self.input_data_shape[1:]\n arr = inputs.numpy()\n arr_r10 = np.roll(arr, shift=self.window, axis=2)\n\n temp_dict = dict()\n for num in range(int(self.backward_len / self.stride)):\n arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.stride, :]\n arr_mean10 = np.mean(arr_trim, axis=2)\n arr_std10 = np.std(arr_trim, axis=2)\n arr_zscore10 = arr_mean10 / arr_std10\n arr_zscore10_re = np.reshape(arr_zscore10,\n (arr_zscore10.shape[0], arr_zscore10.shape[1], 1, arr_zscore10.shape[2]))\n if self.logging:\n print(num)\n print(arr_trim.shape)\n print(arr_std10.shape)\n print(arr_zscore10_re.shape)\n print(arr_trim[0, :, :, 0].shape)\n print(pd.DataFrame(arr_trim[0, :, :, 0]))\n print(np.std(arr_trim[0, :, :, 0], axis=1))\n temp_dict[num] = arr_zscore10_re\n\n total_num = int(self.backward_len / self.stride)\n temp_list = [temp_dict[num] for num in range(1, total_num)]\n temp_list.append(temp_dict[0])\n\n result = np.concatenate(tuple(temp_list), axis=2)\n return result\n\n def call(self, inputs, **kwargs):\n return self.np_func(inputs)\n\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[1], int(input_shape[2] / self.stride), input_shape[3]\n\n\n# class ts_return(Layer):\n# def __init__(self, **kwargs):\n# self.window = 10\n# self.stride = 10\n# self.features_num = 8\n# self.backward_len = 30\n# self.logging = False\n# self.input_data_shape = (None, self.features_num, self.backward_len, 1)\n# super(ts_return, self).__init__(**kwargs)\n#\n# def call(self, inputs, **kwargs):\n# assert inputs.shape[1:] == self.input_data_shape[1:]\n#\n# arr = inputs.numpy()\n# arr_r10 = np.roll(arr, shift=self.window, axis=2)\n#\n# 
temp_dict = dict()\n# for num in range(int(self.backward_len / self.stride)):\n# arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.stride, :]\n# arr_head10 = arr_trim[:, :, [0], :]\n# arr_tail10 = arr_trim[:, :, [-1], :]\n# arr_ret10 = arr_tail10 / arr_head10 - 1\n# # arr_zscore10_re = np.reshape(arr_ret10,\n# # (arr_ret10.shape[0], arr_ret10.shape[1], 1, arr_ret10.shape[2]))\n#\n# arr_ret10_re = arr_ret10\n# if self.logging:\n# print(num)\n# print(arr_trim.shape)\n# print(arr_ret10_re.shape)\n# print(arr_trim[0, :, :, 0].shape)\n# print(pd.DataFrame(arr_trim[0, :, :, 0]))\n# print(np.std(arr_trim[0, :, :, 0], axis=1))\n# temp_dict[num] = arr_ret10_re\n#\n# total_num = int(self.backward_len / self.stride)\n# temp_list = [temp_dict[num] for num in range(1, total_num)]\n# temp_list.append(temp_dict[0])\n#\n# result = np.concatenate(tuple(temp_list), axis=2)\n# return result\n#\n# def compute_output_shape(self, input_shape):\n# return input_shape[0], input_shape[1], int(input_shape[2] / self.stride), input_shape[3]\n\n\n# class ts_decaylinear(Layer):\n# def __init__(self, **kwargs):\n# self.window = 10\n# self.stride = 10\n# self.features_num = 8\n# self.backward_len = 30\n# self.logging = False\n# self.input_data_shape = (None, self.features_num, self.backward_len, 1)\n# super(ts_decaylinear, self).__init__(**kwargs)\n#\n# def call(self, inputs, **kwargs):\n# assert inputs.shape[1:] == self.input_data_shape[1:]\n#\n# arr = inputs.numpy()\n# arr_r10 = np.roll(arr, shift=self.window, axis=2)\n# # 生成长度为30的权重向量\n# weight_arr = np.array(range(1, 1 + self.stride))\n# weight_arr = weight_arr / weight_arr.sum()\n# weight_arr2d = np.expand_dims(weight_arr, axis=0)\n# weight_arr2d = np.repeat(weight_arr2d, repeats=self.features_num, axis=0)\n# weight_arr3d = np.expand_dims(weight_arr2d, axis=0)\n# weight_arr3d = np.repeat(weight_arr3d, repeats=inputs.shape[0], axis=0)\n# weight_arr4d = np.reshape(weight_arr3d,\n# newshape=(inputs.shape[0], weight_arr3d.shape[1], weight_arr3d.shape[2], 1))\n#\n# temp_dict = dict()\n# for num in range(int(self.backward_len / self.stride)):\n# arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.stride, :]\n# assert arr_trim.shape == weight_arr4d.shape\n# arr_weight = arr_trim * weight_arr4d\n# arr_wsum = arr_weight.sum(axis=2)\n#\n# arr_ret10_re = np.reshape(arr_wsum, newshape=(arr_wsum.shape[0], arr_wsum.shape[1], arr_wsum.shape[2], 1))\n# if self.logging:\n# print(num)\n# print(arr_trim.shape)\n# print(arr_ret10_re.shape)\n# print(arr_trim[0, :, :, 0].shape)\n# print(pd.DataFrame(arr_trim[0, :, :, 0]))\n# print(np.std(arr_trim[0, :, :, 0], axis=1))\n# temp_dict[num] = arr_ret10_re\n#\n# total_num = int(self.backward_len / self.stride)\n# temp_list = [temp_dict[num] for num in range(1, total_num)]\n# temp_list.append(temp_dict[0])\n#\n# result = np.concatenate(tuple(temp_list), axis=2)\n# return result\n#\n# def compute_output_shape(self, input_shape):\n# return input_shape[0], input_shape[1], int(input_shape[2] / self.stride), input_shape[3]\n",
"from tensorflow.keras.layers import Layer\nimport numpy as np\nimport pandas as pd\nimport tensorflow\n\n\nclass ts_zscore(Layer):\n\n def __init__(self, **kwargs):\n self.window = 10\n self.stride = 10\n self.features_num = 8\n self.backward_len = 30\n self.logging = False\n self.input_data_shape = None, self.features_num, self.backward_len, 1\n super(ts_zscore, self).__init__(**kwargs)\n\n def np_func(self, inputs):\n assert inputs.shape[1:] == self.input_data_shape[1:]\n arr = inputs.numpy()\n arr_r10 = np.roll(arr, shift=self.window, axis=2)\n temp_dict = dict()\n for num in range(int(self.backward_len / self.stride)):\n arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.\n stride, :]\n arr_mean10 = np.mean(arr_trim, axis=2)\n arr_std10 = np.std(arr_trim, axis=2)\n arr_zscore10 = arr_mean10 / arr_std10\n arr_zscore10_re = np.reshape(arr_zscore10, (arr_zscore10.shape[\n 0], arr_zscore10.shape[1], 1, arr_zscore10.shape[2]))\n if self.logging:\n print(num)\n print(arr_trim.shape)\n print(arr_std10.shape)\n print(arr_zscore10_re.shape)\n print(arr_trim[0, :, :, 0].shape)\n print(pd.DataFrame(arr_trim[0, :, :, 0]))\n print(np.std(arr_trim[0, :, :, 0], axis=1))\n temp_dict[num] = arr_zscore10_re\n total_num = int(self.backward_len / self.stride)\n temp_list = [temp_dict[num] for num in range(1, total_num)]\n temp_list.append(temp_dict[0])\n result = np.concatenate(tuple(temp_list), axis=2)\n return result\n\n def call(self, inputs, **kwargs):\n return self.np_func(inputs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[1], int(input_shape[2] / self.stride\n ), input_shape[3]\n",
"<import token>\n\n\nclass ts_zscore(Layer):\n\n def __init__(self, **kwargs):\n self.window = 10\n self.stride = 10\n self.features_num = 8\n self.backward_len = 30\n self.logging = False\n self.input_data_shape = None, self.features_num, self.backward_len, 1\n super(ts_zscore, self).__init__(**kwargs)\n\n def np_func(self, inputs):\n assert inputs.shape[1:] == self.input_data_shape[1:]\n arr = inputs.numpy()\n arr_r10 = np.roll(arr, shift=self.window, axis=2)\n temp_dict = dict()\n for num in range(int(self.backward_len / self.stride)):\n arr_trim = arr_r10[:, :, num * self.stride:(num + 1) * self.\n stride, :]\n arr_mean10 = np.mean(arr_trim, axis=2)\n arr_std10 = np.std(arr_trim, axis=2)\n arr_zscore10 = arr_mean10 / arr_std10\n arr_zscore10_re = np.reshape(arr_zscore10, (arr_zscore10.shape[\n 0], arr_zscore10.shape[1], 1, arr_zscore10.shape[2]))\n if self.logging:\n print(num)\n print(arr_trim.shape)\n print(arr_std10.shape)\n print(arr_zscore10_re.shape)\n print(arr_trim[0, :, :, 0].shape)\n print(pd.DataFrame(arr_trim[0, :, :, 0]))\n print(np.std(arr_trim[0, :, :, 0], axis=1))\n temp_dict[num] = arr_zscore10_re\n total_num = int(self.backward_len / self.stride)\n temp_list = [temp_dict[num] for num in range(1, total_num)]\n temp_list.append(temp_dict[0])\n result = np.concatenate(tuple(temp_list), axis=2)\n return result\n\n def call(self, inputs, **kwargs):\n return self.np_func(inputs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[1], int(input_shape[2] / self.stride\n ), input_shape[3]\n",
"<import token>\n\n\nclass ts_zscore(Layer):\n\n def __init__(self, **kwargs):\n self.window = 10\n self.stride = 10\n self.features_num = 8\n self.backward_len = 30\n self.logging = False\n self.input_data_shape = None, self.features_num, self.backward_len, 1\n super(ts_zscore, self).__init__(**kwargs)\n <function token>\n\n def call(self, inputs, **kwargs):\n return self.np_func(inputs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[1], int(input_shape[2] / self.stride\n ), input_shape[3]\n",
"<import token>\n\n\nclass ts_zscore(Layer):\n\n def __init__(self, **kwargs):\n self.window = 10\n self.stride = 10\n self.features_num = 8\n self.backward_len = 30\n self.logging = False\n self.input_data_shape = None, self.features_num, self.backward_len, 1\n super(ts_zscore, self).__init__(**kwargs)\n <function token>\n\n def call(self, inputs, **kwargs):\n return self.np_func(inputs)\n <function token>\n",
"<import token>\n\n\nclass ts_zscore(Layer):\n\n def __init__(self, **kwargs):\n self.window = 10\n self.stride = 10\n self.features_num = 8\n self.backward_len = 30\n self.logging = False\n self.input_data_shape = None, self.features_num, self.backward_len, 1\n super(ts_zscore, self).__init__(**kwargs)\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ts_zscore(Layer):\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,804 |
df2ab2a69c9de9a7fc0746d379765051f37cd794
|
from abc import ABCMeta, abstractmethod
class NetworkInput(object):
__metaclass__ = ABCMeta
def __init__(self):
pass
@abstractmethod
def val(self, t):
""" """
pass
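
# Note: "__metaclass__ = ABCMeta" is only honoured by Python 2. Under Python 3
# the metaclass must be declared in the class header for @abstractmethod to be
# enforced; a minimal Python 3 sketch of the same interface would be:
#
# class NetworkInput(metaclass=ABCMeta):
#     @abstractmethod
#     def val(self, t):
#         """ """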
|
[
"from abc import ABCMeta, abstractmethod\n\nclass NetworkInput(object):\n\n\t__metaclass__ = ABCMeta\n\n\tdef __init__(self):\n\t\tpass\n\n\t@abstractmethod\n\tdef val(self, t):\n\t\t\"\"\" \"\"\"\n\t\tpass\n",
"from abc import ABCMeta, abstractmethod\n\n\nclass NetworkInput(object):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n pass\n\n @abstractmethod\n def val(self, t):\n \"\"\" \"\"\"\n pass\n",
"<import token>\n\n\nclass NetworkInput(object):\n __metaclass__ = ABCMeta\n\n def __init__(self):\n pass\n\n @abstractmethod\n def val(self, t):\n \"\"\" \"\"\"\n pass\n",
"<import token>\n\n\nclass NetworkInput(object):\n <assignment token>\n\n def __init__(self):\n pass\n\n @abstractmethod\n def val(self, t):\n \"\"\" \"\"\"\n pass\n",
"<import token>\n\n\nclass NetworkInput(object):\n <assignment token>\n\n def __init__(self):\n pass\n <function token>\n",
"<import token>\n\n\nclass NetworkInput(object):\n <assignment token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,805 |
17b7049a636097f489b381f11d471e0e58116cf5
|
import re
import pandas as pd
import json
from pprint import pprint
# df = pd.read_json('./tweet_json.backup.txt', lines=True)
# print(df.shape)
# print(df['text'].isnull().sum())
# print(df[0])
with open('./tweet_json.backup.txt', 'r') as f:
tweets = [json.loads(t) for t in f.readlines()]
with open('./emojis.txt', 'r') as f:
emojis = f.read().split(',')
def extract_emojis(text):
''' Extracts emojis from text. '''
return [e for e in emojis if e in text]
dataset = []
for i, t in enumerate(tweets):
text = t['extended_tweet']['full_text'] if t['truncated'] else t['text']
# Remove line breaks
text = text.replace('\n', ' ').replace('\r', ' ')
# Remove links
text = re.sub(r'http\S+', '', text)
# Extract emojis
emos = extract_emojis(text)
# Remove non alphanumerics but leave spaces
text = re.sub(r'([^\s\w]|_)+', '', text)
print(text, emos, len(emos))
print('---------------------------------------------')
if emos:
dataset.append({
'text': text,
'emojis': ','.join(emos)
})
# print(text)
if i > 20:
break
with open('tweet_emoji_dataset.txt', 'w+', encoding='utf8') as f:
for d in dataset:
j = json.dumps(d, ensure_ascii=False)
f.write(j + '\n')
|
[
"import re\nimport pandas as pd\nimport json\nfrom pprint import pprint\n\n# df = pd.read_json('./tweet_json.backup.txt', lines=True)\n# print(df.shape)\n# print(df['text'].isnull().sum())\n# print(df[0])\n\nwith open('./tweet_json.backup.txt', 'r') as f:\n tweets = [json.loads(t) for t in f.readlines()]\n\nwith open('./emojis.txt', 'r') as f:\n emojis = f.read().split(',')\n\ndef extract_emojis(text):\n ''' Extracts emojis from text. '''\n return [e for e in emojis if e in text]\n\ndataset = []\nfor i, t in enumerate(tweets):\n text = t['extended_tweet']['full_text'] if t['truncated'] else t['text']\n\n # Remove line breaks\n text = text.replace('\\n', ' ').replace('\\r', ' ')\n\n # Remove links\n text = re.sub(r'http\\S+', '', text)\n\n # Extract emojis\n emos = extract_emojis(text)\n\n # Remove non alphanumerics but leave spaces\n text = re.sub(r'([^\\s\\w]|_)+', '', text)\n print(text, emos, len(emos))\n print('---------------------------------------------')\n\n if emos:\n dataset.append({\n 'text': text,\n 'emojis': ','.join(emos)\n })\n\n # print(text)\n if i > 20:\n break\n\nwith open('tweet_emoji_dataset.txt', 'w+', encoding='utf8') as f:\n for d in dataset:\n j = json.dumps(d, ensure_ascii=False)\n f.write(j + '\\n')\n",
"import re\nimport pandas as pd\nimport json\nfrom pprint import pprint\nwith open('./tweet_json.backup.txt', 'r') as f:\n tweets = [json.loads(t) for t in f.readlines()]\nwith open('./emojis.txt', 'r') as f:\n emojis = f.read().split(',')\n\n\ndef extract_emojis(text):\n \"\"\" Extracts emojis from text. \"\"\"\n return [e for e in emojis if e in text]\n\n\ndataset = []\nfor i, t in enumerate(tweets):\n text = t['extended_tweet']['full_text'] if t['truncated'] else t['text']\n text = text.replace('\\n', ' ').replace('\\r', ' ')\n text = re.sub('http\\\\S+', '', text)\n emos = extract_emojis(text)\n text = re.sub('([^\\\\s\\\\w]|_)+', '', text)\n print(text, emos, len(emos))\n print('---------------------------------------------')\n if emos:\n dataset.append({'text': text, 'emojis': ','.join(emos)})\n if i > 20:\n break\nwith open('tweet_emoji_dataset.txt', 'w+', encoding='utf8') as f:\n for d in dataset:\n j = json.dumps(d, ensure_ascii=False)\n f.write(j + '\\n')\n",
"<import token>\nwith open('./tweet_json.backup.txt', 'r') as f:\n tweets = [json.loads(t) for t in f.readlines()]\nwith open('./emojis.txt', 'r') as f:\n emojis = f.read().split(',')\n\n\ndef extract_emojis(text):\n \"\"\" Extracts emojis from text. \"\"\"\n return [e for e in emojis if e in text]\n\n\ndataset = []\nfor i, t in enumerate(tweets):\n text = t['extended_tweet']['full_text'] if t['truncated'] else t['text']\n text = text.replace('\\n', ' ').replace('\\r', ' ')\n text = re.sub('http\\\\S+', '', text)\n emos = extract_emojis(text)\n text = re.sub('([^\\\\s\\\\w]|_)+', '', text)\n print(text, emos, len(emos))\n print('---------------------------------------------')\n if emos:\n dataset.append({'text': text, 'emojis': ','.join(emos)})\n if i > 20:\n break\nwith open('tweet_emoji_dataset.txt', 'w+', encoding='utf8') as f:\n for d in dataset:\n j = json.dumps(d, ensure_ascii=False)\n f.write(j + '\\n')\n",
"<import token>\nwith open('./tweet_json.backup.txt', 'r') as f:\n tweets = [json.loads(t) for t in f.readlines()]\nwith open('./emojis.txt', 'r') as f:\n emojis = f.read().split(',')\n\n\ndef extract_emojis(text):\n \"\"\" Extracts emojis from text. \"\"\"\n return [e for e in emojis if e in text]\n\n\n<assignment token>\nfor i, t in enumerate(tweets):\n text = t['extended_tweet']['full_text'] if t['truncated'] else t['text']\n text = text.replace('\\n', ' ').replace('\\r', ' ')\n text = re.sub('http\\\\S+', '', text)\n emos = extract_emojis(text)\n text = re.sub('([^\\\\s\\\\w]|_)+', '', text)\n print(text, emos, len(emos))\n print('---------------------------------------------')\n if emos:\n dataset.append({'text': text, 'emojis': ','.join(emos)})\n if i > 20:\n break\nwith open('tweet_emoji_dataset.txt', 'w+', encoding='utf8') as f:\n for d in dataset:\n j = json.dumps(d, ensure_ascii=False)\n f.write(j + '\\n')\n",
"<import token>\n<code token>\n\n\ndef extract_emojis(text):\n \"\"\" Extracts emojis from text. \"\"\"\n return [e for e in emojis if e in text]\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
98,806 |
a6dc34eddd9ad46dc36f9cf644c27fc248383724
|
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, IntegerType, StringType, StructField
schema = StructType(
[
StructField("id", IntegerType(), False),
StructField("first_name", StringType(), True),
StructField("last_name", StringType(), False)
]
)
def main(args):
mode = args[0]
spark = SparkSession.builder.appName("Test_parquet").master("local").getOrCreate()
df = spark.read.parquet(r"C:\Users\Saurabh Singh\Downloads\userdata1.parquet") \
.select("id", "first_name", "last_name").rdd
if len(mode) == 1:
print("Mode B")
df2 = spark.read.parquet(r"C:\Users\Saurabh Singh\Downloads\userdata1.parquet")
else:
print("Mode A")
df2 = spark.createDataFrame(df, schema=schema)
df2.printSchema()
df2.show()
spark.stop()
if __name__ == '__main__':
    x = input("Please Enter:")
    main([x])
|
[
"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, IntegerType, StringType, StructField\n\nschema = StructType(\n [\n StructField(\"id\", IntegerType(), False),\n StructField(\"first_name\", StringType(), True),\n StructField(\"last_name\", StringType(), False)\n ]\n)\n\n\ndef main(args):\n mode = args[0]\n spark = SparkSession.builder.appName(\"Test_parquet\").master(\"local\").getOrCreate()\n df = spark.read.parquet(r\"C:\\Users\\Saurabh Singh\\Downloads\\userdata1.parquet\") \\\n .select(\"id\", \"first_name\", \"last_name\").rdd\n\n if len(mode) == 1:\n print(\"Mode B\")\n df2 = spark.read.parquet(r\"C:\\Users\\Saurabh Singh\\Downloads\\userdata1.parquet\")\n else:\n print(\"Mode A\")\n df2 = spark.createDataFrame(df, schema=schema)\n\n df2.printSchema()\n df2.show()\n spark.stop()\n\n\nif __name__ == '__main__':\n x=input(\"Please Enter:\")\n main('')\n",
"from pyspark.sql import SparkSession\nfrom pyspark.sql.types import StructType, IntegerType, StringType, StructField\nschema = StructType([StructField('id', IntegerType(), False), StructField(\n 'first_name', StringType(), True), StructField('last_name', StringType(\n ), False)])\n\n\ndef main(args):\n mode = args[0]\n spark = SparkSession.builder.appName('Test_parquet').master('local'\n ).getOrCreate()\n df = spark.read.parquet(\n 'C:\\\\Users\\\\Saurabh Singh\\\\Downloads\\\\userdata1.parquet').select('id',\n 'first_name', 'last_name').rdd\n if len(mode) == 1:\n print('Mode B')\n df2 = spark.read.parquet(\n 'C:\\\\Users\\\\Saurabh Singh\\\\Downloads\\\\userdata1.parquet')\n else:\n print('Mode A')\n df2 = spark.createDataFrame(df, schema=schema)\n df2.printSchema()\n df2.show()\n spark.stop()\n\n\nif __name__ == '__main__':\n x = input('Please Enter:')\n main('')\n",
"<import token>\nschema = StructType([StructField('id', IntegerType(), False), StructField(\n 'first_name', StringType(), True), StructField('last_name', StringType(\n ), False)])\n\n\ndef main(args):\n mode = args[0]\n spark = SparkSession.builder.appName('Test_parquet').master('local'\n ).getOrCreate()\n df = spark.read.parquet(\n 'C:\\\\Users\\\\Saurabh Singh\\\\Downloads\\\\userdata1.parquet').select('id',\n 'first_name', 'last_name').rdd\n if len(mode) == 1:\n print('Mode B')\n df2 = spark.read.parquet(\n 'C:\\\\Users\\\\Saurabh Singh\\\\Downloads\\\\userdata1.parquet')\n else:\n print('Mode A')\n df2 = spark.createDataFrame(df, schema=schema)\n df2.printSchema()\n df2.show()\n spark.stop()\n\n\nif __name__ == '__main__':\n x = input('Please Enter:')\n main('')\n",
"<import token>\n<assignment token>\n\n\ndef main(args):\n mode = args[0]\n spark = SparkSession.builder.appName('Test_parquet').master('local'\n ).getOrCreate()\n df = spark.read.parquet(\n 'C:\\\\Users\\\\Saurabh Singh\\\\Downloads\\\\userdata1.parquet').select('id',\n 'first_name', 'last_name').rdd\n if len(mode) == 1:\n print('Mode B')\n df2 = spark.read.parquet(\n 'C:\\\\Users\\\\Saurabh Singh\\\\Downloads\\\\userdata1.parquet')\n else:\n print('Mode A')\n df2 = spark.createDataFrame(df, schema=schema)\n df2.printSchema()\n df2.show()\n spark.stop()\n\n\nif __name__ == '__main__':\n x = input('Please Enter:')\n main('')\n",
"<import token>\n<assignment token>\n\n\ndef main(args):\n mode = args[0]\n spark = SparkSession.builder.appName('Test_parquet').master('local'\n ).getOrCreate()\n df = spark.read.parquet(\n 'C:\\\\Users\\\\Saurabh Singh\\\\Downloads\\\\userdata1.parquet').select('id',\n 'first_name', 'last_name').rdd\n if len(mode) == 1:\n print('Mode B')\n df2 = spark.read.parquet(\n 'C:\\\\Users\\\\Saurabh Singh\\\\Downloads\\\\userdata1.parquet')\n else:\n print('Mode A')\n df2 = spark.createDataFrame(df, schema=schema)\n df2.printSchema()\n df2.show()\n spark.stop()\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
98,807 |
52cad58f2cfaad31208cea7f18205c79f2cf22e6
|
import operator
def comparator(a, b):
if a[1] > b[1]:
return -1;
elif a[1] == b[1]:
if a[0] > b[0]:
return 1
else:
return -1
else:
return 1
if __name__ == '__main__':
# Read the input and split it out.
input = open('input.txt', 'r')
rooms = [x.strip() for x in input.readlines()]
infos = [[x, x[: -11], x[-10:-7], x[-6:-1]] for x in rooms]
# info[0] = original
# info[1] = encrypted
# info[2] = sector
# info[3] = checksum
sector_count = 0
for info in infos:
print info
# Establish the count of each letter.
counts = {}
for x in info[1]:
if x == '-':
continue
counts[x] = info[1].count(x)
print counts
# Sort first by the count. We then need to sort
# alphabetically to break ties. Use our custom
# comparator to achieve this.
sorted_counts = sorted(counts.items(), cmp=comparator)
print sorted_counts
# Now look into the checksum and check to see that
# the characters appear in order. Assume the room
# is legit until proven otherwise.
valid = True
for x in info[3]:
tuple = sorted_counts.pop(0)
if tuple[0] != x:
print "Wanted " + x + " but found " + tuple[0]
valid = False
break;
# Now we know if the room is valid or not.
print "Valid: " + str(valid)
if valid:
sector_count += int(info[2])
print "Sector sum: " + str(sector_count)
|
[
"import operator\n\ndef comparator(a, b):\n if a[1] > b[1]:\n return -1;\n elif a[1] == b[1]:\n if a[0] > b[0]:\n return 1\n else:\n return -1\n else:\n return 1\n\nif __name__ == '__main__':\n # Read the input and split it out.\n input = open('input.txt', 'r')\n rooms = [x.strip() for x in input.readlines()]\n infos = [[x, x[: -11], x[-10:-7], x[-6:-1]] for x in rooms]\n\n # info[0] = original\n # info[1] = encrypted\n # info[2] = sector\n # info[3] = checksum\n\n sector_count = 0\n for info in infos:\n print info\n # Establish the count of each letter.\n counts = {}\n for x in info[1]:\n if x == '-':\n continue\n counts[x] = info[1].count(x)\n print counts\n\n # Sort first by the count. We then need to sort\n # alphabetically to break ties. Use our custom\n # comparator to achieve this.\n sorted_counts = sorted(counts.items(), cmp=comparator)\n print sorted_counts\n\n # Now look into the checksum and check to see that\n # the characters appear in order. Assume the room\n # is legit until proven otherwise.\n valid = True\n for x in info[3]:\n tuple = sorted_counts.pop(0)\n if tuple[0] != x:\n print \"Wanted \" + x + \" but found \" + tuple[0]\n valid = False\n break;\n\n # Now we know if the room is valid or not.\n print \"Valid: \" + str(valid)\n if valid:\n sector_count += int(info[2])\n print \"Sector sum: \" + str(sector_count)\n\n"
] | true |
98,808 |
02de12373fd346ae9c8fe97f137e7b5dfb85d3ca
|
import pickle
import chess.pgn
import sys
f = open("lichess_db_standard_rated_2021-06.pgn")
out = open("headers.pckl","wb")
#Grab just 5min games
count = 0
flip = 0
while f:
g = chess.pgn.read_headers(f)
if not g : break
#skip unrated
try:
g["WhiteRatingDiff"]
except:
continue
if g["TimeControl"] == '300+0':
count += 1
        if count % 2000 == 0:
            if flip:
                print('.', end='')
            else:
                print(":", end='')
            flip = not flip
            sys.stdout.flush()
pickle.dump(g, out)
print("games found : ", count)
|
[
"import pickle\r\nimport chess.pgn\r\nimport sys\r\n\r\n\r\nf = open(\"lichess_db_standard_rated_2021-06.pgn\")\r\nout = open(\"headers.pckl\",\"wb\")\r\n#Grab just 5min games\r\ncount = 0\r\nflip = 0\r\nwhile f:\r\n g = chess.pgn.read_headers(f)\r\n if not g : break\r\n #skip unrated\r\n try:\r\n g[\"WhiteRatingDiff\"]\r\n except:\r\n continue \r\n if g[\"TimeControl\"] == '300+0':\r\n count += 1\r\n if count % 2000 :\r\n if flip:\r\n print('.', end='')\r\n else:\r\n print(\":\",end='')\r\n sys.stdout.flush()\r\n pickle.dump(g, out)\r\nprint(\"games found : \", count)",
"import pickle\nimport chess.pgn\nimport sys\nf = open('lichess_db_standard_rated_2021-06.pgn')\nout = open('headers.pckl', 'wb')\ncount = 0\nflip = 0\nwhile f:\n g = chess.pgn.read_headers(f)\n if not g:\n break\n try:\n g['WhiteRatingDiff']\n except:\n continue\n if g['TimeControl'] == '300+0':\n count += 1\n if count % 2000:\n if flip:\n print('.', end='')\n else:\n print(':', end='')\n sys.stdout.flush()\n pickle.dump(g, out)\nprint('games found : ', count)\n",
"<import token>\nf = open('lichess_db_standard_rated_2021-06.pgn')\nout = open('headers.pckl', 'wb')\ncount = 0\nflip = 0\nwhile f:\n g = chess.pgn.read_headers(f)\n if not g:\n break\n try:\n g['WhiteRatingDiff']\n except:\n continue\n if g['TimeControl'] == '300+0':\n count += 1\n if count % 2000:\n if flip:\n print('.', end='')\n else:\n print(':', end='')\n sys.stdout.flush()\n pickle.dump(g, out)\nprint('games found : ', count)\n",
"<import token>\n<assignment token>\nwhile f:\n g = chess.pgn.read_headers(f)\n if not g:\n break\n try:\n g['WhiteRatingDiff']\n except:\n continue\n if g['TimeControl'] == '300+0':\n count += 1\n if count % 2000:\n if flip:\n print('.', end='')\n else:\n print(':', end='')\n sys.stdout.flush()\n pickle.dump(g, out)\nprint('games found : ', count)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,809 |
4f5f52d15c98066ac504ac3fd2697aee58089c12
|
# -*- coding: utf-8 -*-
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import Chain
class LSTMNet(Chain):
def __init__(self, n_unit, n_out):
super(LSTMNet, self).__init__()
with self.init_scope():
self.fc1 = L.Linear(None, n_unit)
self.lstm = L.LSTM(None, n_unit)
self.fc2 = L.Linear(None, n_out)
def reset_state(self):
self.lstm.reset_state()
def __call__(self, x):
h = self.fc1(x)
h = self.lstm(h)
return self.fc2(h)
class NStepLSTMNet(Chain):
def __init__(self, n_layer, n_unit, n_out):
super(NStepLSTMNet, self).__init__()
with self.init_scope():
self.fc1 = L.Linear(None, n_unit)
self.lstm = L.NStepLSTM(n_layers=n_layer, in_size=n_unit, out_size=n_unit, dropout=0.)
self.fc2 = L.Linear(None, n_out)
self.n_layer = n_layer
self.n_unit = n_unit
def __call__(self, x):
xp = chainer.cuda.get_array_module(x[0].data)
cx = F.concat(x, axis=0)
cx = cx.reshape(-1, 1)
ex = self.fc1(cx)
x_len = [len(x_) for x_ in x]
x_section = xp.cumsum(x_len[:-1])
exs = F.split_axis(ex, x_section, 0, force_tuple=True)
_, _, h = self.lstm(None, None, exs)
ch = F.concat(h, axis=0)
ch = ch.reshape(-1, self.n_unit)
eh = self.fc2(ch)
eh = eh.reshape(-1, )
h_len = [len(h_) for h_ in h]
h_section = xp.cumsum(h_len[:-1])
ehs = F.split_axis(eh, h_section, 0, force_tuple=True)
return ehs
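
if __name__ == '__main__':
    # Example usage (a minimal sketch; layer sizes and sequence lengths are
    # arbitrary demo values): three variable-length 1-D sequences are pushed
    # through NStepLSTMNet, which returns one output sequence per input.
    import numpy as np

    model = NStepLSTMNet(n_layer=1, n_unit=16, n_out=1)
    xs = [chainer.Variable(np.random.rand(n).astype(np.float32))
          for n in (5, 8, 3)]
    ys = model(xs)
    print([y.shape for y in ys])  # with n_out=1: [(5,), (8,), (3,)]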
|
[
"# -*- coding: utf-8 -*-\nimport chainer\nimport chainer.links as L\nimport chainer.functions as F\nfrom chainer import Chain\n\n\nclass LSTMNet(Chain):\n\n def __init__(self, n_unit, n_out):\n super(LSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.LSTM(None, n_unit)\n self.fc2 = L.Linear(None, n_out)\n\n def reset_state(self):\n self.lstm.reset_state()\n\n def __call__(self, x):\n h = self.fc1(x)\n h = self.lstm(h)\n return self.fc2(h)\n \n\nclass NStepLSTMNet(Chain):\n\n def __init__(self, n_layer, n_unit, n_out):\n super(NStepLSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.NStepLSTM(n_layers=n_layer, in_size=n_unit, out_size=n_unit, dropout=0.)\n self.fc2 = L.Linear(None, n_out)\n\n self.n_layer = n_layer\n self.n_unit = n_unit\n\n def __call__(self, x):\n xp = chainer.cuda.get_array_module(x[0].data)\n\n cx = F.concat(x, axis=0)\n cx = cx.reshape(-1, 1)\n\n ex = self.fc1(cx)\n\n x_len = [len(x_) for x_ in x]\n x_section = xp.cumsum(x_len[:-1])\n exs = F.split_axis(ex, x_section, 0, force_tuple=True)\n\n _, _, h = self.lstm(None, None, exs)\n\n ch = F.concat(h, axis=0)\n ch = ch.reshape(-1, self.n_unit)\n\n eh = self.fc2(ch)\n eh = eh.reshape(-1, )\n\n h_len = [len(h_) for h_ in h]\n h_section = xp.cumsum(h_len[:-1])\n ehs = F.split_axis(eh, h_section, 0, force_tuple=True)\n\n return ehs\n\n",
"import chainer\nimport chainer.links as L\nimport chainer.functions as F\nfrom chainer import Chain\n\n\nclass LSTMNet(Chain):\n\n def __init__(self, n_unit, n_out):\n super(LSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.LSTM(None, n_unit)\n self.fc2 = L.Linear(None, n_out)\n\n def reset_state(self):\n self.lstm.reset_state()\n\n def __call__(self, x):\n h = self.fc1(x)\n h = self.lstm(h)\n return self.fc2(h)\n\n\nclass NStepLSTMNet(Chain):\n\n def __init__(self, n_layer, n_unit, n_out):\n super(NStepLSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.NStepLSTM(n_layers=n_layer, in_size=n_unit,\n out_size=n_unit, dropout=0.0)\n self.fc2 = L.Linear(None, n_out)\n self.n_layer = n_layer\n self.n_unit = n_unit\n\n def __call__(self, x):\n xp = chainer.cuda.get_array_module(x[0].data)\n cx = F.concat(x, axis=0)\n cx = cx.reshape(-1, 1)\n ex = self.fc1(cx)\n x_len = [len(x_) for x_ in x]\n x_section = xp.cumsum(x_len[:-1])\n exs = F.split_axis(ex, x_section, 0, force_tuple=True)\n _, _, h = self.lstm(None, None, exs)\n ch = F.concat(h, axis=0)\n ch = ch.reshape(-1, self.n_unit)\n eh = self.fc2(ch)\n eh = eh.reshape(-1)\n h_len = [len(h_) for h_ in h]\n h_section = xp.cumsum(h_len[:-1])\n ehs = F.split_axis(eh, h_section, 0, force_tuple=True)\n return ehs\n",
"<import token>\n\n\nclass LSTMNet(Chain):\n\n def __init__(self, n_unit, n_out):\n super(LSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.LSTM(None, n_unit)\n self.fc2 = L.Linear(None, n_out)\n\n def reset_state(self):\n self.lstm.reset_state()\n\n def __call__(self, x):\n h = self.fc1(x)\n h = self.lstm(h)\n return self.fc2(h)\n\n\nclass NStepLSTMNet(Chain):\n\n def __init__(self, n_layer, n_unit, n_out):\n super(NStepLSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.NStepLSTM(n_layers=n_layer, in_size=n_unit,\n out_size=n_unit, dropout=0.0)\n self.fc2 = L.Linear(None, n_out)\n self.n_layer = n_layer\n self.n_unit = n_unit\n\n def __call__(self, x):\n xp = chainer.cuda.get_array_module(x[0].data)\n cx = F.concat(x, axis=0)\n cx = cx.reshape(-1, 1)\n ex = self.fc1(cx)\n x_len = [len(x_) for x_ in x]\n x_section = xp.cumsum(x_len[:-1])\n exs = F.split_axis(ex, x_section, 0, force_tuple=True)\n _, _, h = self.lstm(None, None, exs)\n ch = F.concat(h, axis=0)\n ch = ch.reshape(-1, self.n_unit)\n eh = self.fc2(ch)\n eh = eh.reshape(-1)\n h_len = [len(h_) for h_ in h]\n h_section = xp.cumsum(h_len[:-1])\n ehs = F.split_axis(eh, h_section, 0, force_tuple=True)\n return ehs\n",
"<import token>\n\n\nclass LSTMNet(Chain):\n\n def __init__(self, n_unit, n_out):\n super(LSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.LSTM(None, n_unit)\n self.fc2 = L.Linear(None, n_out)\n\n def reset_state(self):\n self.lstm.reset_state()\n <function token>\n\n\nclass NStepLSTMNet(Chain):\n\n def __init__(self, n_layer, n_unit, n_out):\n super(NStepLSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.NStepLSTM(n_layers=n_layer, in_size=n_unit,\n out_size=n_unit, dropout=0.0)\n self.fc2 = L.Linear(None, n_out)\n self.n_layer = n_layer\n self.n_unit = n_unit\n\n def __call__(self, x):\n xp = chainer.cuda.get_array_module(x[0].data)\n cx = F.concat(x, axis=0)\n cx = cx.reshape(-1, 1)\n ex = self.fc1(cx)\n x_len = [len(x_) for x_ in x]\n x_section = xp.cumsum(x_len[:-1])\n exs = F.split_axis(ex, x_section, 0, force_tuple=True)\n _, _, h = self.lstm(None, None, exs)\n ch = F.concat(h, axis=0)\n ch = ch.reshape(-1, self.n_unit)\n eh = self.fc2(ch)\n eh = eh.reshape(-1)\n h_len = [len(h_) for h_ in h]\n h_section = xp.cumsum(h_len[:-1])\n ehs = F.split_axis(eh, h_section, 0, force_tuple=True)\n return ehs\n",
"<import token>\n\n\nclass LSTMNet(Chain):\n\n def __init__(self, n_unit, n_out):\n super(LSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.LSTM(None, n_unit)\n self.fc2 = L.Linear(None, n_out)\n <function token>\n <function token>\n\n\nclass NStepLSTMNet(Chain):\n\n def __init__(self, n_layer, n_unit, n_out):\n super(NStepLSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.NStepLSTM(n_layers=n_layer, in_size=n_unit,\n out_size=n_unit, dropout=0.0)\n self.fc2 = L.Linear(None, n_out)\n self.n_layer = n_layer\n self.n_unit = n_unit\n\n def __call__(self, x):\n xp = chainer.cuda.get_array_module(x[0].data)\n cx = F.concat(x, axis=0)\n cx = cx.reshape(-1, 1)\n ex = self.fc1(cx)\n x_len = [len(x_) for x_ in x]\n x_section = xp.cumsum(x_len[:-1])\n exs = F.split_axis(ex, x_section, 0, force_tuple=True)\n _, _, h = self.lstm(None, None, exs)\n ch = F.concat(h, axis=0)\n ch = ch.reshape(-1, self.n_unit)\n eh = self.fc2(ch)\n eh = eh.reshape(-1)\n h_len = [len(h_) for h_ in h]\n h_section = xp.cumsum(h_len[:-1])\n ehs = F.split_axis(eh, h_section, 0, force_tuple=True)\n return ehs\n",
"<import token>\n\n\nclass LSTMNet(Chain):\n <function token>\n <function token>\n <function token>\n\n\nclass NStepLSTMNet(Chain):\n\n def __init__(self, n_layer, n_unit, n_out):\n super(NStepLSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.NStepLSTM(n_layers=n_layer, in_size=n_unit,\n out_size=n_unit, dropout=0.0)\n self.fc2 = L.Linear(None, n_out)\n self.n_layer = n_layer\n self.n_unit = n_unit\n\n def __call__(self, x):\n xp = chainer.cuda.get_array_module(x[0].data)\n cx = F.concat(x, axis=0)\n cx = cx.reshape(-1, 1)\n ex = self.fc1(cx)\n x_len = [len(x_) for x_ in x]\n x_section = xp.cumsum(x_len[:-1])\n exs = F.split_axis(ex, x_section, 0, force_tuple=True)\n _, _, h = self.lstm(None, None, exs)\n ch = F.concat(h, axis=0)\n ch = ch.reshape(-1, self.n_unit)\n eh = self.fc2(ch)\n eh = eh.reshape(-1)\n h_len = [len(h_) for h_ in h]\n h_section = xp.cumsum(h_len[:-1])\n ehs = F.split_axis(eh, h_section, 0, force_tuple=True)\n return ehs\n",
"<import token>\n<class token>\n\n\nclass NStepLSTMNet(Chain):\n\n def __init__(self, n_layer, n_unit, n_out):\n super(NStepLSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.NStepLSTM(n_layers=n_layer, in_size=n_unit,\n out_size=n_unit, dropout=0.0)\n self.fc2 = L.Linear(None, n_out)\n self.n_layer = n_layer\n self.n_unit = n_unit\n\n def __call__(self, x):\n xp = chainer.cuda.get_array_module(x[0].data)\n cx = F.concat(x, axis=0)\n cx = cx.reshape(-1, 1)\n ex = self.fc1(cx)\n x_len = [len(x_) for x_ in x]\n x_section = xp.cumsum(x_len[:-1])\n exs = F.split_axis(ex, x_section, 0, force_tuple=True)\n _, _, h = self.lstm(None, None, exs)\n ch = F.concat(h, axis=0)\n ch = ch.reshape(-1, self.n_unit)\n eh = self.fc2(ch)\n eh = eh.reshape(-1)\n h_len = [len(h_) for h_ in h]\n h_section = xp.cumsum(h_len[:-1])\n ehs = F.split_axis(eh, h_section, 0, force_tuple=True)\n return ehs\n",
"<import token>\n<class token>\n\n\nclass NStepLSTMNet(Chain):\n\n def __init__(self, n_layer, n_unit, n_out):\n super(NStepLSTMNet, self).__init__()\n with self.init_scope():\n self.fc1 = L.Linear(None, n_unit)\n self.lstm = L.NStepLSTM(n_layers=n_layer, in_size=n_unit,\n out_size=n_unit, dropout=0.0)\n self.fc2 = L.Linear(None, n_out)\n self.n_layer = n_layer\n self.n_unit = n_unit\n <function token>\n",
"<import token>\n<class token>\n\n\nclass NStepLSTMNet(Chain):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
98,810 |
2bf1a2e57ddaaccb1971794f8145418e5c1d5c7c
|
class Solution(object):
def init(self):
self.result = []
self.dcheck = set()
def threeSum(self,nums,target,init):
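        # For the fixed outer element `init`, take nums[j] as the second element and
        # scan the rest with two pointers l (left) and r (right), moving them inward
        # according to how the running four-element sum compares to the target.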
nl = len(nums)
# print nums
for j in range(nl):
if init + nums[j] * 3 > target:
return
l,r = j+1,nl-1
while l < r:
# print init,nums[j],nums[l],nums[r]
sum4 = nums[j] + nums[l] + nums[r] + init
if sum4 == target:
t = tuple([init,nums[j],nums[l],nums[r]])
if t not in self.dcheck:
self.result.append([init,nums[j],nums[l],nums[r]])
self.dcheck.add(t)
l += 1
elif sum4 < target:
l += 1
elif sum4 > target:
r -= 1
def fourSum(self, nums,target):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
self.init()
nums = sorted(nums)
l = len(nums)
if 0 < l < 4:
return self.result
for i in range(l):
if nums[i] * 4 > target:
break
self.threeSum(nums[i+1:],target,nums[i])
return self.result
if __name__ == '__main__':
s = Solution()
    print(s.fourSum([-1, 0, 1, 2, -1, -4], -1))
# print(s.fourSum([-3, -2, -1, 0, 0, 1, 2, 3], 0))
|
[
"class Solution(object):\n \n def init(self):\n self.result = []\n self.dcheck = set()\n \n def threeSum(self,nums,target,init):\n nl = len(nums)\n# print nums\n for j in range(nl):\n if init + nums[j] * 3 > target:\n return \n l,r = j+1,nl-1\n while l < r: \n# print init,nums[j],nums[l],nums[r]\n sum4 = nums[j] + nums[l] + nums[r] + init\n if sum4 == target:\n t = tuple([init,nums[j],nums[l],nums[r]]) \n if t not in self.dcheck:\n self.result.append([init,nums[j],nums[l],nums[r]])\n self.dcheck.add(t)\n l += 1\n elif sum4 < target:\n l += 1\n elif sum4 > target:\n r -= 1\n \n def fourSum(self, nums,target):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n self.init()\n nums = sorted(nums)\n l = len(nums)\n if 0 < l < 4:\n return self.result\n for i in range(l):\n if nums[i] * 4 > target:\n break\n self.threeSum(nums[i+1:],target,nums[i])\n return self.result\n \nif __name__ == '__main__':\n s = Solution()\n print s.fourSum([-1,0,1,2,-1,-4],-1)\n# print s.fourSum([-3,-2,-1,0,0,1,2,3],0)\n "
] | true |
98,811 |
1f3e1f94823386408b0e224e637e121c9a0dc58f
|
#!/usr/bin/env python
## ensures the file is executed as Python code
## Node G: receives from D and sends to H
# Nodes E, F and G handle the incoming data in a similar way
# import the libraries
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float32
from std_msgs.msg import Char
# publisher initialization
pub = rospy.Publisher('CharG', String, queue_size=1)
rta = ""
# callback takes the incoming string and splits it to obtain the low, medium and high values sent as 'low/medium/high'
def callback(data):
global rta;
dato= data.data
partes= dato.split('/')
bajo=float((partes[0]))
medio=float((partes[1]))
alto=float((partes[2]))
# options for a low response
    if bajo>alto and bajo> medio:
        rta='b'
# options for a high response
    if medio< alto and bajo<alto :
        rta= 'a'
# options for a medium response
    if (medio>alto and medio> bajo) or (bajo==medio and medio>alto) or (alto==medio and medio>bajo) :
        rta= 'm'
# talker function: initializes the node and the subscriber, then publishes rta
def talker():
global rta;
rospy.init_node('NodoG', anonymous=True)
rospy.Subscriber('FuzzyD', String, callback)
while not rospy.is_shutdown():
        rate = rospy.Rate(0.5)  # 0.5 Hz
rospy.loginfo(rta)
pub.publish(rta)
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
|
[
"#!/usr/bin/env python\n## asegura que se ejecute como codigo de python\n\n## Nodo G, recibe de D y envia a H\n#Los nodos E,F,G funcionan de manera parecida en la recepcion de los datos\n\n#se importan las librerias\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Char\n\n#incializacion del publisher\npub = rospy.Publisher('CharG', String, queue_size=1) \nrta=\"\";\n\n#callback toma el dato string y lo separa para hallar el valor bajo medio y alto enviado mediante split()\ndef callback(data):\n\n global rta;\n dato= data.data\n partes= dato.split('/')\n bajo=float((partes[0]))\n\n medio=float((partes[1]))\n\n alto=float((partes[2]))\n\n\n\n#opciones para respuesta baja\n if bajo>alto and bajo> medio:\n\trta='b'\n#opciones para alto'\n if medio< alto and bajo<alto :\n\trta= 'a'\n#opciones para medio\n if (medio>alto and medio> bajo) or (bajo==medio and medio>alto) or (alto==medio and medio>bajo) :\n rta= 'm'\n\n \n\n#funcion talker, inicializa el nodo, el subscriber y publica a rta\ndef talker():\n global rta;\n rospy.init_node('NodoG', anonymous=True) \n rospy.Subscriber('FuzzyD', String, callback) \n while not rospy.is_shutdown(): \n\trate = rospy.Rate(0.5) #0.5z\n\trospy.loginfo(rta)\n\tpub.publish(rta)\n\trate.sleep() \n\n\n\n\n \nif __name__ == '__main__':\n try:\n talker()\n except rospy.ROSInterruptException:\n pass\n \n"
] | true |
98,812 |
e0f71ab448bc2d15ab4811cf987e4e63a75e7cd2
|
"""
"""
class Solution(object):
def findNthDigit(self, n):
"""
:type n: int
:rtype: int
"""
digitType = 1
digitNum= 9
while n > digitType * digitNum:
n = n - digitType * digitNum
digitType += 1
digitNum *= 10
n -= 1
        realNum = (n // digitType) + 10 ** (digitType - 1)
return int(str(realNum)[n % digitType])
if __name__ == '__main__':
sol = Solution()
assert sol.findNthDigit(13) == 1
assert sol.findNthDigit(3) == 3
|
[
"\"\"\"\n\n\"\"\"\n\nclass Solution(object):\n def findNthDigit(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n digitType = 1\n digitNum= 9\n while n > digitType * digitNum:\n n = n - digitType * digitNum\n digitType += 1\n digitNum *= 10\n n -= 1\n realNum = (n/digitType) + 10 ** (digitType - 1)\n return int(str(realNum)[n % digitType])\n\nif __name__ == '__main__':\n sol = Solution()\n assert sol.findNthDigit(13) == 1\n assert sol.findNthDigit(3) == 3",
"<docstring token>\n\n\nclass Solution(object):\n\n def findNthDigit(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n digitType = 1\n digitNum = 9\n while n > digitType * digitNum:\n n = n - digitType * digitNum\n digitType += 1\n digitNum *= 10\n n -= 1\n realNum = n / digitType + 10 ** (digitType - 1)\n return int(str(realNum)[n % digitType])\n\n\nif __name__ == '__main__':\n sol = Solution()\n assert sol.findNthDigit(13) == 1\n assert sol.findNthDigit(3) == 3\n",
"<docstring token>\n\n\nclass Solution(object):\n\n def findNthDigit(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n digitType = 1\n digitNum = 9\n while n > digitType * digitNum:\n n = n - digitType * digitNum\n digitType += 1\n digitNum *= 10\n n -= 1\n realNum = n / digitType + 10 ** (digitType - 1)\n return int(str(realNum)[n % digitType])\n\n\n<code token>\n",
"<docstring token>\n\n\nclass Solution(object):\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<class token>\n<code token>\n"
] | false |
98,813 |
09a48c3b7ebb7c89dd23035b7c22aa4dd3109f89
|
from .pca import pca
from .phate import phate
from .tsne import tsne
from .umap import umap
|
[
"from .pca import pca\nfrom .phate import phate\nfrom .tsne import tsne\nfrom .umap import umap\n",
"<import token>\n"
] | false |
98,814 |
d19d4857f0526af65acb804be3b5fc3370b0bb71
|
import argparse
import torch
from torch.autograd import Variable
from torchvision.utils import save_image
import numpy as np
from model import *
import os
import torch.backends.cudnn as cudnn
import time
import utils
import dataset
import math
from ripser import ripser
from persim import plot_diagrams
from pylab import subplot
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='PyTorch Cycle Domain Adaptation Training')
parser.add_argument('--dataset', default='mnist', type=str, help='source dataset')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--epoch', default=90, type=int, metavar='N', help='number of total epoch to run')
parser.add_argument('--decay-epoch', default=30, type=int, metavar='N', help='epoch from which to start lr decay')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--maxN', type=int, default=80, help='Maximum Buffer Size')
parser.add_argument('-b', '--batch-size', default=128, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=1e-4, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--latent-size', type=int, default=64, help='dimension of latent z')
parser.add_argument('--h', type=int, default=400, help='dimension of hidden layer')
parser.add_argument('--img-size', type=int, default=28, help='input image width, height size')
parser.add_argument('--dir', default='./', type=str, help='default save directory')
parser.add_argument('--gpu', default='0', type=str, help='Multi GPU ids to use.')
source_prediction_max_result = []
target_prediction_max_result = []
best_prec_result = torch.tensor(0, dtype=torch.float32)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.manual_seed(args.seed)
cuda = True if torch.cuda.is_available() else False
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
criterion_BCE = torch.nn.BCELoss(reduction='sum')
criterion = torch.nn.CrossEntropyLoss()
def loss_function(x_hat, x, mu, log_var):
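    # Standard VAE objective: summed binary cross-entropy reconstruction term plus the
    # KL divergence of the approximate posterior N(mu, exp(log_var)) from the unit Gaussian prior.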
BCE = criterion_BCE(x_hat.view(x_hat.size(0), -1), x.view(x.size(0), -1))
KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
return BCE + KLD, BCE.item(), KLD.item()
class Memory(object):
def __init__(self, args):
self.N = args.maxN # size of ALL Buffer
self.index = 0
self.z = torch.zeros([self.N, args.latent_size], device="cpu", dtype=torch.float32)
    def Insert_memory(self, z):  # insert a single latent vector, overwriting the oldest slot once the buffer is full
        if self.index >= self.N:
            self.index = 0
        self.z[self.index] = z.data
        del z
        self.index = self.index + 1
def calc_TDA(self, epoch, cls_num):
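        # Compute persistence diagrams up to H3 for the buffered latent vectors with ripser,
        # then save a combined plot and one subplot per non-empty homology dimension.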
path = utils.make_directory(os.path.join(utils.default_model_dir, 'tda_total', str(cls_num)))
path2 = utils.make_directory(os.path.join(utils.default_model_dir, 'tda_sub', str(cls_num)))
dgms = ripser(self.z.data, maxdim=3)['dgms']
plot_diagrams(dgms)
plt.savefig('{}/{}_total.png'.format(path, epoch))
plt.clf()
        if len(dgms[0]) != 0:
            plot_diagrams(dgms, plot_only=[0], ax=subplot(221))
        if len(dgms[1]) != 0:
            plot_diagrams(dgms, plot_only=[1], ax=subplot(222))
        if len(dgms[2]) != 0:
            plot_diagrams(dgms, plot_only=[2], ax=subplot(223))
        if len(dgms[3]) != 0:
            plot_diagrams(dgms, plot_only=[3], ax=subplot(224))
plt.savefig('{}/{}_sub.png'.format(path2, epoch))
plt.clf()
class MemorySet(object):
def __init__(self, args):
self.clsN = 10
self.Set = []
for i in range(self.clsN):
self.Set.append(Memory(args=args))
def Batch_Insert(self, z, y):
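        # Route every latent vector in the batch to the buffer of its class label.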
for i in range(z.size(0)):
label = y[i]
data = z[i]
self.Set[label].Insert_memory(data)
def calc_TDAs(self, epoch):
for i in range(self.clsN):
self.Set[i].calc_TDA(epoch, i)
Memory = MemorySet(args=args)
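# NOTE: this instance rebinds the module-level name Memory, shadowing the Memory class defined above.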
def main():
global args, best_prec_result
start_epoch = 0
utils.default_model_dir = args.dir
start_time = time.time()
train_loader, test_loader, ch, wh = dataset_selector(args.dataset)
sample = extract_sample(train_loader)
state_info = utils.model_optim_state_info()
state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.latent_size, num_class=10)
state_info.model_cuda_init()
state_info.weight_init()
state_info.optimizer_init(args)
if cuda:
print("USE", torch.cuda.device_count(), "GPUs!")
cudnn.benchmark = True
state_info.learning_scheduler_init(args)
for epoch in range(start_epoch, args.epoch):
train(state_info, train_loader, epoch)
test(state_info, test_loader, sample, epoch)
state_info.learning_step()
now = time.gmtime(time.time() - start_time)
utils.print_log('{} hours {} mins {} secs for training'.format(now.tm_hour, now.tm_min, now.tm_sec))
def train(state_info, train_loader, epoch): # all
utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')
state_info.set_train_mode()
correct = torch.tensor(0, dtype=torch.float32)
total = torch.tensor(0, dtype=torch.float32)
for it, (x, y) in enumerate(train_loader):
x, y = to_var(x, FloatTensor), to_var(y, LongTensor)
x_hat, mu, log_var, z = state_info.forward(x)
# Train
state_info.optim_VAE.zero_grad()
loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)
loss.backward(retain_graph=True)
state_info.optim_VAE.step()
        # log the loss components every 10 mini-batches
if it % 10 == 0:
utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'
.format(epoch, it, loss.item(), BCE, KLD))
print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'
.format(epoch, it, loss.item(), BCE, KLD))
utils.print_log('')
def test(state_info, test_loader, sample, epoch):
global Memory
for it, (x, y) in enumerate(test_loader):
x, y = to_var(x, FloatTensor), to_var(y, LongTensor)
x_hat, mu, log_var, z = state_info.forward(x)
Memory.Batch_Insert(z, y)
Memory.calc_TDAs(epoch)
make_sample_image(state_info, sample, epoch)
utils.print_log('')
def make_sample_image(state_info, sample, epoch):
"""Saves a grid of generated digits ranging from 0 to n_classes"""
img_path = utils.make_directory(os.path.join(utils.default_model_dir, 'image'))
sample_hat, _, _, _ = state_info.forward(sample)
sample, sample_hat = to_data(sample), to_data(sample_hat)
image = merge_images(sample, sample_hat)
save_image(image.data, os.path.join(img_path, '%d.png' % epoch), normalize=True)
def merge_images(sources, targets, row=10):
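    # Arrange up to row*row (source, reconstruction) pairs side by side in a single image grid.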
_, _, h, w = sources.shape
merged = np.zeros([3, row*h, row*w*2])
for idx, (s, t) in enumerate(zip(sources, targets)):
i = idx // row
j = idx % row
        if i == row:
break
merged[:, i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s
merged[:, i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t
return torch.from_numpy(merged)
def dataset_selector(data):
if data == 'mnist':
return dataset.MNIST_loader(img_size=args.img_size)
elif data == 'svhn':
return dataset.SVHN_loader(img_size=32)
elif data == "usps":
return dataset.usps_loader(img_size=args.img_size)
elif data == "mnistm":
return dataset.MNIST_M_loader(img_size=args.img_size)
def to_data(x):
"""Converts variable to numpy."""
if torch.cuda.is_available():
x = x.cpu()
return x.data.numpy()
def to_var(x, dtype):
return Variable(x.type(dtype))
def extract_sample(train_loader):
for step, (sample, _) in enumerate(train_loader):
sample = to_var(sample, FloatTensor)
        break
return sample
if __name__=='__main__':
main()
|
[
"import argparse\nimport torch\nfrom torch.autograd import Variable\nfrom torchvision.utils import save_image\nimport numpy as np\nfrom model import *\nimport os\nimport torch.backends.cudnn as cudnn\nimport time\nimport utils\nimport dataset\nimport math\n\nfrom ripser import ripser\nfrom persim import plot_diagrams\nfrom pylab import subplot\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(description='PyTorch Cycle Domain Adaptation Training')\nparser.add_argument('--dataset', default='mnist', type=str, help='source dataset')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')\nparser.add_argument('--epoch', default=90, type=int, metavar='N', help='number of total epoch to run')\nparser.add_argument('--decay-epoch', default=30, type=int, metavar='N', help='epoch from which to start lr decay')\nparser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\nparser.add_argument('--maxN', type=int, default=80, help='Maximum Buffer Size')\nparser.add_argument('-b', '--batch-size', default=128, type=int, metavar='N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=1e-4, type=float, metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')\nparser.add_argument('--latent-size', type=int, default=64, help='dimension of latent z')\nparser.add_argument('--h', type=int, default=400, help='dimension of hidden layer')\nparser.add_argument('--img-size', type=int, default=28, help='input image width, height size')\n\nparser.add_argument('--dir', default='./', type=str, help='default save directory')\nparser.add_argument('--gpu', default='0', type=str, help='Multi GPU ids to use.')\n\nsource_prediction_max_result = []\ntarget_prediction_max_result = []\nbest_prec_result = torch.tensor(0, dtype=torch.float32)\n\nargs = parser.parse_args()\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\ntorch.manual_seed(args.seed)\n\ncuda = True if torch.cuda.is_available() else False\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\n\ncriterion_BCE = torch.nn.BCELoss(reduction='sum')\ncriterion = torch.nn.CrossEntropyLoss()\n\ndef loss_function(x_hat, x, mu, log_var):\n BCE = criterion_BCE(x_hat.view(x_hat.size(0), -1), x.view(x.size(0), -1))\n KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())\n return BCE + KLD, BCE.item(), KLD.item()\n\nclass Memory(object):\n def __init__(self, args):\n self.N = args.maxN # size of ALL Buffer\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device=\"cpu\", dtype=torch.float32)\n\n def Insert_memory(self, z): # Actual Function\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del(z)\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir, 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir, 'tda_sub', str(cls_num)))\n dgms = 
ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\nclass MemorySet(object):\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\nMemory = MemorySet(args=args)\n\ndef main():\n global args, best_prec_result\n \n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n\n if cuda:\n print(\"USE\", torch.cuda.device_count(), \"GPUs!\")\n cudnn.benchmark = True\n\n state_info.learning_scheduler_init(args)\n\n for epoch in range(start_epoch, args.epoch):\n \n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n\n state_info.learning_step() \n\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.tm_hour, now.tm_min, now.tm_sec))\n\ndef train(state_info, train_loader, epoch): # all \n\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n\n for it, (x, y) in enumerate(train_loader):\n\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n \n # Train \n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n\n # mapping info of <y, cls_output> print\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'\n .format(epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'\n .format(epoch, it, loss.item(), BCE, KLD))\n\n utils.print_log('')\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n\n Memory.calc_TDAs(epoch)\n\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\ndef make_sample_image(state_info, sample, epoch):\n \"\"\"Saves a grid of generated digits ranging from 0 to n_classes\"\"\"\n\n img_path = utils.make_directory(os.path.join(utils.default_model_dir, 'image'))\n sample_hat, _, _, _ = state_info.forward(sample)\n sample, sample_hat = to_data(sample), to_data(sample_hat)\n image = merge_images(sample, sample_hat)\n save_image(image.data, os.path.join(img_path, '%d.png' % epoch), normalize=True)\n\ndef merge_images(sources, targets, row=10):\n _, _, 
h, w = sources.shape\n merged = np.zeros([3, row*h, row*w*2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n if i is row:\n break\n merged[:, i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s\n merged[:, i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t\n\n return torch.from_numpy(merged)\n\ndef dataset_selector(data):\n if data == 'mnist':\n return dataset.MNIST_loader(img_size=args.img_size)\n elif data == 'svhn':\n return dataset.SVHN_loader(img_size=32)\n elif data == \"usps\":\n return dataset.usps_loader(img_size=args.img_size)\n elif data == \"mnistm\":\n return dataset.MNIST_M_loader(img_size=args.img_size)\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\ndef extract_sample(train_loader):\n\n for step, (sample, _) in enumerate(train_loader):\n sample = to_var(sample, FloatTensor)\n break;\n return sample\n\nif __name__=='__main__':\n main()",
"import argparse\nimport torch\nfrom torch.autograd import Variable\nfrom torchvision.utils import save_image\nimport numpy as np\nfrom model import *\nimport os\nimport torch.backends.cudnn as cudnn\nimport time\nimport utils\nimport dataset\nimport math\nfrom ripser import ripser\nfrom persim import plot_diagrams\nfrom pylab import subplot\nimport matplotlib.pyplot as plt\nparser = argparse.ArgumentParser(description=\n 'PyTorch Cycle Domain Adaptation Training')\nparser.add_argument('--dataset', default='mnist', type=str, help=\n 'source dataset')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epoch', default=90, type=int, metavar='N', help=\n 'number of total epoch to run')\nparser.add_argument('--decay-epoch', default=30, type=int, metavar='N',\n help='epoch from which to start lr decay')\nparser.add_argument('--seed', type=int, default=1, metavar='S', help=\n 'random seed (default: 1)')\nparser.add_argument('--maxN', type=int, default=80, help='Maximum Buffer Size')\nparser.add_argument('-b', '--batch-size', default=128, type=int, metavar=\n 'N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=0.0001, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--b1', type=float, default=0.5, help=\n 'adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help=\n 'adam: decay of first order momentum of gradient')\nparser.add_argument('--latent-size', type=int, default=64, help=\n 'dimension of latent z')\nparser.add_argument('--h', type=int, default=400, help=\n 'dimension of hidden layer')\nparser.add_argument('--img-size', type=int, default=28, help=\n 'input image width, height size')\nparser.add_argument('--dir', default='./', type=str, help=\n 'default save directory')\nparser.add_argument('--gpu', default='0', type=str, help=\n 'Multi GPU ids to use.')\nsource_prediction_max_result = []\ntarget_prediction_max_result = []\nbest_prec_result = torch.tensor(0, dtype=torch.float32)\nargs = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\ntorch.manual_seed(args.seed)\ncuda = True if torch.cuda.is_available() else False\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\ncriterion_BCE = torch.nn.BCELoss(reduction='sum')\ncriterion = torch.nn.CrossEntropyLoss()\n\n\ndef loss_function(x_hat, x, mu, log_var):\n BCE = criterion_BCE(x_hat.view(x_hat.size(0), -1), x.view(x.size(0), -1))\n KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())\n return BCE + KLD, BCE.item(), KLD.item()\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = 
ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\nMemory = MemorySet(args=args)\n\n\ndef main():\n global args, best_prec_result\n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.\n latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n if cuda:\n print('USE', torch.cuda.device_count(), 'GPUs!')\n cudnn.benchmark = True\n state_info.learning_scheduler_init(args)\n for epoch in range(start_epoch, args.epoch):\n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n state_info.learning_step()\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.\n tm_hour, now.tm_min, now.tm_sec))\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\ndef make_sample_image(state_info, sample, epoch):\n \"\"\"Saves a grid of generated digits ranging from 0 to n_classes\"\"\"\n img_path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'image'))\n sample_hat, _, _, _ = state_info.forward(sample)\n sample, sample_hat = to_data(sample), to_data(sample_hat)\n image = merge_images(sample, sample_hat)\n save_image(image.data, os.path.join(img_path, '%d.png' % epoch),\n normalize=True)\n\n\ndef merge_images(sources, targets, row=10):\n _, _, h, w = sources.shape\n merged = np.zeros([3, row * h, row * w * 2])\n for idx, 
(s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n if i is row:\n break\n merged[:, i * h:(i + 1) * h, j * 2 * h:(j * 2 + 1) * h] = s\n merged[:, i * h:(i + 1) * h, (j * 2 + 1) * h:(j * 2 + 2) * h] = t\n return torch.from_numpy(merged)\n\n\ndef dataset_selector(data):\n if data == 'mnist':\n return dataset.MNIST_loader(img_size=args.img_size)\n elif data == 'svhn':\n return dataset.SVHN_loader(img_size=32)\n elif data == 'usps':\n return dataset.usps_loader(img_size=args.img_size)\n elif data == 'mnistm':\n return dataset.MNIST_M_loader(img_size=args.img_size)\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\ndef extract_sample(train_loader):\n for step, (sample, _) in enumerate(train_loader):\n sample = to_var(sample, FloatTensor)\n break\n return sample\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\nparser = argparse.ArgumentParser(description=\n 'PyTorch Cycle Domain Adaptation Training')\nparser.add_argument('--dataset', default='mnist', type=str, help=\n 'source dataset')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epoch', default=90, type=int, metavar='N', help=\n 'number of total epoch to run')\nparser.add_argument('--decay-epoch', default=30, type=int, metavar='N',\n help='epoch from which to start lr decay')\nparser.add_argument('--seed', type=int, default=1, metavar='S', help=\n 'random seed (default: 1)')\nparser.add_argument('--maxN', type=int, default=80, help='Maximum Buffer Size')\nparser.add_argument('-b', '--batch-size', default=128, type=int, metavar=\n 'N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=0.0001, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--b1', type=float, default=0.5, help=\n 'adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help=\n 'adam: decay of first order momentum of gradient')\nparser.add_argument('--latent-size', type=int, default=64, help=\n 'dimension of latent z')\nparser.add_argument('--h', type=int, default=400, help=\n 'dimension of hidden layer')\nparser.add_argument('--img-size', type=int, default=28, help=\n 'input image width, height size')\nparser.add_argument('--dir', default='./', type=str, help=\n 'default save directory')\nparser.add_argument('--gpu', default='0', type=str, help=\n 'Multi GPU ids to use.')\nsource_prediction_max_result = []\ntarget_prediction_max_result = []\nbest_prec_result = torch.tensor(0, dtype=torch.float32)\nargs = parser.parse_args()\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\ntorch.manual_seed(args.seed)\ncuda = True if torch.cuda.is_available() else False\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\ncriterion_BCE = torch.nn.BCELoss(reduction='sum')\ncriterion = torch.nn.CrossEntropyLoss()\n\n\ndef loss_function(x_hat, x, mu, log_var):\n BCE = criterion_BCE(x_hat.view(x_hat.size(0), -1), x.view(x.size(0), -1))\n KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())\n return BCE + KLD, BCE.item(), KLD.item()\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], 
ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\nMemory = MemorySet(args=args)\n\n\ndef main():\n global args, best_prec_result\n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.\n latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n if cuda:\n print('USE', torch.cuda.device_count(), 'GPUs!')\n cudnn.benchmark = True\n state_info.learning_scheduler_init(args)\n for epoch in range(start_epoch, args.epoch):\n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n state_info.learning_step()\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.\n tm_hour, now.tm_min, now.tm_sec))\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\ndef make_sample_image(state_info, sample, epoch):\n \"\"\"Saves a grid of generated digits ranging from 0 to n_classes\"\"\"\n img_path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'image'))\n sample_hat, _, _, _ = state_info.forward(sample)\n sample, sample_hat = to_data(sample), to_data(sample_hat)\n image = merge_images(sample, sample_hat)\n save_image(image.data, os.path.join(img_path, '%d.png' % epoch),\n normalize=True)\n\n\ndef merge_images(sources, targets, row=10):\n _, _, h, w = sources.shape\n merged = np.zeros([3, row * h, row * w * 2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n if i is row:\n break\n merged[:, i * h:(i + 1) * h, j * 2 * h:(j * 2 + 1) * h] = s\n merged[:, i * h:(i + 1) * h, (j * 2 + 1) * h:(j * 2 + 2) * h] = t\n return torch.from_numpy(merged)\n\n\ndef dataset_selector(data):\n if data == 'mnist':\n return 
dataset.MNIST_loader(img_size=args.img_size)\n elif data == 'svhn':\n return dataset.SVHN_loader(img_size=32)\n elif data == 'usps':\n return dataset.usps_loader(img_size=args.img_size)\n elif data == 'mnistm':\n return dataset.MNIST_M_loader(img_size=args.img_size)\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\ndef extract_sample(train_loader):\n for step, (sample, _) in enumerate(train_loader):\n sample = to_var(sample, FloatTensor)\n break\n return sample\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\nparser.add_argument('--dataset', default='mnist', type=str, help=\n 'source dataset')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epoch', default=90, type=int, metavar='N', help=\n 'number of total epoch to run')\nparser.add_argument('--decay-epoch', default=30, type=int, metavar='N',\n help='epoch from which to start lr decay')\nparser.add_argument('--seed', type=int, default=1, metavar='S', help=\n 'random seed (default: 1)')\nparser.add_argument('--maxN', type=int, default=80, help='Maximum Buffer Size')\nparser.add_argument('-b', '--batch-size', default=128, type=int, metavar=\n 'N', help='mini-batch size (default: 256)')\nparser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=0.0001, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\nparser.add_argument('--b1', type=float, default=0.5, help=\n 'adam: decay of first order momentum of gradient')\nparser.add_argument('--b2', type=float, default=0.999, help=\n 'adam: decay of first order momentum of gradient')\nparser.add_argument('--latent-size', type=int, default=64, help=\n 'dimension of latent z')\nparser.add_argument('--h', type=int, default=400, help=\n 'dimension of hidden layer')\nparser.add_argument('--img-size', type=int, default=28, help=\n 'input image width, height size')\nparser.add_argument('--dir', default='./', type=str, help=\n 'default save directory')\nparser.add_argument('--gpu', default='0', type=str, help=\n 'Multi GPU ids to use.')\n<assignment token>\ntorch.manual_seed(args.seed)\n<assignment token>\n\n\ndef loss_function(x_hat, x, mu, log_var):\n BCE = criterion_BCE(x_hat.view(x_hat.size(0), -1), x.view(x.size(0), -1))\n KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())\n return BCE + KLD, BCE.item(), KLD.item()\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n 
self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n\n\ndef main():\n global args, best_prec_result\n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.\n latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n if cuda:\n print('USE', torch.cuda.device_count(), 'GPUs!')\n cudnn.benchmark = True\n state_info.learning_scheduler_init(args)\n for epoch in range(start_epoch, args.epoch):\n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n state_info.learning_step()\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.\n tm_hour, now.tm_min, now.tm_sec))\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\ndef make_sample_image(state_info, sample, epoch):\n \"\"\"Saves a grid of generated digits ranging from 0 to n_classes\"\"\"\n img_path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'image'))\n sample_hat, _, _, _ = state_info.forward(sample)\n sample, sample_hat = to_data(sample), to_data(sample_hat)\n image = merge_images(sample, sample_hat)\n save_image(image.data, os.path.join(img_path, '%d.png' % epoch),\n normalize=True)\n\n\ndef merge_images(sources, targets, row=10):\n _, _, h, w = sources.shape\n merged = np.zeros([3, row * h, row * w * 2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n if i is row:\n break\n merged[:, i * h:(i + 1) * h, j * 2 * h:(j * 2 + 1) * h] = s\n merged[:, i * h:(i + 1) * h, (j * 2 + 1) * h:(j * 2 + 2) * h] = t\n return torch.from_numpy(merged)\n\n\ndef dataset_selector(data):\n if data == 'mnist':\n return dataset.MNIST_loader(img_size=args.img_size)\n elif data == 'svhn':\n return dataset.SVHN_loader(img_size=32)\n elif data == 'usps':\n return dataset.usps_loader(img_size=args.img_size)\n elif data == 'mnistm':\n return dataset.MNIST_M_loader(img_size=args.img_size)\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\ndef extract_sample(train_loader):\n for step, (sample, _) in 
enumerate(train_loader):\n sample = to_var(sample, FloatTensor)\n break\n return sample\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef loss_function(x_hat, x, mu, log_var):\n BCE = criterion_BCE(x_hat.view(x_hat.size(0), -1), x.view(x.size(0), -1))\n KLD = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())\n return BCE + KLD, BCE.item(), KLD.item()\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n\n\ndef main():\n global args, best_prec_result\n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.\n latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n if cuda:\n print('USE', torch.cuda.device_count(), 'GPUs!')\n cudnn.benchmark = True\n state_info.learning_scheduler_init(args)\n for epoch in range(start_epoch, args.epoch):\n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n state_info.learning_step()\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.\n tm_hour, now.tm_min, now.tm_sec))\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, 
test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\ndef make_sample_image(state_info, sample, epoch):\n \"\"\"Saves a grid of generated digits ranging from 0 to n_classes\"\"\"\n img_path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'image'))\n sample_hat, _, _, _ = state_info.forward(sample)\n sample, sample_hat = to_data(sample), to_data(sample_hat)\n image = merge_images(sample, sample_hat)\n save_image(image.data, os.path.join(img_path, '%d.png' % epoch),\n normalize=True)\n\n\ndef merge_images(sources, targets, row=10):\n _, _, h, w = sources.shape\n merged = np.zeros([3, row * h, row * w * 2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n if i is row:\n break\n merged[:, i * h:(i + 1) * h, j * 2 * h:(j * 2 + 1) * h] = s\n merged[:, i * h:(i + 1) * h, (j * 2 + 1) * h:(j * 2 + 2) * h] = t\n return torch.from_numpy(merged)\n\n\ndef dataset_selector(data):\n if data == 'mnist':\n return dataset.MNIST_loader(img_size=args.img_size)\n elif data == 'svhn':\n return dataset.SVHN_loader(img_size=32)\n elif data == 'usps':\n return dataset.usps_loader(img_size=args.img_size)\n elif data == 'mnistm':\n return dataset.MNIST_M_loader(img_size=args.img_size)\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\ndef extract_sample(train_loader):\n for step, (sample, _) in enumerate(train_loader):\n sample = to_var(sample, FloatTensor)\n break\n return sample\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n\n\ndef main():\n global args, best_prec_result\n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.\n latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n if cuda:\n print('USE', torch.cuda.device_count(), 'GPUs!')\n cudnn.benchmark = True\n state_info.learning_scheduler_init(args)\n for epoch in range(start_epoch, args.epoch):\n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n state_info.learning_step()\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.\n tm_hour, now.tm_min, now.tm_sec))\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n 
Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\ndef make_sample_image(state_info, sample, epoch):\n \"\"\"Saves a grid of generated digits ranging from 0 to n_classes\"\"\"\n img_path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'image'))\n sample_hat, _, _, _ = state_info.forward(sample)\n sample, sample_hat = to_data(sample), to_data(sample_hat)\n image = merge_images(sample, sample_hat)\n save_image(image.data, os.path.join(img_path, '%d.png' % epoch),\n normalize=True)\n\n\ndef merge_images(sources, targets, row=10):\n _, _, h, w = sources.shape\n merged = np.zeros([3, row * h, row * w * 2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n if i is row:\n break\n merged[:, i * h:(i + 1) * h, j * 2 * h:(j * 2 + 1) * h] = s\n merged[:, i * h:(i + 1) * h, (j * 2 + 1) * h:(j * 2 + 2) * h] = t\n return torch.from_numpy(merged)\n\n\ndef dataset_selector(data):\n if data == 'mnist':\n return dataset.MNIST_loader(img_size=args.img_size)\n elif data == 'svhn':\n return dataset.SVHN_loader(img_size=32)\n elif data == 'usps':\n return dataset.usps_loader(img_size=args.img_size)\n elif data == 'mnistm':\n return dataset.MNIST_M_loader(img_size=args.img_size)\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\ndef extract_sample(train_loader):\n for step, (sample, _) in enumerate(train_loader):\n sample = to_var(sample, FloatTensor)\n break\n return sample\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n\n\ndef main():\n global args, best_prec_result\n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.\n latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n if cuda:\n print('USE', torch.cuda.device_count(), 'GPUs!')\n cudnn.benchmark = True\n state_info.learning_scheduler_init(args)\n for epoch in range(start_epoch, args.epoch):\n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n state_info.learning_step()\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.\n tm_hour, now.tm_min, now.tm_sec))\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n 
Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\ndef make_sample_image(state_info, sample, epoch):\n \"\"\"Saves a grid of generated digits ranging from 0 to n_classes\"\"\"\n img_path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'image'))\n sample_hat, _, _, _ = state_info.forward(sample)\n sample, sample_hat = to_data(sample), to_data(sample_hat)\n image = merge_images(sample, sample_hat)\n save_image(image.data, os.path.join(img_path, '%d.png' % epoch),\n normalize=True)\n\n\n<function token>\n\n\ndef dataset_selector(data):\n if data == 'mnist':\n return dataset.MNIST_loader(img_size=args.img_size)\n elif data == 'svhn':\n return dataset.SVHN_loader(img_size=32)\n elif data == 'usps':\n return dataset.usps_loader(img_size=args.img_size)\n elif data == 'mnistm':\n return dataset.MNIST_M_loader(img_size=args.img_size)\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\ndef extract_sample(train_loader):\n for step, (sample, _) in enumerate(train_loader):\n sample = to_var(sample, FloatTensor)\n break\n return sample\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n\n\ndef main():\n global args, best_prec_result\n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.\n latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n if cuda:\n print('USE', torch.cuda.device_count(), 'GPUs!')\n cudnn.benchmark = True\n state_info.learning_scheduler_init(args)\n for epoch in range(start_epoch, args.epoch):\n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n state_info.learning_step()\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.\n tm_hour, now.tm_min, now.tm_sec))\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n 
Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\n<function token>\n<function token>\n\n\ndef dataset_selector(data):\n if data == 'mnist':\n return dataset.MNIST_loader(img_size=args.img_size)\n elif data == 'svhn':\n return dataset.SVHN_loader(img_size=32)\n elif data == 'usps':\n return dataset.usps_loader(img_size=args.img_size)\n elif data == 'mnistm':\n return dataset.MNIST_M_loader(img_size=args.img_size)\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\ndef extract_sample(train_loader):\n for step, (sample, _) in enumerate(train_loader):\n sample = to_var(sample, FloatTensor)\n break\n return sample\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n\n\ndef main():\n global args, best_prec_result\n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.\n latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n if cuda:\n print('USE', torch.cuda.device_count(), 'GPUs!')\n cudnn.benchmark = True\n state_info.learning_scheduler_init(args)\n for epoch in range(start_epoch, args.epoch):\n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n state_info.learning_step()\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.\n tm_hour, now.tm_min, now.tm_sec))\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n 
Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\n<function token>\n<function token>\n\n\ndef dataset_selector(data):\n if data == 'mnist':\n return dataset.MNIST_loader(img_size=args.img_size)\n elif data == 'svhn':\n return dataset.SVHN_loader(img_size=32)\n elif data == 'usps':\n return dataset.usps_loader(img_size=args.img_size)\n elif data == 'mnistm':\n return dataset.MNIST_M_loader(img_size=args.img_size)\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n\n\ndef main():\n global args, best_prec_result\n start_epoch = 0\n utils.default_model_dir = args.dir\n start_time = time.time()\n train_loader, test_loader, ch, wh = dataset_selector(args.dataset)\n sample = extract_sample(train_loader)\n state_info = utils.model_optim_state_info()\n state_info.model_init(Img=[ch, wh], H=args.h, latent_size=args.\n latent_size, num_class=10)\n state_info.model_cuda_init()\n state_info.weight_init()\n state_info.optimizer_init(args)\n if cuda:\n print('USE', torch.cuda.device_count(), 'GPUs!')\n cudnn.benchmark = True\n state_info.learning_scheduler_init(args)\n for epoch in range(start_epoch, args.epoch):\n train(state_info, train_loader, epoch)\n test(state_info, test_loader, sample, epoch)\n state_info.learning_step()\n now = time.gmtime(time.time() - start_time)\n utils.print_log('{} hours {} mins {} secs for training'.format(now.\n tm_hour, now.tm_min, now.tm_sec))\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n 
Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n\n\ndef train(state_info, train_loader, epoch):\n utils.print_log('Type, Epoch, Batch, loss, BCE, KLD')\n state_info.set_train_mode()\n correct = torch.tensor(0, dtype=torch.float32)\n total = torch.tensor(0, dtype=torch.float32)\n for it, (x, y) in enumerate(train_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n state_info.optim_VAE.zero_grad()\n loss, BCE, KLD = loss_function(x_hat, x, mu, log_var)\n loss.backward(retain_graph=True)\n state_info.optim_VAE.step()\n if it % 10 == 0:\n utils.print_log('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(\n epoch, it, loss.item(), BCE, KLD))\n print('Train, {}, {}, {:.6f}, {:.6f}, {:.6f}'.format(epoch, it,\n loss.item(), BCE, KLD))\n utils.print_log('')\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n<function token>\n\n\ndef test(state_info, test_loader, sample, epoch):\n global Memory\n for it, (x, y) in enumerate(test_loader):\n x, y = to_var(x, FloatTensor), to_var(y, LongTensor)\n x_hat, mu, log_var, z = state_info.forward(x)\n Memory.Batch_Insert(z, y)\n Memory.calc_TDAs(epoch)\n make_sample_image(state_info, sample, epoch)\n utils.print_log('')\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef to_var(x, dtype):\n return Variable(x.type(dtype))\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n\n def Insert_memory(self, z):\n if self.index >= self.N:\n self.index = 0\n self.z[self.index] = z.data\n del z\n self.index = self.index + 1\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n\n def __init__(self, args):\n self.N = args.maxN\n self.index = 0\n self.z = torch.zeros([self.N, args.latent_size], device='cpu',\n dtype=torch.float32)\n <function token>\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n <function token>\n <function token>\n\n def calc_TDA(self, epoch, cls_num):\n path = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_total', str(cls_num)))\n path2 = utils.make_directory(os.path.join(utils.default_model_dir,\n 'tda_sub', str(cls_num)))\n dgms = ripser(self.z.data, maxdim=3)['dgms']\n plot_diagrams(dgms)\n plt.savefig('{}/{}_total.png'.format(path, epoch))\n plt.clf()\n if len(dgms[0]) is not 0:\n plot_diagrams(dgms, plot_only=[0], ax=subplot(221))\n if len(dgms[1]) is not 0:\n plot_diagrams(dgms, plot_only=[1], ax=subplot(222))\n if len(dgms[2]) is not 0:\n plot_diagrams(dgms, plot_only=[2], ax=subplot(223))\n if len(dgms[3]) is not 0:\n plot_diagrams(dgms, plot_only=[3], ax=subplot(224))\n plt.savefig('{}/{}_sub.png'.format(path2, epoch))\n plt.clf()\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\nclass Memory(object):\n <function token>\n <function token>\n <function token>\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n\n def Batch_Insert(self, z, y):\n for i in range(z.size(0)):\n label = y[i]\n data = z[i]\n self.Set[label].Insert_memory(data)\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n <function token>\n\n def calc_TDAs(self, epoch):\n for i in range(self.clsN):\n self.Set[i].calc_TDA(epoch, i)\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass MemorySet(object):\n\n def __init__(self, args):\n self.clsN = 10\n self.Set = []\n for i in range(self.clsN):\n self.Set.append(Memory(args=args))\n <function token>\n <function token>\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<class token>\n\n\nclass MemorySet(object):\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<class token>\n<class token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,815 |
c6080852e7d66e241ddcd40becd8fa4873f1bdf6
|
import sys
input = sys.stdin.buffer.readline

# Read every query up front. Each query is a directed graph with N nodes and
# M edges (a -> b), stored 0-indexed in an adjacency list; the scan below
# assumes edges point from a lower index to a higher one.
Q = int(input())
Query = []
for _ in range(Q):
    N, M = map(int, input().split())
    graph = [[] for _ in range(N)]
    for _ in range(M):
        a, b = map(int, input().split())
        graph[a-1].append(b-1)
    Query.append((N, M, graph))

for N, M, graph in Query:
    Delete = [False]*N
    # Seq[i] holds the length of the longest surviving chain ending at node i.
    Seq = [0]*N
    for i in range(N):
        if Seq[i] >= 2:
            # A chain of two edges already ends here: delete this node and
            # prevent chains from extending through it.
            Delete[i] = True
            Seq[i] = -1
        for p in graph[i]:
            Seq[p] = max(Seq[i] + 1, Seq[p])
    # Report the deleted nodes, converted back to 1-indexed labels.
    ans = []
    for i in range(N):
        if Delete[i]:
            ans.append(i+1)
    print(len(ans))
    print(*ans)
|
[
"import sys\ninput = sys.stdin.buffer.readline\n\n\nQ = int(input())\nQuery = []\nfor _ in range(Q):\n N, M = map(int, input().split())\n graph = [[] for _ in range(N)]\n for _ in range(M):\n a, b = map(int, input().split())\n graph[a-1].append(b-1)\n Query.append((N, M, graph))\n\nfor N, M, graph in Query:\n Delete = [False]*N\n Seq = [0]*N\n for i in range(N):\n if Seq[i] >= 2:\n Delete[i] = True\n Seq[i] = -1\n for p in graph[i]:\n Seq[p] = max(Seq[i] + 1, Seq[p])\n ans = []\n for i in range(N):\n if Delete[i]:\n ans.append(i+1)\n print(len(ans))\n print(*ans)",
"import sys\ninput = sys.stdin.buffer.readline\nQ = int(input())\nQuery = []\nfor _ in range(Q):\n N, M = map(int, input().split())\n graph = [[] for _ in range(N)]\n for _ in range(M):\n a, b = map(int, input().split())\n graph[a - 1].append(b - 1)\n Query.append((N, M, graph))\nfor N, M, graph in Query:\n Delete = [False] * N\n Seq = [0] * N\n for i in range(N):\n if Seq[i] >= 2:\n Delete[i] = True\n Seq[i] = -1\n for p in graph[i]:\n Seq[p] = max(Seq[i] + 1, Seq[p])\n ans = []\n for i in range(N):\n if Delete[i]:\n ans.append(i + 1)\n print(len(ans))\n print(*ans)\n",
"<import token>\ninput = sys.stdin.buffer.readline\nQ = int(input())\nQuery = []\nfor _ in range(Q):\n N, M = map(int, input().split())\n graph = [[] for _ in range(N)]\n for _ in range(M):\n a, b = map(int, input().split())\n graph[a - 1].append(b - 1)\n Query.append((N, M, graph))\nfor N, M, graph in Query:\n Delete = [False] * N\n Seq = [0] * N\n for i in range(N):\n if Seq[i] >= 2:\n Delete[i] = True\n Seq[i] = -1\n for p in graph[i]:\n Seq[p] = max(Seq[i] + 1, Seq[p])\n ans = []\n for i in range(N):\n if Delete[i]:\n ans.append(i + 1)\n print(len(ans))\n print(*ans)\n",
"<import token>\n<assignment token>\nfor _ in range(Q):\n N, M = map(int, input().split())\n graph = [[] for _ in range(N)]\n for _ in range(M):\n a, b = map(int, input().split())\n graph[a - 1].append(b - 1)\n Query.append((N, M, graph))\nfor N, M, graph in Query:\n Delete = [False] * N\n Seq = [0] * N\n for i in range(N):\n if Seq[i] >= 2:\n Delete[i] = True\n Seq[i] = -1\n for p in graph[i]:\n Seq[p] = max(Seq[i] + 1, Seq[p])\n ans = []\n for i in range(N):\n if Delete[i]:\n ans.append(i + 1)\n print(len(ans))\n print(*ans)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,816 |
ad13c22192b85b18eb43c73144b96a229a1bdf26
|
import json
import os
from ewiis3_python_scripts import MODEL_EVALUATION_FILE_PATH, MODEL_DIR
def store_model_selection(best_models, customer):
model_evaluations = load_model_selection()
model_evaluations[customer] = best_models
with open(MODEL_EVALUATION_FILE_PATH, 'w') as fp:
json.dump(model_evaluations, fp)
def load_model_selection():
approach_calculations = {}
if os.path.isfile(MODEL_EVALUATION_FILE_PATH):
approach_calculations = json.load(open(MODEL_EVALUATION_FILE_PATH))
return approach_calculations
def check_for_model_existence(model_path):
return os.path.isfile(model_path)
def build_model_save_path(game_id, target, type, model_name):
return '{}{}_{}_{}_{}.pkl'.format(MODEL_DIR, game_id, target, type, model_name)
|
[
"import json\nimport os\n\nfrom ewiis3_python_scripts import MODEL_EVALUATION_FILE_PATH, MODEL_DIR\n\n\ndef store_model_selection(best_models, customer):\n model_evaluations = load_model_selection()\n model_evaluations[customer] = best_models\n with open(MODEL_EVALUATION_FILE_PATH, 'w') as fp:\n json.dump(model_evaluations, fp)\n\n\ndef load_model_selection():\n approach_calculations = {}\n if os.path.isfile(MODEL_EVALUATION_FILE_PATH):\n approach_calculations = json.load(open(MODEL_EVALUATION_FILE_PATH))\n return approach_calculations\n\n\ndef check_for_model_existence(model_path):\n return os.path.isfile(model_path)\n\n\ndef build_model_save_path(game_id, target, type, model_name):\n return '{}{}_{}_{}_{}.pkl'.format(MODEL_DIR, game_id, target, type, model_name)\n",
"import json\nimport os\nfrom ewiis3_python_scripts import MODEL_EVALUATION_FILE_PATH, MODEL_DIR\n\n\ndef store_model_selection(best_models, customer):\n model_evaluations = load_model_selection()\n model_evaluations[customer] = best_models\n with open(MODEL_EVALUATION_FILE_PATH, 'w') as fp:\n json.dump(model_evaluations, fp)\n\n\ndef load_model_selection():\n approach_calculations = {}\n if os.path.isfile(MODEL_EVALUATION_FILE_PATH):\n approach_calculations = json.load(open(MODEL_EVALUATION_FILE_PATH))\n return approach_calculations\n\n\ndef check_for_model_existence(model_path):\n return os.path.isfile(model_path)\n\n\ndef build_model_save_path(game_id, target, type, model_name):\n return '{}{}_{}_{}_{}.pkl'.format(MODEL_DIR, game_id, target, type,\n model_name)\n",
"<import token>\n\n\ndef store_model_selection(best_models, customer):\n model_evaluations = load_model_selection()\n model_evaluations[customer] = best_models\n with open(MODEL_EVALUATION_FILE_PATH, 'w') as fp:\n json.dump(model_evaluations, fp)\n\n\ndef load_model_selection():\n approach_calculations = {}\n if os.path.isfile(MODEL_EVALUATION_FILE_PATH):\n approach_calculations = json.load(open(MODEL_EVALUATION_FILE_PATH))\n return approach_calculations\n\n\ndef check_for_model_existence(model_path):\n return os.path.isfile(model_path)\n\n\ndef build_model_save_path(game_id, target, type, model_name):\n return '{}{}_{}_{}_{}.pkl'.format(MODEL_DIR, game_id, target, type,\n model_name)\n",
"<import token>\n\n\ndef store_model_selection(best_models, customer):\n model_evaluations = load_model_selection()\n model_evaluations[customer] = best_models\n with open(MODEL_EVALUATION_FILE_PATH, 'w') as fp:\n json.dump(model_evaluations, fp)\n\n\ndef load_model_selection():\n approach_calculations = {}\n if os.path.isfile(MODEL_EVALUATION_FILE_PATH):\n approach_calculations = json.load(open(MODEL_EVALUATION_FILE_PATH))\n return approach_calculations\n\n\n<function token>\n\n\ndef build_model_save_path(game_id, target, type, model_name):\n return '{}{}_{}_{}_{}.pkl'.format(MODEL_DIR, game_id, target, type,\n model_name)\n",
"<import token>\n<function token>\n\n\ndef load_model_selection():\n approach_calculations = {}\n if os.path.isfile(MODEL_EVALUATION_FILE_PATH):\n approach_calculations = json.load(open(MODEL_EVALUATION_FILE_PATH))\n return approach_calculations\n\n\n<function token>\n\n\ndef build_model_save_path(game_id, target, type, model_name):\n return '{}{}_{}_{}_{}.pkl'.format(MODEL_DIR, game_id, target, type,\n model_name)\n",
"<import token>\n<function token>\n\n\ndef load_model_selection():\n approach_calculations = {}\n if os.path.isfile(MODEL_EVALUATION_FILE_PATH):\n approach_calculations = json.load(open(MODEL_EVALUATION_FILE_PATH))\n return approach_calculations\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,817 |
9dd19432e1f9bbc60d19a4b9029b919f5329046a
|
import time
import colorama
from colorama import Fore
from termcolor import colored as color
print(color("""
How to use?!
{Coded by avinoire}
{Dead_Lucifer\'s materials}""","red"))
time.sleep(0.5)
while True:
time.sleep(0.1)
print(color("""
[1] How to use?
[2] Authors
[3] Exit""","cyan"))
anv = input(f"""{Fore.RED}$ {Fore.CYAN}""")
if anv == "2":
time.sleep(0.7)
print(f"""
{Fore.MAGENTA}A {Fore.GREEN}u {Fore.RED}T {Fore.CYAN}h {Fore.YELLOW}O {Fore.MAGENTA}r {Fore.RED}S {Fore.GREEN}""")
print(color("""This programm was written by AviNoire. Material provided by Dead_Lucifer.
Check Telegramm by tags:
[Avinoire] - @avinoire
[Lucifer] - @Dead_Lucifer_666 ""","cyan"))
time.sleep(1)
elif anv == "1":
time.sleep(0.7)
print(f"""
{Fore.RED}Inscruction step by step.
{Fore.GREEN}1) start VPN
{Fore.GREEN}2) trigger bigbomb.py (bigbomb.py)
{Fore.GREEN}3) write your\'s bot token ( my should create it in @BotFather on Telegram ) and your Telegram id
{Fore.GREEN}4) go to your bot into Telegram and push Start
{Fore.GREEN}5) You will Menu on russian language
{Fore.GREEN}6) push \'Бомбер\'
{Fore.GREEN}7) Enter phone number
{Fore.GREEN}8) Relax""")
time.sleep(1)
elif anv == "3":
break
else:
print(color("{---Unknown Command---}","red"))
|
[
"import time\nimport colorama\nfrom colorama import Fore\nfrom termcolor import colored as color\n\nprint(color(\"\"\"\n How to use?!\n {Coded by avinoire}\n {Dead_Lucifer\\'s materials}\"\"\",\"red\"))\ntime.sleep(0.5)\nwhile True:\n\ttime.sleep(0.1)\n\tprint(color(\"\"\"\n[1] How to use?\n[2] Authors\n[3] Exit\"\"\",\"cyan\"))\n\tanv = input(f\"\"\"{Fore.RED}$ {Fore.CYAN}\"\"\")\n\tif anv == \"2\":\n\t\ttime.sleep(0.7)\n\t\tprint(f\"\"\"\n\t\t\t {Fore.MAGENTA}A {Fore.GREEN}u {Fore.RED}T {Fore.CYAN}h {Fore.YELLOW}O {Fore.MAGENTA}r {Fore.RED}S {Fore.GREEN}\"\"\")\n\t\tprint(color(\"\"\"This programm was written by AviNoire. Material provided by Dead_Lucifer.\n Check Telegramm by tags:\n [Avinoire] - @avinoire\n [Lucifer] - @Dead_Lucifer_666 \"\"\",\"cyan\"))\n\t\ttime.sleep(1)\n\telif anv == \"1\":\n\t\ttime.sleep(0.7)\n\t\tprint(f\"\"\"\n{Fore.RED}Inscruction step by step.\n\n{Fore.GREEN}1) start VPN\n\n{Fore.GREEN}2) trigger bigbomb.py (bigbomb.py)\n\n{Fore.GREEN}3) write your\\'s bot token ( my should create it in @BotFather on Telegram ) and your Telegram id\n\n{Fore.GREEN}4) go to your bot into Telegram and push Start\n\n{Fore.GREEN}5) You will Menu on russian language\n\n{Fore.GREEN}6) push \\'Бомбер\\'\n\n{Fore.GREEN}7) Enter phone number\n\n{Fore.GREEN}8) Relax\"\"\")\n\t\ttime.sleep(1)\n\telif anv == \"3\":\n\t\tbreak\n\telse:\n\t\tprint(color(\"{---Unknown Command---}\",\"red\"))\n",
"import time\nimport colorama\nfrom colorama import Fore\nfrom termcolor import colored as color\nprint(color(\n \"\"\"\n How to use?!\n {Coded by avinoire}\n {Dead_Lucifer's materials}\"\"\"\n , 'red'))\ntime.sleep(0.5)\nwhile True:\n time.sleep(0.1)\n print(color('\\n[1] How to use?\\n[2] Authors\\n[3] Exit', 'cyan'))\n anv = input(f'{Fore.RED}$ {Fore.CYAN}')\n if anv == '2':\n time.sleep(0.7)\n print(\n f\"\"\"\n\t\t\t {Fore.MAGENTA}A {Fore.GREEN}u {Fore.RED}T {Fore.CYAN}h {Fore.YELLOW}O {Fore.MAGENTA}r {Fore.RED}S {Fore.GREEN}\"\"\"\n )\n print(color(\n \"\"\"This programm was written by AviNoire. Material provided by Dead_Lucifer.\n Check Telegramm by tags:\n [Avinoire] - @avinoire\n [Lucifer] - @Dead_Lucifer_666 \"\"\"\n , 'cyan'))\n time.sleep(1)\n elif anv == '1':\n time.sleep(0.7)\n print(\n f\"\"\"\n{Fore.RED}Inscruction step by step.\n\n{Fore.GREEN}1) start VPN\n\n{Fore.GREEN}2) trigger bigbomb.py (bigbomb.py)\n\n{Fore.GREEN}3) write your's bot token ( my should create it in @BotFather on Telegram ) and your Telegram id\n\n{Fore.GREEN}4) go to your bot into Telegram and push Start\n\n{Fore.GREEN}5) You will Menu on russian language\n\n{Fore.GREEN}6) push 'Бомбер'\n\n{Fore.GREEN}7) Enter phone number\n\n{Fore.GREEN}8) Relax\"\"\"\n )\n time.sleep(1)\n elif anv == '3':\n break\n else:\n print(color('{---Unknown Command---}', 'red'))\n",
"<import token>\nprint(color(\n \"\"\"\n How to use?!\n {Coded by avinoire}\n {Dead_Lucifer's materials}\"\"\"\n , 'red'))\ntime.sleep(0.5)\nwhile True:\n time.sleep(0.1)\n print(color('\\n[1] How to use?\\n[2] Authors\\n[3] Exit', 'cyan'))\n anv = input(f'{Fore.RED}$ {Fore.CYAN}')\n if anv == '2':\n time.sleep(0.7)\n print(\n f\"\"\"\n\t\t\t {Fore.MAGENTA}A {Fore.GREEN}u {Fore.RED}T {Fore.CYAN}h {Fore.YELLOW}O {Fore.MAGENTA}r {Fore.RED}S {Fore.GREEN}\"\"\"\n )\n print(color(\n \"\"\"This programm was written by AviNoire. Material provided by Dead_Lucifer.\n Check Telegramm by tags:\n [Avinoire] - @avinoire\n [Lucifer] - @Dead_Lucifer_666 \"\"\"\n , 'cyan'))\n time.sleep(1)\n elif anv == '1':\n time.sleep(0.7)\n print(\n f\"\"\"\n{Fore.RED}Inscruction step by step.\n\n{Fore.GREEN}1) start VPN\n\n{Fore.GREEN}2) trigger bigbomb.py (bigbomb.py)\n\n{Fore.GREEN}3) write your's bot token ( my should create it in @BotFather on Telegram ) and your Telegram id\n\n{Fore.GREEN}4) go to your bot into Telegram and push Start\n\n{Fore.GREEN}5) You will Menu on russian language\n\n{Fore.GREEN}6) push 'Бомбер'\n\n{Fore.GREEN}7) Enter phone number\n\n{Fore.GREEN}8) Relax\"\"\"\n )\n time.sleep(1)\n elif anv == '3':\n break\n else:\n print(color('{---Unknown Command---}', 'red'))\n",
"<import token>\n<code token>\n"
] | false |
98,818 |
f5c521432524315e698ff9578c33498d8016ed82
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 00:03:13 2018

@author: amitangshu
"""
import keras
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.layers import DepthwiseConv2D


BASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'


def MobileNet(input_shape=None,
              alpha=1.0,
              depth_multiplier=1,
              dropout=1e-3,
              include_top=True,
              weights='imagenet',
              input_tensor=None,
              pooling=None,
              classes=1000,
              output_dim=1):
    """
    Define model architecture.

    # Arguments
        input_shape: Target image shape as (height, width, channels).
        alpha: Width multiplier for the pointwise convolution filters.
        depth_multiplier: Depth multiplier for the depthwise convolution.
        output_dim: Dimension of each model output (steering and collision).

    # Returns
        model: A Model instance.
    """
    # NOTE: include_top, weights, input_tensor, pooling and classes are kept for
    # API compatibility with keras.applications but are unused in this variant.
    # output_dim is given a default (1) so it can legally follow the keyword
    # arguments above; the original signature raised a SyntaxError.

    # Unpack the target image dimensions (200x200 RGB is an assumed fallback).
    if input_shape is None:
        input_shape = (200, 200, 3)
    img_height, img_width, img_channels = input_shape

    # Input
    img_input = Input(shape=(img_height, img_width, img_channels))

    x1 = Conv2D(32, (5, 5), strides=[2, 2], padding='same')(img_input)
    x1 = MaxPooling2D(pool_size=(3, 3), strides=[2, 2])(x1)

    # First depthwise-separable block (depthwise conv followed by pointwise conv).
    # block_id and the strides were undefined in the original; 1 and (1, 1) are assumed.
    block_id = 1
    x2 = DepthwiseConv2D((3, 3),
                         padding='valid',
                         depth_multiplier=depth_multiplier,
                         strides=(1, 1),
                         use_bias=False,
                         name='conv_dw_%d' % block_id)(x1)
    x2 = BatchNormalization()(x2)
    x2 = Activation('relu')(x2)
    # Pointwise convolution; the original omitted the filter count, 64 * alpha is assumed.
    x2 = Conv2D(int(64 * alpha), (1, 1),
                padding='same',
                use_bias=False,
                strides=(1, 1),
                name='conv_pw_%d' % block_id)(x2)
    x2 = BatchNormalization()(x2)
    x2 = Activation('relu')(x2)

    x3 = Flatten()(x2)
    x3 = Activation('relu')(x3)
    x3 = Dropout(0.5)(x3)

    # Steering channel (regression output)
    steer = Dense(output_dim)(x3)

    # Collision channel (probability output)
    coll = Dense(output_dim)(x3)
    coll = Activation('sigmoid')(coll)

    # Define steering-collision model
    model = Model(inputs=[img_input], outputs=[steer, coll])
    model.summary()

    return model
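

# A minimal usage sketch (added for illustration, not part of the original script):
# it builds the model for 200x200 RGB inputs with scalar steering/collision outputs.
# The input_shape and output_dim values here are assumptions, not from the source.
if __name__ == '__main__':
    demo_model = MobileNet(input_shape=(200, 200, 3), output_dim=1)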
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 28 00:03:13 2018\n\n@author: amitangshu\n\"\"\"\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Input\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.layers import DepthwiseConv2D\n\n\n\nBASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'\n\n\n\n\n\ndef MobileNet(input_shape=None,\n alpha=1.0,\n depth_multiplier=1,\n dropout=1e-3,\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n pooling=None,\n classes=1000,\n output_dim):\n \"\"\"\n Define model architecture.\n \n # Arguments\n img_width: Target image widht.\n img_height: Target image height.\n img_channels: Target image channels.\n output_dim: Dimension of model output.\n \n # Returns\n model: A Model instance.\n \"\"\"\n\n # Input\n img_input = Input(shape=(img_height, img_width, img_channels))\n\n x1 = Conv2D(32, (5, 5), strides=[2,2], padding='same')(img_input)\n x1 = MaxPooling2D(pool_size=(3, 3), strides=[2,2])(x1)\n\n # First residual block\n x2=DepthwiseConv2D((3, 3),\n padding='valid',\n depth_multiplier=depth_multiplier,\n strides=strides,\n use_bias=False,\n name='conv_dw_%d' % block_id)(x1)\n x2 = keras.layers.normalization.BatchNormalization()(x2)\n x2 = Activation('relu')(x2)\n x2 = Conv2D((1, 1),\n padding='same',\n use_bias=False,\n strides=(1, 1),\n name='conv_pw_%d' % block_id)(x2)\n\n x2 = keras.layers.normalization.BatchNormalization()(x2)\n x2 = Activation('relu')(x2)\n \n x3 = Flatten()(x2)\n x3 = Activation('relu')(x3)\n x3 = Dropout(0.5)(x3)\n\n # Steering channel\n steer = Dense(output_dim)(x3)\n\n # Collision channel\n coll = Dense(output_dim)(x3)\n coll = Activation('sigmoid')(coll)\n\n # Define steering-collision model\n model = Model(inputs=[img_input], outputs=[steer, coll])\n print(model.summary())\n\n return model\n\n"
] | true |
98,819 |
a8a48a003a0135ae0a5bd16a9c896d537035253b
|
from blackbox.handlers.databases import MariaDB
from blackbox.handlers.databases._base import BlackboxDatabase
class MySQL(MariaDB, BlackboxDatabase):
"""A Database handler that will do a mysqldump for MySQL, backing up all tables."""
pass
|
[
"from blackbox.handlers.databases import MariaDB\nfrom blackbox.handlers.databases._base import BlackboxDatabase\n\n\nclass MySQL(MariaDB, BlackboxDatabase):\n \"\"\"A Database handler that will do a mysqldump for MySQL, backing up all tables.\"\"\"\n\n pass\n",
"from blackbox.handlers.databases import MariaDB\nfrom blackbox.handlers.databases._base import BlackboxDatabase\n\n\nclass MySQL(MariaDB, BlackboxDatabase):\n \"\"\"A Database handler that will do a mysqldump for MySQL, backing up all tables.\"\"\"\n pass\n",
"<import token>\n\n\nclass MySQL(MariaDB, BlackboxDatabase):\n \"\"\"A Database handler that will do a mysqldump for MySQL, backing up all tables.\"\"\"\n pass\n",
"<import token>\n\n\nclass MySQL(MariaDB, BlackboxDatabase):\n <docstring token>\n pass\n",
"<import token>\n<class token>\n"
] | false |
98,820 |
c577ae40fcf61da72dea68a448ff44a6968d7892
|
lb=int(input("enter lower bound: "))
ub=int(input("enter upper bound: "))
print("Displaying numbers in asending order")
i=lb
while(i<=ub):
print(i,end=",")
i=i+1
# To go to the next line
print()
print("Displaying numbers in desending order")
i=ub
while(i>=lb):
print(i,end=",")
i=i-1
|
[
"lb=int(input(\"enter lower bound: \"))\nub=int(input(\"enter upper bound: \"))\n\nprint(\"Displaying numbers in asending order\")\ni=lb\nwhile(i<=ub):\n print(i,end=\",\")\n i=i+1\n\n# To go to the next line\nprint()\n\nprint(\"Displaying numbers in desending order\")\ni=ub\nwhile(i>=lb):\n print(i,end=\",\")\n i=i-1\n\n \n ",
"lb = int(input('enter lower bound: '))\nub = int(input('enter upper bound: '))\nprint('Displaying numbers in asending order')\ni = lb\nwhile i <= ub:\n print(i, end=',')\n i = i + 1\nprint()\nprint('Displaying numbers in desending order')\ni = ub\nwhile i >= lb:\n print(i, end=',')\n i = i - 1\n",
"<assignment token>\nprint('Displaying numbers in asending order')\n<assignment token>\nwhile i <= ub:\n print(i, end=',')\n i = i + 1\nprint()\nprint('Displaying numbers in desending order')\n<assignment token>\nwhile i >= lb:\n print(i, end=',')\n i = i - 1\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,821 |
c017ae6a50f60f0f54c9462eba39e5353cb6d59c
|
import random
from random_word import RandomWords
word_list = ["laptop", "magazine", "phone", "clarify", "abbreviate", "lucky", "luxury", "example", "absurd",
"subway", "syndrome"]
stages = [ '''
+---+
| |
O |
/|\ |
/ \ |
|
=========
''' , '''
+---+
| |
O |
/|\ |
/ |
|
=========
''' , '''
+---+
| |
O |
/| |
|
|
=========
''' , '''
+---+
| |
O |
| |
|
|
=========
''' , '''
+---+
| |
O |
|
|
|
=========
''' , '''
+---+
| |
|
|
|
|
=========
''']
# Choosing random word from word list
def blanks_(word_length):
display_blanks = []
for _ in range(word_length):
display_blanks += "_"
return display_blanks
# Ask user to guess a letter
def user_guess():
user_guess_ = input("Guess a letter: ")
return user_guess_.lower()
# Check if user guessed word matches the chosen word
def main():
# r = RandomWords()
# word_list = r.get_random_words()
chosen_word = random.choice(word_list)
word_length = len(chosen_word)
blanks = blanks_(word_length)
print(blanks)
lives = 6
word_guess_end = False
while not word_guess_end:
guess = user_guess()
for position in range(word_length):
letter = chosen_word[position]
if letter == guess:
blanks[position] = letter
print(blanks)
if guess not in chosen_word:
lives = lives - 1
print(stages[lives])
if lives == 0:
word_guess_end = True
print("You lose")
print("The word is : ", chosen_word)
if "_" not in blanks:
word_guess_end = True
print("You win!!")
if __name__ == "__main__":
print( '''
_
| |
| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __
| '_ \ / _` | '_ \ / _` | '_ ` _ \ / _` | '_ \
| | | | (_| | | | | (_| | | | | | | (_| | | | |
|_| |_|\__,_|_| |_|\__, |_| |_| |_|\__,_|_| |_|
__/ |
|___/
''' )
print("You have 6 lives")
main()
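# Illustrative behaviour of the helpers above (assumed example, not executed):
# blanks_(6) -> ['_', '_', '_', '_', '_', '_']
# guessing 'p' against "laptop" fills positions 2 and 5:
# ['_', '_', 'p', '_', '_', 'p']; six wrong guesses end the game.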
|
[
"import random\r\nfrom random_word import RandomWords\r\n\r\nword_list = [\"laptop\", \"magazine\", \"phone\", \"clarify\", \"abbreviate\", \"lucky\", \"luxury\", \"example\", \"absurd\",\r\n \"subway\", \"syndrome\"]\r\n\r\n\r\nstages = [ '''\r\n +---+\r\n | |\r\n O |\r\n /|\\ |\r\n / \\ |\r\n |\r\n========= \r\n''' , '''\r\n\r\n +---+\r\n | |\r\n O |\r\n /|\\ |\r\n / |\r\n |\r\n========= \r\n''' , '''\r\n\r\n +---+\r\n | |\r\n O |\r\n /| |\r\n |\r\n |\r\n========= \r\n''' , '''\r\n\r\n +---+\r\n | |\r\n O |\r\n | |\r\n |\r\n |\r\n========= \r\n''' , '''\r\n\r\n +---+\r\n | |\r\n O |\r\n |\r\n |\r\n |\r\n========= \r\n''' , '''\r\n\r\n +---+\r\n | | \r\n |\r\n |\r\n |\r\n |\r\n========= \r\n''']\r\n# Choosing random word from word list\r\n\r\n\r\ndef blanks_(word_length):\r\n display_blanks = []\r\n for _ in range(word_length):\r\n display_blanks += \"_\"\r\n return display_blanks\r\n\r\n\r\n# Ask user to guess a letter\r\n\r\ndef user_guess():\r\n user_guess_ = input(\"Guess a letter: \")\r\n return user_guess_.lower()\r\n\r\n# Check if user guessed word matches the chosen word\r\n\r\n\r\ndef main():\r\n # r = RandomWords()\r\n # word_list = r.get_random_words()\r\n chosen_word = random.choice(word_list)\r\n word_length = len(chosen_word)\r\n blanks = blanks_(word_length)\r\n print(blanks)\r\n lives = 6\r\n word_guess_end = False\r\n\r\n while not word_guess_end:\r\n guess = user_guess()\r\n for position in range(word_length):\r\n letter = chosen_word[position]\r\n if letter == guess:\r\n blanks[position] = letter\r\n print(blanks)\r\n\r\n if guess not in chosen_word:\r\n lives = lives - 1\r\n print(stages[lives])\r\n if lives == 0:\r\n word_guess_end = True\r\n print(\"You lose\")\r\n print(\"The word is : \", chosen_word)\r\n\r\n if \"_\" not in blanks:\r\n word_guess_end = True\r\n print(\"You win!!\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print( ''' \r\n _ \r\n| | \r\n| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __ \r\n| '_ \\ / _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\ \r\n| | | | (_| | | | | (_| | | | | | | (_| | | | |\r\n|_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\r\n __/ | \r\n |___/ \r\n''' )\r\n\r\n print(\"You have 6 lives\")\r\n main()\r\n",
"import random\nfrom random_word import RandomWords\nword_list = ['laptop', 'magazine', 'phone', 'clarify', 'abbreviate',\n 'lucky', 'luxury', 'example', 'absurd', 'subway', 'syndrome']\nstages = [\n \"\"\"\n +---+\n | |\n O |\n /|\\\\ |\n / \\\\ |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | |\n O |\n /|\\\\ |\n / |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | |\n O |\n /| |\n |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | |\n O |\n | |\n |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | |\n O |\n |\n |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | | \n |\n |\n |\n |\n========= \n\"\"\"]\n\n\ndef blanks_(word_length):\n display_blanks = []\n for _ in range(word_length):\n display_blanks += '_'\n return display_blanks\n\n\ndef user_guess():\n user_guess_ = input('Guess a letter: ')\n return user_guess_.lower()\n\n\ndef main():\n chosen_word = random.choice(word_list)\n word_length = len(chosen_word)\n blanks = blanks_(word_length)\n print(blanks)\n lives = 6\n word_guess_end = False\n while not word_guess_end:\n guess = user_guess()\n for position in range(word_length):\n letter = chosen_word[position]\n if letter == guess:\n blanks[position] = letter\n print(blanks)\n if guess not in chosen_word:\n lives = lives - 1\n print(stages[lives])\n if lives == 0:\n word_guess_end = True\n print('You lose')\n print('The word is : ', chosen_word)\n if '_' not in blanks:\n word_guess_end = True\n print('You win!!')\n\n\nif __name__ == '__main__':\n print(\n \"\"\" \n _ \n| | \n| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __ \n| '_ \\\\ / _` | '_ \\\\ / _` | '_ ` _ \\\\ / _` | '_ \\\\ \n| | | | (_| | | | | (_| | | | | | | (_| | | | |\n|_| |_|\\\\__,_|_| |_|\\\\__, |_| |_| |_|\\\\__,_|_| |_|\n __/ | \n |___/ \n\"\"\"\n )\n print('You have 6 lives')\n main()\n",
"<import token>\nword_list = ['laptop', 'magazine', 'phone', 'clarify', 'abbreviate',\n 'lucky', 'luxury', 'example', 'absurd', 'subway', 'syndrome']\nstages = [\n \"\"\"\n +---+\n | |\n O |\n /|\\\\ |\n / \\\\ |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | |\n O |\n /|\\\\ |\n / |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | |\n O |\n /| |\n |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | |\n O |\n | |\n |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | |\n O |\n |\n |\n |\n========= \n\"\"\",\n \"\"\"\n\n +---+\n | | \n |\n |\n |\n |\n========= \n\"\"\"]\n\n\ndef blanks_(word_length):\n display_blanks = []\n for _ in range(word_length):\n display_blanks += '_'\n return display_blanks\n\n\ndef user_guess():\n user_guess_ = input('Guess a letter: ')\n return user_guess_.lower()\n\n\ndef main():\n chosen_word = random.choice(word_list)\n word_length = len(chosen_word)\n blanks = blanks_(word_length)\n print(blanks)\n lives = 6\n word_guess_end = False\n while not word_guess_end:\n guess = user_guess()\n for position in range(word_length):\n letter = chosen_word[position]\n if letter == guess:\n blanks[position] = letter\n print(blanks)\n if guess not in chosen_word:\n lives = lives - 1\n print(stages[lives])\n if lives == 0:\n word_guess_end = True\n print('You lose')\n print('The word is : ', chosen_word)\n if '_' not in blanks:\n word_guess_end = True\n print('You win!!')\n\n\nif __name__ == '__main__':\n print(\n \"\"\" \n _ \n| | \n| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __ \n| '_ \\\\ / _` | '_ \\\\ / _` | '_ ` _ \\\\ / _` | '_ \\\\ \n| | | | (_| | | | | (_| | | | | | | (_| | | | |\n|_| |_|\\\\__,_|_| |_|\\\\__, |_| |_| |_|\\\\__,_|_| |_|\n __/ | \n |___/ \n\"\"\"\n )\n print('You have 6 lives')\n main()\n",
"<import token>\n<assignment token>\n\n\ndef blanks_(word_length):\n display_blanks = []\n for _ in range(word_length):\n display_blanks += '_'\n return display_blanks\n\n\ndef user_guess():\n user_guess_ = input('Guess a letter: ')\n return user_guess_.lower()\n\n\ndef main():\n chosen_word = random.choice(word_list)\n word_length = len(chosen_word)\n blanks = blanks_(word_length)\n print(blanks)\n lives = 6\n word_guess_end = False\n while not word_guess_end:\n guess = user_guess()\n for position in range(word_length):\n letter = chosen_word[position]\n if letter == guess:\n blanks[position] = letter\n print(blanks)\n if guess not in chosen_word:\n lives = lives - 1\n print(stages[lives])\n if lives == 0:\n word_guess_end = True\n print('You lose')\n print('The word is : ', chosen_word)\n if '_' not in blanks:\n word_guess_end = True\n print('You win!!')\n\n\nif __name__ == '__main__':\n print(\n \"\"\" \n _ \n| | \n| |__ __ _ _ __ __ _ _ __ ___ __ _ _ __ \n| '_ \\\\ / _` | '_ \\\\ / _` | '_ ` _ \\\\ / _` | '_ \\\\ \n| | | | (_| | | | | (_| | | | | | | (_| | | | |\n|_| |_|\\\\__,_|_| |_|\\\\__, |_| |_| |_|\\\\__,_|_| |_|\n __/ | \n |___/ \n\"\"\"\n )\n print('You have 6 lives')\n main()\n",
"<import token>\n<assignment token>\n\n\ndef blanks_(word_length):\n display_blanks = []\n for _ in range(word_length):\n display_blanks += '_'\n return display_blanks\n\n\ndef user_guess():\n user_guess_ = input('Guess a letter: ')\n return user_guess_.lower()\n\n\ndef main():\n chosen_word = random.choice(word_list)\n word_length = len(chosen_word)\n blanks = blanks_(word_length)\n print(blanks)\n lives = 6\n word_guess_end = False\n while not word_guess_end:\n guess = user_guess()\n for position in range(word_length):\n letter = chosen_word[position]\n if letter == guess:\n blanks[position] = letter\n print(blanks)\n if guess not in chosen_word:\n lives = lives - 1\n print(stages[lives])\n if lives == 0:\n word_guess_end = True\n print('You lose')\n print('The word is : ', chosen_word)\n if '_' not in blanks:\n word_guess_end = True\n print('You win!!')\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef blanks_(word_length):\n display_blanks = []\n for _ in range(word_length):\n display_blanks += '_'\n return display_blanks\n\n\n<function token>\n\n\ndef main():\n chosen_word = random.choice(word_list)\n word_length = len(chosen_word)\n blanks = blanks_(word_length)\n print(blanks)\n lives = 6\n word_guess_end = False\n while not word_guess_end:\n guess = user_guess()\n for position in range(word_length):\n letter = chosen_word[position]\n if letter == guess:\n blanks[position] = letter\n print(blanks)\n if guess not in chosen_word:\n lives = lives - 1\n print(stages[lives])\n if lives == 0:\n word_guess_end = True\n print('You lose')\n print('The word is : ', chosen_word)\n if '_' not in blanks:\n word_guess_end = True\n print('You win!!')\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef blanks_(word_length):\n display_blanks = []\n for _ in range(word_length):\n display_blanks += '_'\n return display_blanks\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,822 |
bc0b7883331bb020bc2665208e06b49b6d2df700
|
"""The Geodataframe adapter implementation."""
import logging
import warnings
from os.path import join
from pathlib import Path
from typing import NewType, Union
import geopandas as gpd
import numpy as np
from shapely.geometry import box
from .. import io
from .data_adapter import DataAdapter
logger = logging.getLogger(__name__)
__all__ = ["GeoDataFrameAdapter", "GeoDataframeSource"]
GeoDataframeSource = NewType("GeoDataframeSource", Union[str, Path])
class GeoDataFrameAdapter(DataAdapter):
"""The Geodataframe adapter implementation."""
_DEFAULT_DRIVER = "vector"
_DRIVERS = {
"xy": "xy",
"csv": "csv",
"parquet": "parquet",
"xls": "xls",
"xlsx": "xlsx",
}
def __init__(
self,
path: str,
driver: str = None,
filesystem: str = "local",
crs: Union[int, str, dict] = None,
nodata: Union[dict, float, int] = None,
rename: dict = {},
unit_mult: dict = {},
unit_add: dict = {},
meta: dict = {},
attrs: dict = {},
driver_kwargs: dict = {},
name: str = "", # optional for now
catalog_name: str = "", # optional for now
provider=None,
version=None,
**kwargs,
):
"""Initiate data adapter for geospatial vector data.
This object contains all properties required to read supported files into
a single unified :py:func:`geopandas.GeoDataFrame`.
        In addition, it keeps metadata to be able to reproduce which data is used.
Parameters
----------
path: str, Path
Path to data source. If the dataset consists of multiple files, the path may
            contain {variable} placeholders as well as a path
            search pattern using a '*' wildcard.
driver: {'vector', 'vector_table'}, optional
Driver to read files with, for 'vector' :py:func:`~geopandas.read_file`,
for {'vector_table'} :py:func:`hydromt.io.open_vector_from_table`
By default the driver is inferred from the file extension and falls back to
'vector' if unknown.
filesystem: {'local', 'gcs', 's3'}, optional
Filesystem where the data is stored (local, cloud, http etc.).
By default, local.
crs: int, dict, or str, optional
Coordinate Reference System. Accepts EPSG codes (int or str);
proj (str or dict) or wkt (str). Only used if the data has no native CRS.
nodata: dictionary, float, int, optional
Missing value number. Only used if the data has no native missing value.
Nodata values can be differentiated between variables using a dictionary.
rename: dict, optional
Mapping of native data source variable to output source variable name as
required by hydroMT.
unit_mult, unit_add: dict, optional
            Scaling multiplication and addition to convert from the native
            data unit to the output data unit as required by hydroMT.
meta: dict, optional
            Metadata information of dataset, preferably containing the following keys:
{'source_version', 'source_url', 'source_license',
'paper_ref', 'paper_doi', 'category'}
placeholders: dict, optional
Placeholders to expand yaml entry to multiple entries (name and path)
based on placeholder values
attrs: dict, optional
Additional attributes relating to data variables. For instance unit
or long name of the variable.
        driver_kwargs: dict, optional
Additional key-word arguments passed to the driver.
name, catalog_name: str, optional
Name of the dataset and catalog, optional for now.
"""
if kwargs:
warnings.warn(
"Passing additional keyword arguments to be used by the "
"GeoDataFrameAdapter driver is deprecated and will be removed "
"in a future version. Please use 'driver_kwargs' instead.",
DeprecationWarning,
)
driver_kwargs.update(kwargs)
super().__init__(
path=path,
driver=driver,
filesystem=filesystem,
nodata=nodata,
rename=rename,
unit_mult=unit_mult,
unit_add=unit_add,
meta=meta,
attrs=attrs,
driver_kwargs=driver_kwargs,
name=name,
catalog_name=catalog_name,
provider=provider,
version=version,
)
self.crs = crs
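    # A minimal usage sketch (assumed, not from the source); the path, CRS and
    # rename mapping below are hypothetical, and in practice adapters are
    # typically created through a hydromt DataCatalog entry rather than
    # instantiated directly:
    #   adapter = GeoDataFrameAdapter(
    #       path="data/rivers.gpkg",          # hypothetical file
    #       driver="vector",
    #       crs=4326,
    #       rename={"NAME": "river_name"},
    #       meta={"category": "hydrography"},
    #   )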
def to_file(
self,
data_root,
data_name,
bbox=None,
driver=None,
variables=None,
logger=logger,
**kwargs,
):
"""Save a data slice to file.
Parameters
----------
data_root : str, Path
Path to output folder
data_name : str
Name of output file without extension.
bbox : array-like of floats
(xmin, ymin, xmax, ymax) bounding box of area of interest.
driver : str, optional
Driver to write file, e.g.: 'GPKG', 'ESRI Shapefile' or any fiona data type,
by default None
variables : list of str, optional
Names of GeoDataset variables to return. By default all dataset variables
are returned.
logger : logger object, optional
The logger object used for logging messages. If not provided, the default
logger will be used.
**kwargs
Additional keyword arguments that are passed to the geopandas driver.
Returns
-------
fn_out: str
Absolute path to output file
driver: str
Name of driver to read data with, see
:py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`
"""
kwargs.pop("time_tuple", None)
gdf = self.get_data(bbox=bbox, variables=variables, logger=logger)
if gdf.index.size == 0:
return None, None, None
read_kwargs = {}
if driver is None:
_lst = ["csv", "parquet", "xls", "xlsx", "xy", "vector_table"]
driver = "csv" if self.driver in _lst else "GPKG"
        # write the data slice in the selected tabular or vector format
if driver == "csv":
fn_out = join(data_root, f"{data_name}.csv")
if not np.all(gdf.geometry.type == "Point"):
raise ValueError(
f"{data_name} contains other geometries than 'Point' "
"which cannot be written to csv."
)
gdf["x"], gdf["y"] = gdf.geometry.x, gdf.geometry.y
gdf.drop(columns="geometry").to_csv(fn_out, **kwargs)
read_kwargs["index_col"] = 0
elif driver == "parquet":
fn_out = join(data_root, f"{data_name}.parquet")
if not np.all(gdf.geometry.type == "Point"):
raise ValueError(
f"{data_name} contains other geometries than 'Point' "
"which cannot be written to parquet."
)
gdf["x"], gdf["y"] = gdf.geometry.x, gdf.geometry.y
gdf.drop(columns="geometry").to_parquet(fn_out, **kwargs)
else:
driver_extensions = {
"ESRI Shapefile": ".shp",
}
ext = driver_extensions.get(driver, driver).lower()
fn_out = join(data_root, f"{data_name}.{ext}")
gdf.to_file(fn_out, driver=driver, **kwargs)
driver = "vector"
return fn_out, driver, read_kwargs
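    # Sketch of a to_file call (assumed usage; folder, name and bbox are
    # hypothetical):
    #   fn, drv, read_kwargs = adapter.to_file(
    #       data_root="export", data_name="rivers_clip",
    #       bbox=[4.0, 51.0, 6.0, 53.0], driver="GPKG",
    #   )
    #   # -> fn == "export/rivers_clip.gpkg", drv == "vector"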
def get_data(
self,
bbox=None,
geom=None,
predicate="intersects",
buffer=0,
logger=logger,
variables=None,
# **kwargs, # this is not used, for testing only
):
"""Return a clipped and unified GeoDataFrame (vector).
For a detailed description see:
:py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`
"""
# If variable is string, convert to list
if variables:
variables = np.atleast_1d(variables).tolist()
if "storage_options" in self.driver_kwargs:
# not sure if storage options can be passed to fiona.open()
            # for now, raise NotImplementedError
raise NotImplementedError(
"Remote file storage_options not implemented for GeoDataFrame"
)
_ = self.resolve_paths() # throw nice error if data not found
kwargs = self.driver_kwargs.copy()
# parse geom, bbox and buffer arguments
clip_str = ""
if geom is None and bbox is not None:
            # convert bbox to geom with crs EPSG:4326 to apply buffer later
geom = gpd.GeoDataFrame(geometry=[box(*bbox)], crs=4326)
clip_str = " and clip to bbox (epsg:4326)"
elif geom is not None:
clip_str = f" and clip to geom (epsg:{geom.crs.to_epsg():d})"
if geom is not None:
# make sure geom is projected > buffer in meters!
if geom.crs.is_geographic and buffer > 0:
geom = geom.to_crs(3857)
geom = geom.buffer(buffer) # a buffer with zero fixes some topology errors
bbox_str = ", ".join([f"{c:.3f}" for c in geom.total_bounds])
clip_str = f"{clip_str} [{bbox_str}]"
if kwargs.pop("within", False): # for backward compatibility
predicate = "contains"
# read and clip
logger.info(f"GeoDataFrame: Read {self.driver} data{clip_str}.")
if self.driver in [
"csv",
"parquet",
"xls",
"xlsx",
"xy",
"vector",
"vector_table",
]:
# "csv", "xls", "xlsx", "xy" deprecated use vector_table instead.
# specific driver should be added to open_vector kwargs
if "driver" not in kwargs and self.driver in ["csv", "xls", "xlsx", "xy"]:
warnings.warn(
"using the driver setting is deprecated. Please use"
"vector_table instead."
)
kwargs.update(driver=self.driver)
# Check if file-object is required because of additional options
gdf = io.open_vector(
self.path, crs=self.crs, geom=geom, predicate=predicate, **kwargs
)
else:
raise ValueError(f"GeoDataFrame: driver {self.driver} unknown.")
# rename and select columns
if self.rename:
rename = {k: v for k, v in self.rename.items() if k in gdf.columns}
gdf = gdf.rename(columns=rename)
if variables is not None:
if np.any([var not in gdf.columns for var in variables]):
raise ValueError(f"GeoDataFrame: Not all variables found: {variables}")
if "geometry" not in variables: # always keep geometry column
variables = variables + ["geometry"]
gdf = gdf.loc[:, variables]
# nodata and unit conversion for numeric data
if gdf.index.size == 0:
logger.warning(f"GeoDataFrame: No data within spatial domain {self.path}.")
else:
# parse nodata values
cols = gdf.select_dtypes([np.number]).columns
if self.nodata is not None and len(cols) > 0:
if not isinstance(self.nodata, dict):
nodata = {c: self.nodata for c in cols}
else:
nodata = self.nodata
for c in cols:
mv = nodata.get(c, None)
if mv is not None:
is_nodata = np.isin(gdf[c], np.atleast_1d(mv))
gdf[c] = np.where(is_nodata, np.nan, gdf[c])
# unit conversion
unit_names = list(self.unit_mult.keys()) + list(self.unit_add.keys())
unit_names = [k for k in unit_names if k in gdf.columns]
if len(unit_names) > 0:
logger.debug(
f"GeoDataFrame: Convert units for {len(unit_names)} columns."
)
for name in list(set(unit_names)): # unique
m = self.unit_mult.get(name, 1)
a = self.unit_add.get(name, 0)
gdf[name] = gdf[name] * m + a
# set meta data
gdf.attrs.update(self.meta)
# set column attributes
for col in self.attrs:
if col in gdf.columns:
gdf[col].attrs.update(**self.attrs[col])
return gdf
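# Sketch of reading a clipped GeoDataFrame (assumed usage; the bbox and column
# name are hypothetical):
#   gdf = adapter.get_data(bbox=[4.0, 51.0, 6.0, 53.0], buffer=1000,
#                          variables=["river_name"])
#   # returns a geopandas.GeoDataFrame clipped to the (buffered) bbox, with the
#   # rename, nodata and unit conversions configured above applied.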
|
[
"\"\"\"The Geodataframe adapter implementation.\"\"\"\nimport logging\nimport warnings\nfrom os.path import join\nfrom pathlib import Path\nfrom typing import NewType, Union\n\nimport geopandas as gpd\nimport numpy as np\nfrom shapely.geometry import box\n\nfrom .. import io\nfrom .data_adapter import DataAdapter\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\"GeoDataFrameAdapter\", \"GeoDataframeSource\"]\n\nGeoDataframeSource = NewType(\"GeoDataframeSource\", Union[str, Path])\n\n\nclass GeoDataFrameAdapter(DataAdapter):\n\n \"\"\"The Geodataframe adapter implementation.\"\"\"\n\n _DEFAULT_DRIVER = \"vector\"\n _DRIVERS = {\n \"xy\": \"xy\",\n \"csv\": \"csv\",\n \"parquet\": \"parquet\",\n \"xls\": \"xls\",\n \"xlsx\": \"xlsx\",\n }\n\n def __init__(\n self,\n path: str,\n driver: str = None,\n filesystem: str = \"local\",\n crs: Union[int, str, dict] = None,\n nodata: Union[dict, float, int] = None,\n rename: dict = {},\n unit_mult: dict = {},\n unit_add: dict = {},\n meta: dict = {},\n attrs: dict = {},\n driver_kwargs: dict = {},\n name: str = \"\", # optional for now\n catalog_name: str = \"\", # optional for now\n provider=None,\n version=None,\n **kwargs,\n ):\n \"\"\"Initiate data adapter for geospatial vector data.\n\n This object contains all properties required to read supported files into\n a single unified :py:func:`geopandas.GeoDataFrame`.\n In addition it keeps meta data to be able to reproduce which data is used.\n\n Parameters\n ----------\n path: str, Path\n Path to data source. If the dataset consists of multiple files, the path may\n contain {variable} placeholders as well as path\n search pattern using a '*' wildcard.\n driver: {'vector', 'vector_table'}, optional\n Driver to read files with, for 'vector' :py:func:`~geopandas.read_file`,\n for {'vector_table'} :py:func:`hydromt.io.open_vector_from_table`\n By default the driver is inferred from the file extension and falls back to\n 'vector' if unknown.\n filesystem: {'local', 'gcs', 's3'}, optional\n Filesystem where the data is stored (local, cloud, http etc.).\n By default, local.\n crs: int, dict, or str, optional\n Coordinate Reference System. Accepts EPSG codes (int or str);\n proj (str or dict) or wkt (str). Only used if the data has no native CRS.\n nodata: dictionary, float, int, optional\n Missing value number. Only used if the data has no native missing value.\n Nodata values can be differentiated between variables using a dictionary.\n rename: dict, optional\n Mapping of native data source variable to output source variable name as\n required by hydroMT.\n unit_mult, unit_add: dict, optional\n Scaling multiplication and addition to change to map from the native\n data unit to the output data unit as required by hydroMT.\n meta: dict, optional\n Metadata information of dataset, prefably containing the following keys:\n {'source_version', 'source_url', 'source_license',\n 'paper_ref', 'paper_doi', 'category'}\n placeholders: dict, optional\n Placeholders to expand yaml entry to multiple entries (name and path)\n based on placeholder values\n attrs: dict, optional\n Additional attributes relating to data variables. 
For instance unit\n or long name of the variable.\n driver_kwargs, dict, optional\n Additional key-word arguments passed to the driver.\n name, catalog_name: str, optional\n Name of the dataset and catalog, optional for now.\n \"\"\"\n if kwargs:\n warnings.warn(\n \"Passing additional keyword arguments to be used by the \"\n \"GeoDataFrameAdapter driver is deprecated and will be removed \"\n \"in a future version. Please use 'driver_kwargs' instead.\",\n DeprecationWarning,\n )\n driver_kwargs.update(kwargs)\n super().__init__(\n path=path,\n driver=driver,\n filesystem=filesystem,\n nodata=nodata,\n rename=rename,\n unit_mult=unit_mult,\n unit_add=unit_add,\n meta=meta,\n attrs=attrs,\n driver_kwargs=driver_kwargs,\n name=name,\n catalog_name=catalog_name,\n provider=provider,\n version=version,\n )\n self.crs = crs\n\n def to_file(\n self,\n data_root,\n data_name,\n bbox=None,\n driver=None,\n variables=None,\n logger=logger,\n **kwargs,\n ):\n \"\"\"Save a data slice to file.\n\n Parameters\n ----------\n data_root : str, Path\n Path to output folder\n data_name : str\n Name of output file without extension.\n bbox : array-like of floats\n (xmin, ymin, xmax, ymax) bounding box of area of interest.\n driver : str, optional\n Driver to write file, e.g.: 'GPKG', 'ESRI Shapefile' or any fiona data type,\n by default None\n variables : list of str, optional\n Names of GeoDataset variables to return. By default all dataset variables\n are returned.\n logger : logger object, optional\n The logger object used for logging messages. If not provided, the default\n logger will be used.\n **kwargs\n Additional keyword arguments that are passed to the geopandas driver.\n\n Returns\n -------\n fn_out: str\n Absolute path to output file\n driver: str\n Name of driver to read data with, see\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n kwargs.pop(\"time_tuple\", None)\n gdf = self.get_data(bbox=bbox, variables=variables, logger=logger)\n if gdf.index.size == 0:\n return None, None, None\n\n read_kwargs = {}\n if driver is None:\n _lst = [\"csv\", \"parquet\", \"xls\", \"xlsx\", \"xy\", \"vector_table\"]\n driver = \"csv\" if self.driver in _lst else \"GPKG\"\n # always write netcdf\n if driver == \"csv\":\n fn_out = join(data_root, f\"{data_name}.csv\")\n if not np.all(gdf.geometry.type == \"Point\"):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' \"\n \"which cannot be written to csv.\"\n )\n gdf[\"x\"], gdf[\"y\"] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns=\"geometry\").to_csv(fn_out, **kwargs)\n read_kwargs[\"index_col\"] = 0\n elif driver == \"parquet\":\n fn_out = join(data_root, f\"{data_name}.parquet\")\n if not np.all(gdf.geometry.type == \"Point\"):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' \"\n \"which cannot be written to parquet.\"\n )\n gdf[\"x\"], gdf[\"y\"] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns=\"geometry\").to_parquet(fn_out, **kwargs)\n else:\n driver_extensions = {\n \"ESRI Shapefile\": \".shp\",\n }\n ext = driver_extensions.get(driver, driver).lower()\n fn_out = join(data_root, f\"{data_name}.{ext}\")\n gdf.to_file(fn_out, driver=driver, **kwargs)\n driver = \"vector\"\n\n return fn_out, driver, read_kwargs\n\n def get_data(\n self,\n bbox=None,\n geom=None,\n predicate=\"intersects\",\n buffer=0,\n logger=logger,\n variables=None,\n # **kwargs, # this is not used, for testing only\n ):\n \"\"\"Return a clipped and unified GeoDataFrame (vector).\n\n For a detailed 
description see:\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n # If variable is string, convert to list\n if variables:\n variables = np.atleast_1d(variables).tolist()\n\n if \"storage_options\" in self.driver_kwargs:\n # not sure if storage options can be passed to fiona.open()\n # for now throw NotImplemented Error\n raise NotImplementedError(\n \"Remote file storage_options not implemented for GeoDataFrame\"\n )\n _ = self.resolve_paths() # throw nice error if data not found\n\n kwargs = self.driver_kwargs.copy()\n # parse geom, bbox and buffer arguments\n clip_str = \"\"\n if geom is None and bbox is not None:\n # convert bbox to geom with crs EPGS:4326 to apply buffer later\n geom = gpd.GeoDataFrame(geometry=[box(*bbox)], crs=4326)\n clip_str = \" and clip to bbox (epsg:4326)\"\n elif geom is not None:\n clip_str = f\" and clip to geom (epsg:{geom.crs.to_epsg():d})\"\n if geom is not None:\n # make sure geom is projected > buffer in meters!\n if geom.crs.is_geographic and buffer > 0:\n geom = geom.to_crs(3857)\n geom = geom.buffer(buffer) # a buffer with zero fixes some topology errors\n bbox_str = \", \".join([f\"{c:.3f}\" for c in geom.total_bounds])\n clip_str = f\"{clip_str} [{bbox_str}]\"\n if kwargs.pop(\"within\", False): # for backward compatibility\n predicate = \"contains\"\n\n # read and clip\n logger.info(f\"GeoDataFrame: Read {self.driver} data{clip_str}.\")\n if self.driver in [\n \"csv\",\n \"parquet\",\n \"xls\",\n \"xlsx\",\n \"xy\",\n \"vector\",\n \"vector_table\",\n ]:\n # \"csv\", \"xls\", \"xlsx\", \"xy\" deprecated use vector_table instead.\n # specific driver should be added to open_vector kwargs\n if \"driver\" not in kwargs and self.driver in [\"csv\", \"xls\", \"xlsx\", \"xy\"]:\n warnings.warn(\n \"using the driver setting is deprecated. 
Please use\"\n \"vector_table instead.\"\n )\n\n kwargs.update(driver=self.driver)\n # Check if file-object is required because of additional options\n gdf = io.open_vector(\n self.path, crs=self.crs, geom=geom, predicate=predicate, **kwargs\n )\n else:\n raise ValueError(f\"GeoDataFrame: driver {self.driver} unknown.\")\n\n # rename and select columns\n if self.rename:\n rename = {k: v for k, v in self.rename.items() if k in gdf.columns}\n gdf = gdf.rename(columns=rename)\n if variables is not None:\n if np.any([var not in gdf.columns for var in variables]):\n raise ValueError(f\"GeoDataFrame: Not all variables found: {variables}\")\n if \"geometry\" not in variables: # always keep geometry column\n variables = variables + [\"geometry\"]\n gdf = gdf.loc[:, variables]\n\n # nodata and unit conversion for numeric data\n if gdf.index.size == 0:\n logger.warning(f\"GeoDataFrame: No data within spatial domain {self.path}.\")\n else:\n # parse nodata values\n cols = gdf.select_dtypes([np.number]).columns\n if self.nodata is not None and len(cols) > 0:\n if not isinstance(self.nodata, dict):\n nodata = {c: self.nodata for c in cols}\n else:\n nodata = self.nodata\n for c in cols:\n mv = nodata.get(c, None)\n if mv is not None:\n is_nodata = np.isin(gdf[c], np.atleast_1d(mv))\n gdf[c] = np.where(is_nodata, np.nan, gdf[c])\n\n # unit conversion\n unit_names = list(self.unit_mult.keys()) + list(self.unit_add.keys())\n unit_names = [k for k in unit_names if k in gdf.columns]\n if len(unit_names) > 0:\n logger.debug(\n f\"GeoDataFrame: Convert units for {len(unit_names)} columns.\"\n )\n for name in list(set(unit_names)): # unique\n m = self.unit_mult.get(name, 1)\n a = self.unit_add.get(name, 0)\n gdf[name] = gdf[name] * m + a\n\n # set meta data\n gdf.attrs.update(self.meta)\n\n # set column attributes\n for col in self.attrs:\n if col in gdf.columns:\n gdf[col].attrs.update(**self.attrs[col])\n return gdf\n",
"<docstring token>\nimport logging\nimport warnings\nfrom os.path import join\nfrom pathlib import Path\nfrom typing import NewType, Union\nimport geopandas as gpd\nimport numpy as np\nfrom shapely.geometry import box\nfrom .. import io\nfrom .data_adapter import DataAdapter\nlogger = logging.getLogger(__name__)\n__all__ = ['GeoDataFrameAdapter', 'GeoDataframeSource']\nGeoDataframeSource = NewType('GeoDataframeSource', Union[str, Path])\n\n\nclass GeoDataFrameAdapter(DataAdapter):\n \"\"\"The Geodataframe adapter implementation.\"\"\"\n _DEFAULT_DRIVER = 'vector'\n _DRIVERS = {'xy': 'xy', 'csv': 'csv', 'parquet': 'parquet', 'xls':\n 'xls', 'xlsx': 'xlsx'}\n\n def __init__(self, path: str, driver: str=None, filesystem: str='local',\n crs: Union[int, str, dict]=None, nodata: Union[dict, float, int]=\n None, rename: dict={}, unit_mult: dict={}, unit_add: dict={}, meta:\n dict={}, attrs: dict={}, driver_kwargs: dict={}, name: str='',\n catalog_name: str='', provider=None, version=None, **kwargs):\n \"\"\"Initiate data adapter for geospatial vector data.\n\n This object contains all properties required to read supported files into\n a single unified :py:func:`geopandas.GeoDataFrame`.\n In addition it keeps meta data to be able to reproduce which data is used.\n\n Parameters\n ----------\n path: str, Path\n Path to data source. If the dataset consists of multiple files, the path may\n contain {variable} placeholders as well as path\n search pattern using a '*' wildcard.\n driver: {'vector', 'vector_table'}, optional\n Driver to read files with, for 'vector' :py:func:`~geopandas.read_file`,\n for {'vector_table'} :py:func:`hydromt.io.open_vector_from_table`\n By default the driver is inferred from the file extension and falls back to\n 'vector' if unknown.\n filesystem: {'local', 'gcs', 's3'}, optional\n Filesystem where the data is stored (local, cloud, http etc.).\n By default, local.\n crs: int, dict, or str, optional\n Coordinate Reference System. Accepts EPSG codes (int or str);\n proj (str or dict) or wkt (str). Only used if the data has no native CRS.\n nodata: dictionary, float, int, optional\n Missing value number. Only used if the data has no native missing value.\n Nodata values can be differentiated between variables using a dictionary.\n rename: dict, optional\n Mapping of native data source variable to output source variable name as\n required by hydroMT.\n unit_mult, unit_add: dict, optional\n Scaling multiplication and addition to change to map from the native\n data unit to the output data unit as required by hydroMT.\n meta: dict, optional\n Metadata information of dataset, prefably containing the following keys:\n {'source_version', 'source_url', 'source_license',\n 'paper_ref', 'paper_doi', 'category'}\n placeholders: dict, optional\n Placeholders to expand yaml entry to multiple entries (name and path)\n based on placeholder values\n attrs: dict, optional\n Additional attributes relating to data variables. For instance unit\n or long name of the variable.\n driver_kwargs, dict, optional\n Additional key-word arguments passed to the driver.\n name, catalog_name: str, optional\n Name of the dataset and catalog, optional for now.\n \"\"\"\n if kwargs:\n warnings.warn(\n \"Passing additional keyword arguments to be used by the GeoDataFrameAdapter driver is deprecated and will be removed in a future version. 
Please use 'driver_kwargs' instead.\"\n , DeprecationWarning)\n driver_kwargs.update(kwargs)\n super().__init__(path=path, driver=driver, filesystem=filesystem,\n nodata=nodata, rename=rename, unit_mult=unit_mult, unit_add=\n unit_add, meta=meta, attrs=attrs, driver_kwargs=driver_kwargs,\n name=name, catalog_name=catalog_name, provider=provider,\n version=version)\n self.crs = crs\n\n def to_file(self, data_root, data_name, bbox=None, driver=None,\n variables=None, logger=logger, **kwargs):\n \"\"\"Save a data slice to file.\n\n Parameters\n ----------\n data_root : str, Path\n Path to output folder\n data_name : str\n Name of output file without extension.\n bbox : array-like of floats\n (xmin, ymin, xmax, ymax) bounding box of area of interest.\n driver : str, optional\n Driver to write file, e.g.: 'GPKG', 'ESRI Shapefile' or any fiona data type,\n by default None\n variables : list of str, optional\n Names of GeoDataset variables to return. By default all dataset variables\n are returned.\n logger : logger object, optional\n The logger object used for logging messages. If not provided, the default\n logger will be used.\n **kwargs\n Additional keyword arguments that are passed to the geopandas driver.\n\n Returns\n -------\n fn_out: str\n Absolute path to output file\n driver: str\n Name of driver to read data with, see\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n kwargs.pop('time_tuple', None)\n gdf = self.get_data(bbox=bbox, variables=variables, logger=logger)\n if gdf.index.size == 0:\n return None, None, None\n read_kwargs = {}\n if driver is None:\n _lst = ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector_table']\n driver = 'csv' if self.driver in _lst else 'GPKG'\n if driver == 'csv':\n fn_out = join(data_root, f'{data_name}.csv')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to csv.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_csv(fn_out, **kwargs)\n read_kwargs['index_col'] = 0\n elif driver == 'parquet':\n fn_out = join(data_root, f'{data_name}.parquet')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to parquet.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_parquet(fn_out, **kwargs)\n else:\n driver_extensions = {'ESRI Shapefile': '.shp'}\n ext = driver_extensions.get(driver, driver).lower()\n fn_out = join(data_root, f'{data_name}.{ext}')\n gdf.to_file(fn_out, driver=driver, **kwargs)\n driver = 'vector'\n return fn_out, driver, read_kwargs\n\n def get_data(self, bbox=None, geom=None, predicate='intersects', buffer\n =0, logger=logger, variables=None):\n \"\"\"Return a clipped and unified GeoDataFrame (vector).\n\n For a detailed description see:\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n if variables:\n variables = np.atleast_1d(variables).tolist()\n if 'storage_options' in self.driver_kwargs:\n raise NotImplementedError(\n 'Remote file storage_options not implemented for GeoDataFrame')\n _ = self.resolve_paths()\n kwargs = self.driver_kwargs.copy()\n clip_str = ''\n if geom is None and bbox is not None:\n geom = gpd.GeoDataFrame(geometry=[box(*bbox)], crs=4326)\n clip_str = ' and clip to bbox (epsg:4326)'\n elif geom is not None:\n clip_str = f' and clip to geom (epsg:{geom.crs.to_epsg():d})'\n if geom is not None:\n if 
geom.crs.is_geographic and buffer > 0:\n geom = geom.to_crs(3857)\n geom = geom.buffer(buffer)\n bbox_str = ', '.join([f'{c:.3f}' for c in geom.total_bounds])\n clip_str = f'{clip_str} [{bbox_str}]'\n if kwargs.pop('within', False):\n predicate = 'contains'\n logger.info(f'GeoDataFrame: Read {self.driver} data{clip_str}.')\n if self.driver in ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector',\n 'vector_table']:\n if 'driver' not in kwargs and self.driver in ['csv', 'xls',\n 'xlsx', 'xy']:\n warnings.warn(\n 'using the driver setting is deprecated. Please usevector_table instead.'\n )\n kwargs.update(driver=self.driver)\n gdf = io.open_vector(self.path, crs=self.crs, geom=geom,\n predicate=predicate, **kwargs)\n else:\n raise ValueError(f'GeoDataFrame: driver {self.driver} unknown.')\n if self.rename:\n rename = {k: v for k, v in self.rename.items() if k in gdf.columns}\n gdf = gdf.rename(columns=rename)\n if variables is not None:\n if np.any([(var not in gdf.columns) for var in variables]):\n raise ValueError(\n f'GeoDataFrame: Not all variables found: {variables}')\n if 'geometry' not in variables:\n variables = variables + ['geometry']\n gdf = gdf.loc[:, variables]\n if gdf.index.size == 0:\n logger.warning(\n f'GeoDataFrame: No data within spatial domain {self.path}.')\n else:\n cols = gdf.select_dtypes([np.number]).columns\n if self.nodata is not None and len(cols) > 0:\n if not isinstance(self.nodata, dict):\n nodata = {c: self.nodata for c in cols}\n else:\n nodata = self.nodata\n for c in cols:\n mv = nodata.get(c, None)\n if mv is not None:\n is_nodata = np.isin(gdf[c], np.atleast_1d(mv))\n gdf[c] = np.where(is_nodata, np.nan, gdf[c])\n unit_names = list(self.unit_mult.keys()) + list(self.unit_add.\n keys())\n unit_names = [k for k in unit_names if k in gdf.columns]\n if len(unit_names) > 0:\n logger.debug(\n f'GeoDataFrame: Convert units for {len(unit_names)} columns.'\n )\n for name in list(set(unit_names)):\n m = self.unit_mult.get(name, 1)\n a = self.unit_add.get(name, 0)\n gdf[name] = gdf[name] * m + a\n gdf.attrs.update(self.meta)\n for col in self.attrs:\n if col in gdf.columns:\n gdf[col].attrs.update(**self.attrs[col])\n return gdf\n",
"<docstring token>\n<import token>\nlogger = logging.getLogger(__name__)\n__all__ = ['GeoDataFrameAdapter', 'GeoDataframeSource']\nGeoDataframeSource = NewType('GeoDataframeSource', Union[str, Path])\n\n\nclass GeoDataFrameAdapter(DataAdapter):\n \"\"\"The Geodataframe adapter implementation.\"\"\"\n _DEFAULT_DRIVER = 'vector'\n _DRIVERS = {'xy': 'xy', 'csv': 'csv', 'parquet': 'parquet', 'xls':\n 'xls', 'xlsx': 'xlsx'}\n\n def __init__(self, path: str, driver: str=None, filesystem: str='local',\n crs: Union[int, str, dict]=None, nodata: Union[dict, float, int]=\n None, rename: dict={}, unit_mult: dict={}, unit_add: dict={}, meta:\n dict={}, attrs: dict={}, driver_kwargs: dict={}, name: str='',\n catalog_name: str='', provider=None, version=None, **kwargs):\n \"\"\"Initiate data adapter for geospatial vector data.\n\n This object contains all properties required to read supported files into\n a single unified :py:func:`geopandas.GeoDataFrame`.\n In addition it keeps meta data to be able to reproduce which data is used.\n\n Parameters\n ----------\n path: str, Path\n Path to data source. If the dataset consists of multiple files, the path may\n contain {variable} placeholders as well as path\n search pattern using a '*' wildcard.\n driver: {'vector', 'vector_table'}, optional\n Driver to read files with, for 'vector' :py:func:`~geopandas.read_file`,\n for {'vector_table'} :py:func:`hydromt.io.open_vector_from_table`\n By default the driver is inferred from the file extension and falls back to\n 'vector' if unknown.\n filesystem: {'local', 'gcs', 's3'}, optional\n Filesystem where the data is stored (local, cloud, http etc.).\n By default, local.\n crs: int, dict, or str, optional\n Coordinate Reference System. Accepts EPSG codes (int or str);\n proj (str or dict) or wkt (str). Only used if the data has no native CRS.\n nodata: dictionary, float, int, optional\n Missing value number. Only used if the data has no native missing value.\n Nodata values can be differentiated between variables using a dictionary.\n rename: dict, optional\n Mapping of native data source variable to output source variable name as\n required by hydroMT.\n unit_mult, unit_add: dict, optional\n Scaling multiplication and addition to change to map from the native\n data unit to the output data unit as required by hydroMT.\n meta: dict, optional\n Metadata information of dataset, prefably containing the following keys:\n {'source_version', 'source_url', 'source_license',\n 'paper_ref', 'paper_doi', 'category'}\n placeholders: dict, optional\n Placeholders to expand yaml entry to multiple entries (name and path)\n based on placeholder values\n attrs: dict, optional\n Additional attributes relating to data variables. For instance unit\n or long name of the variable.\n driver_kwargs, dict, optional\n Additional key-word arguments passed to the driver.\n name, catalog_name: str, optional\n Name of the dataset and catalog, optional for now.\n \"\"\"\n if kwargs:\n warnings.warn(\n \"Passing additional keyword arguments to be used by the GeoDataFrameAdapter driver is deprecated and will be removed in a future version. 
Please use 'driver_kwargs' instead.\"\n , DeprecationWarning)\n driver_kwargs.update(kwargs)\n super().__init__(path=path, driver=driver, filesystem=filesystem,\n nodata=nodata, rename=rename, unit_mult=unit_mult, unit_add=\n unit_add, meta=meta, attrs=attrs, driver_kwargs=driver_kwargs,\n name=name, catalog_name=catalog_name, provider=provider,\n version=version)\n self.crs = crs\n\n def to_file(self, data_root, data_name, bbox=None, driver=None,\n variables=None, logger=logger, **kwargs):\n \"\"\"Save a data slice to file.\n\n Parameters\n ----------\n data_root : str, Path\n Path to output folder\n data_name : str\n Name of output file without extension.\n bbox : array-like of floats\n (xmin, ymin, xmax, ymax) bounding box of area of interest.\n driver : str, optional\n Driver to write file, e.g.: 'GPKG', 'ESRI Shapefile' or any fiona data type,\n by default None\n variables : list of str, optional\n Names of GeoDataset variables to return. By default all dataset variables\n are returned.\n logger : logger object, optional\n The logger object used for logging messages. If not provided, the default\n logger will be used.\n **kwargs\n Additional keyword arguments that are passed to the geopandas driver.\n\n Returns\n -------\n fn_out: str\n Absolute path to output file\n driver: str\n Name of driver to read data with, see\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n kwargs.pop('time_tuple', None)\n gdf = self.get_data(bbox=bbox, variables=variables, logger=logger)\n if gdf.index.size == 0:\n return None, None, None\n read_kwargs = {}\n if driver is None:\n _lst = ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector_table']\n driver = 'csv' if self.driver in _lst else 'GPKG'\n if driver == 'csv':\n fn_out = join(data_root, f'{data_name}.csv')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to csv.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_csv(fn_out, **kwargs)\n read_kwargs['index_col'] = 0\n elif driver == 'parquet':\n fn_out = join(data_root, f'{data_name}.parquet')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to parquet.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_parquet(fn_out, **kwargs)\n else:\n driver_extensions = {'ESRI Shapefile': '.shp'}\n ext = driver_extensions.get(driver, driver).lower()\n fn_out = join(data_root, f'{data_name}.{ext}')\n gdf.to_file(fn_out, driver=driver, **kwargs)\n driver = 'vector'\n return fn_out, driver, read_kwargs\n\n def get_data(self, bbox=None, geom=None, predicate='intersects', buffer\n =0, logger=logger, variables=None):\n \"\"\"Return a clipped and unified GeoDataFrame (vector).\n\n For a detailed description see:\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n if variables:\n variables = np.atleast_1d(variables).tolist()\n if 'storage_options' in self.driver_kwargs:\n raise NotImplementedError(\n 'Remote file storage_options not implemented for GeoDataFrame')\n _ = self.resolve_paths()\n kwargs = self.driver_kwargs.copy()\n clip_str = ''\n if geom is None and bbox is not None:\n geom = gpd.GeoDataFrame(geometry=[box(*bbox)], crs=4326)\n clip_str = ' and clip to bbox (epsg:4326)'\n elif geom is not None:\n clip_str = f' and clip to geom (epsg:{geom.crs.to_epsg():d})'\n if geom is not None:\n if 
geom.crs.is_geographic and buffer > 0:\n geom = geom.to_crs(3857)\n geom = geom.buffer(buffer)\n bbox_str = ', '.join([f'{c:.3f}' for c in geom.total_bounds])\n clip_str = f'{clip_str} [{bbox_str}]'\n if kwargs.pop('within', False):\n predicate = 'contains'\n logger.info(f'GeoDataFrame: Read {self.driver} data{clip_str}.')\n if self.driver in ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector',\n 'vector_table']:\n if 'driver' not in kwargs and self.driver in ['csv', 'xls',\n 'xlsx', 'xy']:\n warnings.warn(\n 'using the driver setting is deprecated. Please usevector_table instead.'\n )\n kwargs.update(driver=self.driver)\n gdf = io.open_vector(self.path, crs=self.crs, geom=geom,\n predicate=predicate, **kwargs)\n else:\n raise ValueError(f'GeoDataFrame: driver {self.driver} unknown.')\n if self.rename:\n rename = {k: v for k, v in self.rename.items() if k in gdf.columns}\n gdf = gdf.rename(columns=rename)\n if variables is not None:\n if np.any([(var not in gdf.columns) for var in variables]):\n raise ValueError(\n f'GeoDataFrame: Not all variables found: {variables}')\n if 'geometry' not in variables:\n variables = variables + ['geometry']\n gdf = gdf.loc[:, variables]\n if gdf.index.size == 0:\n logger.warning(\n f'GeoDataFrame: No data within spatial domain {self.path}.')\n else:\n cols = gdf.select_dtypes([np.number]).columns\n if self.nodata is not None and len(cols) > 0:\n if not isinstance(self.nodata, dict):\n nodata = {c: self.nodata for c in cols}\n else:\n nodata = self.nodata\n for c in cols:\n mv = nodata.get(c, None)\n if mv is not None:\n is_nodata = np.isin(gdf[c], np.atleast_1d(mv))\n gdf[c] = np.where(is_nodata, np.nan, gdf[c])\n unit_names = list(self.unit_mult.keys()) + list(self.unit_add.\n keys())\n unit_names = [k for k in unit_names if k in gdf.columns]\n if len(unit_names) > 0:\n logger.debug(\n f'GeoDataFrame: Convert units for {len(unit_names)} columns.'\n )\n for name in list(set(unit_names)):\n m = self.unit_mult.get(name, 1)\n a = self.unit_add.get(name, 0)\n gdf[name] = gdf[name] * m + a\n gdf.attrs.update(self.meta)\n for col in self.attrs:\n if col in gdf.columns:\n gdf[col].attrs.update(**self.attrs[col])\n return gdf\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass GeoDataFrameAdapter(DataAdapter):\n \"\"\"The Geodataframe adapter implementation.\"\"\"\n _DEFAULT_DRIVER = 'vector'\n _DRIVERS = {'xy': 'xy', 'csv': 'csv', 'parquet': 'parquet', 'xls':\n 'xls', 'xlsx': 'xlsx'}\n\n def __init__(self, path: str, driver: str=None, filesystem: str='local',\n crs: Union[int, str, dict]=None, nodata: Union[dict, float, int]=\n None, rename: dict={}, unit_mult: dict={}, unit_add: dict={}, meta:\n dict={}, attrs: dict={}, driver_kwargs: dict={}, name: str='',\n catalog_name: str='', provider=None, version=None, **kwargs):\n \"\"\"Initiate data adapter for geospatial vector data.\n\n This object contains all properties required to read supported files into\n a single unified :py:func:`geopandas.GeoDataFrame`.\n In addition it keeps meta data to be able to reproduce which data is used.\n\n Parameters\n ----------\n path: str, Path\n Path to data source. If the dataset consists of multiple files, the path may\n contain {variable} placeholders as well as path\n search pattern using a '*' wildcard.\n driver: {'vector', 'vector_table'}, optional\n Driver to read files with, for 'vector' :py:func:`~geopandas.read_file`,\n for {'vector_table'} :py:func:`hydromt.io.open_vector_from_table`\n By default the driver is inferred from the file extension and falls back to\n 'vector' if unknown.\n filesystem: {'local', 'gcs', 's3'}, optional\n Filesystem where the data is stored (local, cloud, http etc.).\n By default, local.\n crs: int, dict, or str, optional\n Coordinate Reference System. Accepts EPSG codes (int or str);\n proj (str or dict) or wkt (str). Only used if the data has no native CRS.\n nodata: dictionary, float, int, optional\n Missing value number. Only used if the data has no native missing value.\n Nodata values can be differentiated between variables using a dictionary.\n rename: dict, optional\n Mapping of native data source variable to output source variable name as\n required by hydroMT.\n unit_mult, unit_add: dict, optional\n Scaling multiplication and addition to change to map from the native\n data unit to the output data unit as required by hydroMT.\n meta: dict, optional\n Metadata information of dataset, prefably containing the following keys:\n {'source_version', 'source_url', 'source_license',\n 'paper_ref', 'paper_doi', 'category'}\n placeholders: dict, optional\n Placeholders to expand yaml entry to multiple entries (name and path)\n based on placeholder values\n attrs: dict, optional\n Additional attributes relating to data variables. For instance unit\n or long name of the variable.\n driver_kwargs, dict, optional\n Additional key-word arguments passed to the driver.\n name, catalog_name: str, optional\n Name of the dataset and catalog, optional for now.\n \"\"\"\n if kwargs:\n warnings.warn(\n \"Passing additional keyword arguments to be used by the GeoDataFrameAdapter driver is deprecated and will be removed in a future version. 
Please use 'driver_kwargs' instead.\"\n , DeprecationWarning)\n driver_kwargs.update(kwargs)\n super().__init__(path=path, driver=driver, filesystem=filesystem,\n nodata=nodata, rename=rename, unit_mult=unit_mult, unit_add=\n unit_add, meta=meta, attrs=attrs, driver_kwargs=driver_kwargs,\n name=name, catalog_name=catalog_name, provider=provider,\n version=version)\n self.crs = crs\n\n def to_file(self, data_root, data_name, bbox=None, driver=None,\n variables=None, logger=logger, **kwargs):\n \"\"\"Save a data slice to file.\n\n Parameters\n ----------\n data_root : str, Path\n Path to output folder\n data_name : str\n Name of output file without extension.\n bbox : array-like of floats\n (xmin, ymin, xmax, ymax) bounding box of area of interest.\n driver : str, optional\n Driver to write file, e.g.: 'GPKG', 'ESRI Shapefile' or any fiona data type,\n by default None\n variables : list of str, optional\n Names of GeoDataset variables to return. By default all dataset variables\n are returned.\n logger : logger object, optional\n The logger object used for logging messages. If not provided, the default\n logger will be used.\n **kwargs\n Additional keyword arguments that are passed to the geopandas driver.\n\n Returns\n -------\n fn_out: str\n Absolute path to output file\n driver: str\n Name of driver to read data with, see\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n kwargs.pop('time_tuple', None)\n gdf = self.get_data(bbox=bbox, variables=variables, logger=logger)\n if gdf.index.size == 0:\n return None, None, None\n read_kwargs = {}\n if driver is None:\n _lst = ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector_table']\n driver = 'csv' if self.driver in _lst else 'GPKG'\n if driver == 'csv':\n fn_out = join(data_root, f'{data_name}.csv')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to csv.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_csv(fn_out, **kwargs)\n read_kwargs['index_col'] = 0\n elif driver == 'parquet':\n fn_out = join(data_root, f'{data_name}.parquet')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to parquet.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_parquet(fn_out, **kwargs)\n else:\n driver_extensions = {'ESRI Shapefile': '.shp'}\n ext = driver_extensions.get(driver, driver).lower()\n fn_out = join(data_root, f'{data_name}.{ext}')\n gdf.to_file(fn_out, driver=driver, **kwargs)\n driver = 'vector'\n return fn_out, driver, read_kwargs\n\n def get_data(self, bbox=None, geom=None, predicate='intersects', buffer\n =0, logger=logger, variables=None):\n \"\"\"Return a clipped and unified GeoDataFrame (vector).\n\n For a detailed description see:\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n if variables:\n variables = np.atleast_1d(variables).tolist()\n if 'storage_options' in self.driver_kwargs:\n raise NotImplementedError(\n 'Remote file storage_options not implemented for GeoDataFrame')\n _ = self.resolve_paths()\n kwargs = self.driver_kwargs.copy()\n clip_str = ''\n if geom is None and bbox is not None:\n geom = gpd.GeoDataFrame(geometry=[box(*bbox)], crs=4326)\n clip_str = ' and clip to bbox (epsg:4326)'\n elif geom is not None:\n clip_str = f' and clip to geom (epsg:{geom.crs.to_epsg():d})'\n if geom is not None:\n if 
geom.crs.is_geographic and buffer > 0:\n geom = geom.to_crs(3857)\n geom = geom.buffer(buffer)\n bbox_str = ', '.join([f'{c:.3f}' for c in geom.total_bounds])\n clip_str = f'{clip_str} [{bbox_str}]'\n if kwargs.pop('within', False):\n predicate = 'contains'\n logger.info(f'GeoDataFrame: Read {self.driver} data{clip_str}.')\n if self.driver in ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector',\n 'vector_table']:\n if 'driver' not in kwargs and self.driver in ['csv', 'xls',\n 'xlsx', 'xy']:\n warnings.warn(\n 'using the driver setting is deprecated. Please usevector_table instead.'\n )\n kwargs.update(driver=self.driver)\n gdf = io.open_vector(self.path, crs=self.crs, geom=geom,\n predicate=predicate, **kwargs)\n else:\n raise ValueError(f'GeoDataFrame: driver {self.driver} unknown.')\n if self.rename:\n rename = {k: v for k, v in self.rename.items() if k in gdf.columns}\n gdf = gdf.rename(columns=rename)\n if variables is not None:\n if np.any([(var not in gdf.columns) for var in variables]):\n raise ValueError(\n f'GeoDataFrame: Not all variables found: {variables}')\n if 'geometry' not in variables:\n variables = variables + ['geometry']\n gdf = gdf.loc[:, variables]\n if gdf.index.size == 0:\n logger.warning(\n f'GeoDataFrame: No data within spatial domain {self.path}.')\n else:\n cols = gdf.select_dtypes([np.number]).columns\n if self.nodata is not None and len(cols) > 0:\n if not isinstance(self.nodata, dict):\n nodata = {c: self.nodata for c in cols}\n else:\n nodata = self.nodata\n for c in cols:\n mv = nodata.get(c, None)\n if mv is not None:\n is_nodata = np.isin(gdf[c], np.atleast_1d(mv))\n gdf[c] = np.where(is_nodata, np.nan, gdf[c])\n unit_names = list(self.unit_mult.keys()) + list(self.unit_add.\n keys())\n unit_names = [k for k in unit_names if k in gdf.columns]\n if len(unit_names) > 0:\n logger.debug(\n f'GeoDataFrame: Convert units for {len(unit_names)} columns.'\n )\n for name in list(set(unit_names)):\n m = self.unit_mult.get(name, 1)\n a = self.unit_add.get(name, 0)\n gdf[name] = gdf[name] * m + a\n gdf.attrs.update(self.meta)\n for col in self.attrs:\n if col in gdf.columns:\n gdf[col].attrs.update(**self.attrs[col])\n return gdf\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass GeoDataFrameAdapter(DataAdapter):\n <docstring token>\n _DEFAULT_DRIVER = 'vector'\n _DRIVERS = {'xy': 'xy', 'csv': 'csv', 'parquet': 'parquet', 'xls':\n 'xls', 'xlsx': 'xlsx'}\n\n def __init__(self, path: str, driver: str=None, filesystem: str='local',\n crs: Union[int, str, dict]=None, nodata: Union[dict, float, int]=\n None, rename: dict={}, unit_mult: dict={}, unit_add: dict={}, meta:\n dict={}, attrs: dict={}, driver_kwargs: dict={}, name: str='',\n catalog_name: str='', provider=None, version=None, **kwargs):\n \"\"\"Initiate data adapter for geospatial vector data.\n\n This object contains all properties required to read supported files into\n a single unified :py:func:`geopandas.GeoDataFrame`.\n In addition it keeps meta data to be able to reproduce which data is used.\n\n Parameters\n ----------\n path: str, Path\n Path to data source. If the dataset consists of multiple files, the path may\n contain {variable} placeholders as well as path\n search pattern using a '*' wildcard.\n driver: {'vector', 'vector_table'}, optional\n Driver to read files with, for 'vector' :py:func:`~geopandas.read_file`,\n for {'vector_table'} :py:func:`hydromt.io.open_vector_from_table`\n By default the driver is inferred from the file extension and falls back to\n 'vector' if unknown.\n filesystem: {'local', 'gcs', 's3'}, optional\n Filesystem where the data is stored (local, cloud, http etc.).\n By default, local.\n crs: int, dict, or str, optional\n Coordinate Reference System. Accepts EPSG codes (int or str);\n proj (str or dict) or wkt (str). Only used if the data has no native CRS.\n nodata: dictionary, float, int, optional\n Missing value number. Only used if the data has no native missing value.\n Nodata values can be differentiated between variables using a dictionary.\n rename: dict, optional\n Mapping of native data source variable to output source variable name as\n required by hydroMT.\n unit_mult, unit_add: dict, optional\n Scaling multiplication and addition to change to map from the native\n data unit to the output data unit as required by hydroMT.\n meta: dict, optional\n Metadata information of dataset, prefably containing the following keys:\n {'source_version', 'source_url', 'source_license',\n 'paper_ref', 'paper_doi', 'category'}\n placeholders: dict, optional\n Placeholders to expand yaml entry to multiple entries (name and path)\n based on placeholder values\n attrs: dict, optional\n Additional attributes relating to data variables. For instance unit\n or long name of the variable.\n driver_kwargs, dict, optional\n Additional key-word arguments passed to the driver.\n name, catalog_name: str, optional\n Name of the dataset and catalog, optional for now.\n \"\"\"\n if kwargs:\n warnings.warn(\n \"Passing additional keyword arguments to be used by the GeoDataFrameAdapter driver is deprecated and will be removed in a future version. 
Please use 'driver_kwargs' instead.\"\n , DeprecationWarning)\n driver_kwargs.update(kwargs)\n super().__init__(path=path, driver=driver, filesystem=filesystem,\n nodata=nodata, rename=rename, unit_mult=unit_mult, unit_add=\n unit_add, meta=meta, attrs=attrs, driver_kwargs=driver_kwargs,\n name=name, catalog_name=catalog_name, provider=provider,\n version=version)\n self.crs = crs\n\n def to_file(self, data_root, data_name, bbox=None, driver=None,\n variables=None, logger=logger, **kwargs):\n \"\"\"Save a data slice to file.\n\n Parameters\n ----------\n data_root : str, Path\n Path to output folder\n data_name : str\n Name of output file without extension.\n bbox : array-like of floats\n (xmin, ymin, xmax, ymax) bounding box of area of interest.\n driver : str, optional\n Driver to write file, e.g.: 'GPKG', 'ESRI Shapefile' or any fiona data type,\n by default None\n variables : list of str, optional\n Names of GeoDataset variables to return. By default all dataset variables\n are returned.\n logger : logger object, optional\n The logger object used for logging messages. If not provided, the default\n logger will be used.\n **kwargs\n Additional keyword arguments that are passed to the geopandas driver.\n\n Returns\n -------\n fn_out: str\n Absolute path to output file\n driver: str\n Name of driver to read data with, see\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n kwargs.pop('time_tuple', None)\n gdf = self.get_data(bbox=bbox, variables=variables, logger=logger)\n if gdf.index.size == 0:\n return None, None, None\n read_kwargs = {}\n if driver is None:\n _lst = ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector_table']\n driver = 'csv' if self.driver in _lst else 'GPKG'\n if driver == 'csv':\n fn_out = join(data_root, f'{data_name}.csv')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to csv.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_csv(fn_out, **kwargs)\n read_kwargs['index_col'] = 0\n elif driver == 'parquet':\n fn_out = join(data_root, f'{data_name}.parquet')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to parquet.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_parquet(fn_out, **kwargs)\n else:\n driver_extensions = {'ESRI Shapefile': '.shp'}\n ext = driver_extensions.get(driver, driver).lower()\n fn_out = join(data_root, f'{data_name}.{ext}')\n gdf.to_file(fn_out, driver=driver, **kwargs)\n driver = 'vector'\n return fn_out, driver, read_kwargs\n\n def get_data(self, bbox=None, geom=None, predicate='intersects', buffer\n =0, logger=logger, variables=None):\n \"\"\"Return a clipped and unified GeoDataFrame (vector).\n\n For a detailed description see:\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n if variables:\n variables = np.atleast_1d(variables).tolist()\n if 'storage_options' in self.driver_kwargs:\n raise NotImplementedError(\n 'Remote file storage_options not implemented for GeoDataFrame')\n _ = self.resolve_paths()\n kwargs = self.driver_kwargs.copy()\n clip_str = ''\n if geom is None and bbox is not None:\n geom = gpd.GeoDataFrame(geometry=[box(*bbox)], crs=4326)\n clip_str = ' and clip to bbox (epsg:4326)'\n elif geom is not None:\n clip_str = f' and clip to geom (epsg:{geom.crs.to_epsg():d})'\n if geom is not None:\n if 
geom.crs.is_geographic and buffer > 0:\n geom = geom.to_crs(3857)\n geom = geom.buffer(buffer)\n bbox_str = ', '.join([f'{c:.3f}' for c in geom.total_bounds])\n clip_str = f'{clip_str} [{bbox_str}]'\n if kwargs.pop('within', False):\n predicate = 'contains'\n logger.info(f'GeoDataFrame: Read {self.driver} data{clip_str}.')\n if self.driver in ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector',\n 'vector_table']:\n if 'driver' not in kwargs and self.driver in ['csv', 'xls',\n 'xlsx', 'xy']:\n warnings.warn(\n 'using the driver setting is deprecated. Please usevector_table instead.'\n )\n kwargs.update(driver=self.driver)\n gdf = io.open_vector(self.path, crs=self.crs, geom=geom,\n predicate=predicate, **kwargs)\n else:\n raise ValueError(f'GeoDataFrame: driver {self.driver} unknown.')\n if self.rename:\n rename = {k: v for k, v in self.rename.items() if k in gdf.columns}\n gdf = gdf.rename(columns=rename)\n if variables is not None:\n if np.any([(var not in gdf.columns) for var in variables]):\n raise ValueError(\n f'GeoDataFrame: Not all variables found: {variables}')\n if 'geometry' not in variables:\n variables = variables + ['geometry']\n gdf = gdf.loc[:, variables]\n if gdf.index.size == 0:\n logger.warning(\n f'GeoDataFrame: No data within spatial domain {self.path}.')\n else:\n cols = gdf.select_dtypes([np.number]).columns\n if self.nodata is not None and len(cols) > 0:\n if not isinstance(self.nodata, dict):\n nodata = {c: self.nodata for c in cols}\n else:\n nodata = self.nodata\n for c in cols:\n mv = nodata.get(c, None)\n if mv is not None:\n is_nodata = np.isin(gdf[c], np.atleast_1d(mv))\n gdf[c] = np.where(is_nodata, np.nan, gdf[c])\n unit_names = list(self.unit_mult.keys()) + list(self.unit_add.\n keys())\n unit_names = [k for k in unit_names if k in gdf.columns]\n if len(unit_names) > 0:\n logger.debug(\n f'GeoDataFrame: Convert units for {len(unit_names)} columns.'\n )\n for name in list(set(unit_names)):\n m = self.unit_mult.get(name, 1)\n a = self.unit_add.get(name, 0)\n gdf[name] = gdf[name] * m + a\n gdf.attrs.update(self.meta)\n for col in self.attrs:\n if col in gdf.columns:\n gdf[col].attrs.update(**self.attrs[col])\n return gdf\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass GeoDataFrameAdapter(DataAdapter):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, path: str, driver: str=None, filesystem: str='local',\n crs: Union[int, str, dict]=None, nodata: Union[dict, float, int]=\n None, rename: dict={}, unit_mult: dict={}, unit_add: dict={}, meta:\n dict={}, attrs: dict={}, driver_kwargs: dict={}, name: str='',\n catalog_name: str='', provider=None, version=None, **kwargs):\n \"\"\"Initiate data adapter for geospatial vector data.\n\n This object contains all properties required to read supported files into\n a single unified :py:func:`geopandas.GeoDataFrame`.\n In addition it keeps meta data to be able to reproduce which data is used.\n\n Parameters\n ----------\n path: str, Path\n Path to data source. If the dataset consists of multiple files, the path may\n contain {variable} placeholders as well as path\n search pattern using a '*' wildcard.\n driver: {'vector', 'vector_table'}, optional\n Driver to read files with, for 'vector' :py:func:`~geopandas.read_file`,\n for {'vector_table'} :py:func:`hydromt.io.open_vector_from_table`\n By default the driver is inferred from the file extension and falls back to\n 'vector' if unknown.\n filesystem: {'local', 'gcs', 's3'}, optional\n Filesystem where the data is stored (local, cloud, http etc.).\n By default, local.\n crs: int, dict, or str, optional\n Coordinate Reference System. Accepts EPSG codes (int or str);\n proj (str or dict) or wkt (str). Only used if the data has no native CRS.\n nodata: dictionary, float, int, optional\n Missing value number. Only used if the data has no native missing value.\n Nodata values can be differentiated between variables using a dictionary.\n rename: dict, optional\n Mapping of native data source variable to output source variable name as\n required by hydroMT.\n unit_mult, unit_add: dict, optional\n Scaling multiplication and addition to change to map from the native\n data unit to the output data unit as required by hydroMT.\n meta: dict, optional\n Metadata information of dataset, prefably containing the following keys:\n {'source_version', 'source_url', 'source_license',\n 'paper_ref', 'paper_doi', 'category'}\n placeholders: dict, optional\n Placeholders to expand yaml entry to multiple entries (name and path)\n based on placeholder values\n attrs: dict, optional\n Additional attributes relating to data variables. For instance unit\n or long name of the variable.\n driver_kwargs, dict, optional\n Additional key-word arguments passed to the driver.\n name, catalog_name: str, optional\n Name of the dataset and catalog, optional for now.\n \"\"\"\n if kwargs:\n warnings.warn(\n \"Passing additional keyword arguments to be used by the GeoDataFrameAdapter driver is deprecated and will be removed in a future version. 
Please use 'driver_kwargs' instead.\"\n , DeprecationWarning)\n driver_kwargs.update(kwargs)\n super().__init__(path=path, driver=driver, filesystem=filesystem,\n nodata=nodata, rename=rename, unit_mult=unit_mult, unit_add=\n unit_add, meta=meta, attrs=attrs, driver_kwargs=driver_kwargs,\n name=name, catalog_name=catalog_name, provider=provider,\n version=version)\n self.crs = crs\n\n def to_file(self, data_root, data_name, bbox=None, driver=None,\n variables=None, logger=logger, **kwargs):\n \"\"\"Save a data slice to file.\n\n Parameters\n ----------\n data_root : str, Path\n Path to output folder\n data_name : str\n Name of output file without extension.\n bbox : array-like of floats\n (xmin, ymin, xmax, ymax) bounding box of area of interest.\n driver : str, optional\n Driver to write file, e.g.: 'GPKG', 'ESRI Shapefile' or any fiona data type,\n by default None\n variables : list of str, optional\n Names of GeoDataset variables to return. By default all dataset variables\n are returned.\n logger : logger object, optional\n The logger object used for logging messages. If not provided, the default\n logger will be used.\n **kwargs\n Additional keyword arguments that are passed to the geopandas driver.\n\n Returns\n -------\n fn_out: str\n Absolute path to output file\n driver: str\n Name of driver to read data with, see\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n kwargs.pop('time_tuple', None)\n gdf = self.get_data(bbox=bbox, variables=variables, logger=logger)\n if gdf.index.size == 0:\n return None, None, None\n read_kwargs = {}\n if driver is None:\n _lst = ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector_table']\n driver = 'csv' if self.driver in _lst else 'GPKG'\n if driver == 'csv':\n fn_out = join(data_root, f'{data_name}.csv')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to csv.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_csv(fn_out, **kwargs)\n read_kwargs['index_col'] = 0\n elif driver == 'parquet':\n fn_out = join(data_root, f'{data_name}.parquet')\n if not np.all(gdf.geometry.type == 'Point'):\n raise ValueError(\n f\"{data_name} contains other geometries than 'Point' which cannot be written to parquet.\"\n )\n gdf['x'], gdf['y'] = gdf.geometry.x, gdf.geometry.y\n gdf.drop(columns='geometry').to_parquet(fn_out, **kwargs)\n else:\n driver_extensions = {'ESRI Shapefile': '.shp'}\n ext = driver_extensions.get(driver, driver).lower()\n fn_out = join(data_root, f'{data_name}.{ext}')\n gdf.to_file(fn_out, driver=driver, **kwargs)\n driver = 'vector'\n return fn_out, driver, read_kwargs\n\n def get_data(self, bbox=None, geom=None, predicate='intersects', buffer\n =0, logger=logger, variables=None):\n \"\"\"Return a clipped and unified GeoDataFrame (vector).\n\n For a detailed description see:\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n if variables:\n variables = np.atleast_1d(variables).tolist()\n if 'storage_options' in self.driver_kwargs:\n raise NotImplementedError(\n 'Remote file storage_options not implemented for GeoDataFrame')\n _ = self.resolve_paths()\n kwargs = self.driver_kwargs.copy()\n clip_str = ''\n if geom is None and bbox is not None:\n geom = gpd.GeoDataFrame(geometry=[box(*bbox)], crs=4326)\n clip_str = ' and clip to bbox (epsg:4326)'\n elif geom is not None:\n clip_str = f' and clip to geom (epsg:{geom.crs.to_epsg():d})'\n if geom is not None:\n if 
geom.crs.is_geographic and buffer > 0:\n geom = geom.to_crs(3857)\n geom = geom.buffer(buffer)\n bbox_str = ', '.join([f'{c:.3f}' for c in geom.total_bounds])\n clip_str = f'{clip_str} [{bbox_str}]'\n if kwargs.pop('within', False):\n predicate = 'contains'\n logger.info(f'GeoDataFrame: Read {self.driver} data{clip_str}.')\n if self.driver in ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector',\n 'vector_table']:\n if 'driver' not in kwargs and self.driver in ['csv', 'xls',\n 'xlsx', 'xy']:\n warnings.warn(\n 'using the driver setting is deprecated. Please usevector_table instead.'\n )\n kwargs.update(driver=self.driver)\n gdf = io.open_vector(self.path, crs=self.crs, geom=geom,\n predicate=predicate, **kwargs)\n else:\n raise ValueError(f'GeoDataFrame: driver {self.driver} unknown.')\n if self.rename:\n rename = {k: v for k, v in self.rename.items() if k in gdf.columns}\n gdf = gdf.rename(columns=rename)\n if variables is not None:\n if np.any([(var not in gdf.columns) for var in variables]):\n raise ValueError(\n f'GeoDataFrame: Not all variables found: {variables}')\n if 'geometry' not in variables:\n variables = variables + ['geometry']\n gdf = gdf.loc[:, variables]\n if gdf.index.size == 0:\n logger.warning(\n f'GeoDataFrame: No data within spatial domain {self.path}.')\n else:\n cols = gdf.select_dtypes([np.number]).columns\n if self.nodata is not None and len(cols) > 0:\n if not isinstance(self.nodata, dict):\n nodata = {c: self.nodata for c in cols}\n else:\n nodata = self.nodata\n for c in cols:\n mv = nodata.get(c, None)\n if mv is not None:\n is_nodata = np.isin(gdf[c], np.atleast_1d(mv))\n gdf[c] = np.where(is_nodata, np.nan, gdf[c])\n unit_names = list(self.unit_mult.keys()) + list(self.unit_add.\n keys())\n unit_names = [k for k in unit_names if k in gdf.columns]\n if len(unit_names) > 0:\n logger.debug(\n f'GeoDataFrame: Convert units for {len(unit_names)} columns.'\n )\n for name in list(set(unit_names)):\n m = self.unit_mult.get(name, 1)\n a = self.unit_add.get(name, 0)\n gdf[name] = gdf[name] * m + a\n gdf.attrs.update(self.meta)\n for col in self.attrs:\n if col in gdf.columns:\n gdf[col].attrs.update(**self.attrs[col])\n return gdf\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass GeoDataFrameAdapter(DataAdapter):\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, path: str, driver: str=None, filesystem: str='local',\n crs: Union[int, str, dict]=None, nodata: Union[dict, float, int]=\n None, rename: dict={}, unit_mult: dict={}, unit_add: dict={}, meta:\n dict={}, attrs: dict={}, driver_kwargs: dict={}, name: str='',\n catalog_name: str='', provider=None, version=None, **kwargs):\n \"\"\"Initiate data adapter for geospatial vector data.\n\n This object contains all properties required to read supported files into\n a single unified :py:func:`geopandas.GeoDataFrame`.\n In addition it keeps meta data to be able to reproduce which data is used.\n\n Parameters\n ----------\n path: str, Path\n Path to data source. If the dataset consists of multiple files, the path may\n contain {variable} placeholders as well as path\n search pattern using a '*' wildcard.\n driver: {'vector', 'vector_table'}, optional\n Driver to read files with, for 'vector' :py:func:`~geopandas.read_file`,\n for {'vector_table'} :py:func:`hydromt.io.open_vector_from_table`\n By default the driver is inferred from the file extension and falls back to\n 'vector' if unknown.\n filesystem: {'local', 'gcs', 's3'}, optional\n Filesystem where the data is stored (local, cloud, http etc.).\n By default, local.\n crs: int, dict, or str, optional\n Coordinate Reference System. Accepts EPSG codes (int or str);\n proj (str or dict) or wkt (str). Only used if the data has no native CRS.\n nodata: dictionary, float, int, optional\n Missing value number. Only used if the data has no native missing value.\n Nodata values can be differentiated between variables using a dictionary.\n rename: dict, optional\n Mapping of native data source variable to output source variable name as\n required by hydroMT.\n unit_mult, unit_add: dict, optional\n Scaling multiplication and addition to change to map from the native\n data unit to the output data unit as required by hydroMT.\n meta: dict, optional\n Metadata information of dataset, prefably containing the following keys:\n {'source_version', 'source_url', 'source_license',\n 'paper_ref', 'paper_doi', 'category'}\n placeholders: dict, optional\n Placeholders to expand yaml entry to multiple entries (name and path)\n based on placeholder values\n attrs: dict, optional\n Additional attributes relating to data variables. For instance unit\n or long name of the variable.\n driver_kwargs, dict, optional\n Additional key-word arguments passed to the driver.\n name, catalog_name: str, optional\n Name of the dataset and catalog, optional for now.\n \"\"\"\n if kwargs:\n warnings.warn(\n \"Passing additional keyword arguments to be used by the GeoDataFrameAdapter driver is deprecated and will be removed in a future version. 
Please use 'driver_kwargs' instead.\"\n , DeprecationWarning)\n driver_kwargs.update(kwargs)\n super().__init__(path=path, driver=driver, filesystem=filesystem,\n nodata=nodata, rename=rename, unit_mult=unit_mult, unit_add=\n unit_add, meta=meta, attrs=attrs, driver_kwargs=driver_kwargs,\n name=name, catalog_name=catalog_name, provider=provider,\n version=version)\n self.crs = crs\n <function token>\n\n def get_data(self, bbox=None, geom=None, predicate='intersects', buffer\n =0, logger=logger, variables=None):\n \"\"\"Return a clipped and unified GeoDataFrame (vector).\n\n For a detailed description see:\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n if variables:\n variables = np.atleast_1d(variables).tolist()\n if 'storage_options' in self.driver_kwargs:\n raise NotImplementedError(\n 'Remote file storage_options not implemented for GeoDataFrame')\n _ = self.resolve_paths()\n kwargs = self.driver_kwargs.copy()\n clip_str = ''\n if geom is None and bbox is not None:\n geom = gpd.GeoDataFrame(geometry=[box(*bbox)], crs=4326)\n clip_str = ' and clip to bbox (epsg:4326)'\n elif geom is not None:\n clip_str = f' and clip to geom (epsg:{geom.crs.to_epsg():d})'\n if geom is not None:\n if geom.crs.is_geographic and buffer > 0:\n geom = geom.to_crs(3857)\n geom = geom.buffer(buffer)\n bbox_str = ', '.join([f'{c:.3f}' for c in geom.total_bounds])\n clip_str = f'{clip_str} [{bbox_str}]'\n if kwargs.pop('within', False):\n predicate = 'contains'\n logger.info(f'GeoDataFrame: Read {self.driver} data{clip_str}.')\n if self.driver in ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector',\n 'vector_table']:\n if 'driver' not in kwargs and self.driver in ['csv', 'xls',\n 'xlsx', 'xy']:\n warnings.warn(\n 'using the driver setting is deprecated. Please usevector_table instead.'\n )\n kwargs.update(driver=self.driver)\n gdf = io.open_vector(self.path, crs=self.crs, geom=geom,\n predicate=predicate, **kwargs)\n else:\n raise ValueError(f'GeoDataFrame: driver {self.driver} unknown.')\n if self.rename:\n rename = {k: v for k, v in self.rename.items() if k in gdf.columns}\n gdf = gdf.rename(columns=rename)\n if variables is not None:\n if np.any([(var not in gdf.columns) for var in variables]):\n raise ValueError(\n f'GeoDataFrame: Not all variables found: {variables}')\n if 'geometry' not in variables:\n variables = variables + ['geometry']\n gdf = gdf.loc[:, variables]\n if gdf.index.size == 0:\n logger.warning(\n f'GeoDataFrame: No data within spatial domain {self.path}.')\n else:\n cols = gdf.select_dtypes([np.number]).columns\n if self.nodata is not None and len(cols) > 0:\n if not isinstance(self.nodata, dict):\n nodata = {c: self.nodata for c in cols}\n else:\n nodata = self.nodata\n for c in cols:\n mv = nodata.get(c, None)\n if mv is not None:\n is_nodata = np.isin(gdf[c], np.atleast_1d(mv))\n gdf[c] = np.where(is_nodata, np.nan, gdf[c])\n unit_names = list(self.unit_mult.keys()) + list(self.unit_add.\n keys())\n unit_names = [k for k in unit_names if k in gdf.columns]\n if len(unit_names) > 0:\n logger.debug(\n f'GeoDataFrame: Convert units for {len(unit_names)} columns.'\n )\n for name in list(set(unit_names)):\n m = self.unit_mult.get(name, 1)\n a = self.unit_add.get(name, 0)\n gdf[name] = gdf[name] * m + a\n gdf.attrs.update(self.meta)\n for col in self.attrs:\n if col in gdf.columns:\n gdf[col].attrs.update(**self.attrs[col])\n return gdf\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass GeoDataFrameAdapter(DataAdapter):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def get_data(self, bbox=None, geom=None, predicate='intersects', buffer\n =0, logger=logger, variables=None):\n \"\"\"Return a clipped and unified GeoDataFrame (vector).\n\n For a detailed description see:\n :py:func:`~hydromt.data_catalog.DataCatalog.get_geodataframe`\n \"\"\"\n if variables:\n variables = np.atleast_1d(variables).tolist()\n if 'storage_options' in self.driver_kwargs:\n raise NotImplementedError(\n 'Remote file storage_options not implemented for GeoDataFrame')\n _ = self.resolve_paths()\n kwargs = self.driver_kwargs.copy()\n clip_str = ''\n if geom is None and bbox is not None:\n geom = gpd.GeoDataFrame(geometry=[box(*bbox)], crs=4326)\n clip_str = ' and clip to bbox (epsg:4326)'\n elif geom is not None:\n clip_str = f' and clip to geom (epsg:{geom.crs.to_epsg():d})'\n if geom is not None:\n if geom.crs.is_geographic and buffer > 0:\n geom = geom.to_crs(3857)\n geom = geom.buffer(buffer)\n bbox_str = ', '.join([f'{c:.3f}' for c in geom.total_bounds])\n clip_str = f'{clip_str} [{bbox_str}]'\n if kwargs.pop('within', False):\n predicate = 'contains'\n logger.info(f'GeoDataFrame: Read {self.driver} data{clip_str}.')\n if self.driver in ['csv', 'parquet', 'xls', 'xlsx', 'xy', 'vector',\n 'vector_table']:\n if 'driver' not in kwargs and self.driver in ['csv', 'xls',\n 'xlsx', 'xy']:\n warnings.warn(\n 'using the driver setting is deprecated. Please usevector_table instead.'\n )\n kwargs.update(driver=self.driver)\n gdf = io.open_vector(self.path, crs=self.crs, geom=geom,\n predicate=predicate, **kwargs)\n else:\n raise ValueError(f'GeoDataFrame: driver {self.driver} unknown.')\n if self.rename:\n rename = {k: v for k, v in self.rename.items() if k in gdf.columns}\n gdf = gdf.rename(columns=rename)\n if variables is not None:\n if np.any([(var not in gdf.columns) for var in variables]):\n raise ValueError(\n f'GeoDataFrame: Not all variables found: {variables}')\n if 'geometry' not in variables:\n variables = variables + ['geometry']\n gdf = gdf.loc[:, variables]\n if gdf.index.size == 0:\n logger.warning(\n f'GeoDataFrame: No data within spatial domain {self.path}.')\n else:\n cols = gdf.select_dtypes([np.number]).columns\n if self.nodata is not None and len(cols) > 0:\n if not isinstance(self.nodata, dict):\n nodata = {c: self.nodata for c in cols}\n else:\n nodata = self.nodata\n for c in cols:\n mv = nodata.get(c, None)\n if mv is not None:\n is_nodata = np.isin(gdf[c], np.atleast_1d(mv))\n gdf[c] = np.where(is_nodata, np.nan, gdf[c])\n unit_names = list(self.unit_mult.keys()) + list(self.unit_add.\n keys())\n unit_names = [k for k in unit_names if k in gdf.columns]\n if len(unit_names) > 0:\n logger.debug(\n f'GeoDataFrame: Convert units for {len(unit_names)} columns.'\n )\n for name in list(set(unit_names)):\n m = self.unit_mult.get(name, 1)\n a = self.unit_add.get(name, 0)\n gdf[name] = gdf[name] * m + a\n gdf.attrs.update(self.meta)\n for col in self.attrs:\n if col in gdf.columns:\n gdf[col].attrs.update(**self.attrs[col])\n return gdf\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass GeoDataFrameAdapter(DataAdapter):\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n"
] | false |
98,823 |
9a7c343e701a1e66530467b609cf1cc69761bf71
|
from crosswalk.authentication import AuthenticatedView
from crosswalk.models import Domain, Entity
from crosswalk.serializers import EntitySerializer
from crosswalk.utils import import_class
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.response import Response
class BestMatch(AuthenticatedView):
def post(self, request, domain):
"""
Get the best matched entity for a given query.
If the entity is an alias of another entity, the aliased entity is
returned.
"""
data = request.data.copy()
query_field = data.get("query_field")
query_value = data.get("query_value")
return_canonical = data.get("return_canonical", True)
block_attrs = data.get("block_attrs", {})
scorer_class = data.get("scorer", "fuzzywuzzy.default_process")
try:
scorer = import_class("crosswalk.scorers.{}".format(scorer_class))
except ImportError:
return Response(
"Invalid scorer.", status=status.HTTP_400_BAD_REQUEST
)
try:
domain = Domain.objects.get(slug=domain)
except ObjectDoesNotExist:
return Response(
"Domain not found.", status=status.HTTP_404_NOT_FOUND
)
entities = Entity.objects.filter(domain=domain)
entities = entities.filter(attributes__contains=block_attrs)
if entities.count() == 0:
return Response({}, status=status.HTTP_200_OK)
entity_values = [e.attributes[query_field] for e in entities]
match, score = scorer(query_value, entity_values)
entity = entities.filter(
**{"attributes__{}".format(query_field): match}
).first()
aliased = False
if return_canonical:
while entity.alias_for:
aliased = True
entity = entity.alias_for
return Response(
{
"entity": EntitySerializer(entity).data,
"match_score": score,
"aliased": aliased,
},
status=status.HTTP_200_OK,
)
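# A minimal client-side sketch of calling this view, assuming a hypothetical
# base URL, auth token and "states" domain slug (none of these appear in this
# file); the JSON keys mirror the fields read from request.data above.
#
#   import requests
#
#   resp = requests.post(
#       "https://example.com/api/best-match/states/",   # hypothetical URL
#       headers={"Authorization": "Token <api-token>"},  # hypothetical token
#       json={
#           "query_field": "name",
#           "query_value": "Kalifornia",
#           "block_attrs": {"country": "US"},
#           "scorer": "fuzzywuzzy.default_process",
#       },
#   )
#   # expected shape: {"entity": {...}, "match_score": <int>, "aliased": <bool>}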
|
[
"from crosswalk.authentication import AuthenticatedView\nfrom crosswalk.models import Domain, Entity\nfrom crosswalk.serializers import EntitySerializer\nfrom crosswalk.utils import import_class\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import status\nfrom rest_framework.response import Response\n\n\nclass BestMatch(AuthenticatedView):\n def post(self, request, domain):\n \"\"\"\n Get the best matched entity for a given query.\n\n If the entity is an alias of another entity, the aliased entity is\n returned.\n \"\"\"\n data = request.data.copy()\n query_field = data.get(\"query_field\")\n query_value = data.get(\"query_value\")\n return_canonical = data.get(\"return_canonical\", True)\n block_attrs = data.get(\"block_attrs\", {})\n scorer_class = data.get(\"scorer\", \"fuzzywuzzy.default_process\")\n\n try:\n scorer = import_class(\"crosswalk.scorers.{}\".format(scorer_class))\n except ImportError:\n return Response(\n \"Invalid scorer.\", status=status.HTTP_400_BAD_REQUEST\n )\n\n try:\n domain = Domain.objects.get(slug=domain)\n except ObjectDoesNotExist:\n return Response(\n \"Domain not found.\", status=status.HTTP_404_NOT_FOUND\n )\n\n entities = Entity.objects.filter(domain=domain)\n entities = entities.filter(attributes__contains=block_attrs)\n\n if entities.count() == 0:\n return Response({}, status=status.HTTP_200_OK)\n\n entity_values = [e.attributes[query_field] for e in entities]\n\n match, score = scorer(query_value, entity_values)\n\n entity = entities.filter(\n **{\"attributes__{}\".format(query_field): match}\n ).first()\n\n aliased = False\n\n if return_canonical:\n while entity.alias_for:\n aliased = True\n entity = entity.alias_for\n\n return Response(\n {\n \"entity\": EntitySerializer(entity).data,\n \"match_score\": score,\n \"aliased\": aliased,\n },\n status=status.HTTP_200_OK,\n )\n",
"from crosswalk.authentication import AuthenticatedView\nfrom crosswalk.models import Domain, Entity\nfrom crosswalk.serializers import EntitySerializer\nfrom crosswalk.utils import import_class\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import status\nfrom rest_framework.response import Response\n\n\nclass BestMatch(AuthenticatedView):\n\n def post(self, request, domain):\n \"\"\"\n Get the best matched entity for a given query.\n\n If the entity is an alias of another entity, the aliased entity is\n returned.\n \"\"\"\n data = request.data.copy()\n query_field = data.get('query_field')\n query_value = data.get('query_value')\n return_canonical = data.get('return_canonical', True)\n block_attrs = data.get('block_attrs', {})\n scorer_class = data.get('scorer', 'fuzzywuzzy.default_process')\n try:\n scorer = import_class('crosswalk.scorers.{}'.format(scorer_class))\n except ImportError:\n return Response('Invalid scorer.', status=status.\n HTTP_400_BAD_REQUEST)\n try:\n domain = Domain.objects.get(slug=domain)\n except ObjectDoesNotExist:\n return Response('Domain not found.', status=status.\n HTTP_404_NOT_FOUND)\n entities = Entity.objects.filter(domain=domain)\n entities = entities.filter(attributes__contains=block_attrs)\n if entities.count() == 0:\n return Response({}, status=status.HTTP_200_OK)\n entity_values = [e.attributes[query_field] for e in entities]\n match, score = scorer(query_value, entity_values)\n entity = entities.filter(**{'attributes__{}'.format(query_field):\n match}).first()\n aliased = False\n if return_canonical:\n while entity.alias_for:\n aliased = True\n entity = entity.alias_for\n return Response({'entity': EntitySerializer(entity).data,\n 'match_score': score, 'aliased': aliased}, status=status.\n HTTP_200_OK)\n",
"<import token>\n\n\nclass BestMatch(AuthenticatedView):\n\n def post(self, request, domain):\n \"\"\"\n Get the best matched entity for a given query.\n\n If the entity is an alias of another entity, the aliased entity is\n returned.\n \"\"\"\n data = request.data.copy()\n query_field = data.get('query_field')\n query_value = data.get('query_value')\n return_canonical = data.get('return_canonical', True)\n block_attrs = data.get('block_attrs', {})\n scorer_class = data.get('scorer', 'fuzzywuzzy.default_process')\n try:\n scorer = import_class('crosswalk.scorers.{}'.format(scorer_class))\n except ImportError:\n return Response('Invalid scorer.', status=status.\n HTTP_400_BAD_REQUEST)\n try:\n domain = Domain.objects.get(slug=domain)\n except ObjectDoesNotExist:\n return Response('Domain not found.', status=status.\n HTTP_404_NOT_FOUND)\n entities = Entity.objects.filter(domain=domain)\n entities = entities.filter(attributes__contains=block_attrs)\n if entities.count() == 0:\n return Response({}, status=status.HTTP_200_OK)\n entity_values = [e.attributes[query_field] for e in entities]\n match, score = scorer(query_value, entity_values)\n entity = entities.filter(**{'attributes__{}'.format(query_field):\n match}).first()\n aliased = False\n if return_canonical:\n while entity.alias_for:\n aliased = True\n entity = entity.alias_for\n return Response({'entity': EntitySerializer(entity).data,\n 'match_score': score, 'aliased': aliased}, status=status.\n HTTP_200_OK)\n",
"<import token>\n\n\nclass BestMatch(AuthenticatedView):\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,824 |
91b0b52e36b21acc2e6a1a3c4db20131d590540a
|
#TODO: fix volume bar not going to zero
#TODO: replace zero volume with mute if supported under Debian
#TODO: add animation options (e.g. slide in, fade in, etc.)
#TODO: add numbers to the bar
#TODO: add support for Windows/Mac (e.g. scroll support)
#TODO: allow bar to be attached to different sides
#TODO: allow customization of bar thickness
#TODO: make bar opacity adapt to darkness/brightness of background
#TODO: modifier key makes scroll change channel pan instead
# DEPENDENCIES:
# (all working with most recent versions as of 04/12/2018)
# sudo apt install python3-tk
# sudo apt install python3-dbus # maybe?
# sudo apt install libasound2-dev
# pip3 install pyalsaaudio --user
# pip3 install plyer --user
# pip3 install psutil --user
# BUILTIN MODULES #
import sys
import time # used for delays
import math
from _thread import start_new_thread # used to run functions in parallel
import tkinter as Tk
from subprocess import call
from os.path import realpath
# SITE PACKAGES #
from plyer import notification
import alsaaudio as al
# LOCAL MODULES
import volux.temperatures as temps
from volux.dp_datatools import LivePercentage, clamp
from volux.VolumeAssistant import VolumeAssistant
from volux.VolumeBar import VolumeBar
### ---- PREFERENCES ---- ###
program_title = "volux"
program_icon = realpath('icon.png')
sound_device = "Master"
default_mixer_name = "Master"
default_opacity = 0.5
outside_zone_opacity = 0.1
bar_height = 5
### ---- SETUP STUFF ---- ###
VolAs = VolumeAssistant() # initialise a Volume Assistant object
VolBar = VolumeBar()
coreWatch = temps.CoreWatch(temps.get_cores()) # start watching cores for temperature issues
### DEFINE STATES
class VolumeMode:
    def __init__(self): pass
def enter(self):
VolBar.mode = VolBar.modes['volume']
VolAs.mixer.setmute(0)
def vacate(self):
VolBar.mode = VolBar.modes['unknown']
class MuteMode:
def __init__(self): pass
def enter(self):
VolBar.mode = VolBar.modes['muted']
VolAs.mixer.setmute(1)
def vacate(self):
VolBar.mode = VolBar.modes['unknown']
class BrightnessMode:
def __init__(self): pass
def enter(self):
VolBar.mode = VolBar.modes['brightness']
def vacate(self):
VolBar.mode = VolBar.modes['unknown']
if VolAs.ismuted() == True:
return(MuteMode)
elif VolAs.ismuted() == False:
return(VolumeMode)
else: raise TypeError("_ismuted should be a bool value")
### DEFINE STATE MANAGER
class StateManager:
def __init__(self,initial_state):
self.state = initial_state
def change_state(self,new_state): # request to change states
self.state().vacate()
new_state().enter()
self.state = new_state
### CREATE A STATE MANAGER
sm = StateManager(VolumeMode)
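# A small illustration of the transition flow, using only objects defined above:
#
#   sm.change_state(MuteMode)    # VolumeMode().vacate() -> bar mode 'unknown',
#                                # then MuteMode().enter() -> bar mode 'muted', mixer muted
#   sm.change_state(VolumeMode)  # back to the 'volume' bar mode, mixer unmuted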
### ---- TKINTER STUFF BEGINS ---- ###
root = Tk.Tk()
class Window(Tk.Frame):
def __init__(self,master=None):
Tk.Frame.__init__(self,master)
self.master = master
self._init_objects()
self._init_window()
self._open_message()
def _init_objects(self):
self.displaySize = VolAs._get_display_size(root) # max size of the percentage bar in pixels
self.barWidth = LivePercentage(0,self.displaySize['x']) # set width of bar
def _init_window(self):
m = self.master
m.title("Please submit an issue to Github if you see this!")
        self.barHeight = bar_height # set height of bar
barContainer = Tk.Frame(m)
barContainer.configure(background="BLACK")
barContainer.pack(fill=Tk.BOTH,expand=1)
self.bar = Tk.Frame(barContainer) # create the bar
self._update_bar() # update bar values
def _adjust_bar(event,movement):
if type(movement) == int: # if movement is an integer
#self.barMode = self.barModes['volume']
notchMultiplier = 5 # impact of a single scroll notch on percentage
newVol = VolAs.volume + movement*notchMultiplier
VolAs.volume = clamp(newVol,0,100)
else: raise TypeError("Value should be an integer! Not sure what happened!")
self._update_bar() # update the bar's graphical appearance
self._update_volume() # update the system volume
#TODO: support for Windows/Mac scrolling
def _scroll_up(event):
if sm.state == VolumeMode:
_adjust_bar(event,+1)
elif sm.state == BrightnessMode:
_brightness_up()
elif sm.state == MuteMode:
sm.change_state(VolumeMode)
self._update_bar()
def _scroll_down(event):
if sm.state == VolumeMode:
_adjust_bar(event,-1)
elif sm.state == BrightnessMode:
_brightness_down()
elif sm.state == MuteMode:
sm.change_state(VolumeMode)
self._update_bar()
def _middle_click(event):
if sm.state == VolumeMode: # if unmuted
sm.change_state(MuteMode) # change to muted
self._update_bar()
elif sm.state == MuteMode: # if unmuted
sm.change_state(VolumeMode) # change to muted
self._update_bar()
        def _key_pressed(event):
            print("key pressed",event.keysym)
        def _key_released(event):
            print("key released",event.keysym)
        def _brightness_up(event=None): print("WIP:"+"UP")
        def _brightness_down(event=None): print("WIP:"+"DOWN")
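        # One possible (untested here) way to fill in these stubs on Linux is to
        # shell out to a backlight utility, assuming xbacklight is installed:
        #   call(["xbacklight","-inc","5"]) # raise screen brightness by 5%
        #   call(["xbacklight","-dec","5"]) # lower screen brightness by 5%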
def _right_click(event):
if sm.state == BrightnessMode:
sm.change_state(sm.state().vacate())
else:
sm.change_state(BrightnessMode)
self._update_bar()
#print("brightness mode!")
        def _brightness_mode_off(): # currently unbound helper
            sm.change_state(sm.state().vacate())
            self._update_bar()
            print("brightness mode off!")
self.bar.pack(fill=Tk.Y,ipadx=5,ipady=5,side=Tk.LEFT)
m.bind("<MouseWheel>",_adjust_bar)
m.bind("<Button-2>",_middle_click)
m.bind("<Button-4>",_scroll_up)
m.bind("<Button-5>",_scroll_down)
m.bind("<Button-3>",_right_click)
m.bind("<Control-Button-4>",_brightness_up)
m.bind("<Control-Button-5>",_brightness_down)
m.bind("<Double-Button-3>",self._exit_app)
barContainer.bind("<Enter>",self._mouse_entered)
barContainer.bind("<Leave>",self._mouse_left)
def _update_loop(self,ms_per_loop=1000):
root.lift() # ensure window on top of others
self._update_bar() # update bar graphics
self.after(ms_per_loop,self._update_loop) # repeat _update_loop()
def _update_bar(self):
modeColor = VolBar.mode.color # set background based on mode color
self.barWidth.setPerc(VolAs.volume) # set the width as a percentage
newWidth = self.barWidth.getNum() # get a numerical version of the percentage
self.bar.configure(background=modeColor,width=str(newWidth)) # update the bar with these settings
def _update_volume(self):
        try: VolAs.mixer.setvolume(VolAs.volume) # Window has no mixer of its own
        except Exception: call(["amixer","sset",str(VolAs.device),str(VolAs.volume)+"%","-q"]) # fall back to amixer
    def _update_mute(self):
        muted = VolAs.mixer.getmute() # toggle mute via the shared mixer
        if muted[0] == True: VolAs.mixer.setmute(0)
        elif muted[0] == False: VolAs.mixer.setmute(1)
        else: raise Exception("mixer's .getmute()[0] method should return True or False!")
def _mouse_entered(self,event): root.wm_attributes("-alpha",default_opacity)
def _mouse_left(self,event): root.wm_attributes("-alpha",outside_zone_opacity)
def _open_message(self):
notification.notify(
title=program_title,
message="{} launched!".format(program_title),
app_name=program_title,
app_icon=program_icon,
timeout=5)
def _exit_app(self,event):
notification.notify(
title=program_title,
message="{} now closing...".format(program_title),
app_name=program_title,
app_icon=program_icon,
timeout=10)
exit()
app = Window(root)
dispSize = VolAs._get_display_size(root)
overlay_w = dispSize['x']
overlay_h = app.barHeight
windowOffsets = {'x': 0,
'y': dispSize['y']-app.barHeight}
root.geometry("{}x{}+{}+{}".format(overlay_w,overlay_h,
windowOffsets['x'],windowOffsets['y'])) # define the size of the window
root.attributes("-topmost",True) # force window to stay on top (doesn't work in full screen applications)
root.overrideredirect(1) # remove frame of window
root.wait_visibility(root) # required for window transparency
root.wm_attributes("-alpha",outside_zone_opacity) # make window transparent
root.title(program_title)
print(sys.argv[0])
if '__main__.py' in sys.argv[0]:
app._update_loop() # must be before main loop
root.mainloop()
|
[
"#TODO: fix volume bar not going to zero\n#TODO: replace zero volume with mute if supported under Debian\n#TODO: add animation options (e.g. slide in, fade in, etc.)\n#TODO: add numbers to the bar\n#TODO: add support for Windows/Mac (e.g. scroll support)\n#TODO: allow bar to be attached to different sides\n#TODO: allow customization of bar thickness\n#TODO: make bar opacity adapt to darkness/brightness of background\n#TODO: modifier key makes scroll change channel pan instead\n\n# DEPENDENCIES:\n# (all working with most recent versions as of 04/12/2018)\n# sudo apt install python3-tk\n# sudo apt install python3-dbus # maybe?\n# sudo apt install libasound2-dev\n# pip3 install pyalsaaudio --user\n# pip3 install plyer --user\n# pip3 install psutil --user\n\n# BUILTIN MODULES #\nimport sys\nimport time # used for delays\nimport math\nfrom _thread import start_new_thread # used to run functions in parallel\nimport tkinter as Tk\nfrom subprocess import call\nfrom os.path import realpath\n# SITE PACKAGES #\nfrom plyer import notification\nimport alsaaudio as al\n# LOCAL MODULES\nimport volux.temperatures as temps\nfrom volux.dp_datatools import LivePercentage, clamp\nfrom volux.VolumeAssistant import VolumeAssistant\nfrom volux.VolumeBar import VolumeBar\n### ---- PREFERENCES ---- ###\nprogram_title = \"volux\"\nprogram_icon = realpath('icon.png')\nsound_device = \"Master\"\ndefault_mixer_name = \"Master\"\ndefault_opacity = 0.5\noutside_zone_opacity = 0.1\nbar_height = 5\n### ---- SETUP STUFF ---- ###\nVolAs = VolumeAssistant() # initialise a Volume Assistant object\nVolBar = VolumeBar()\ncoreWatch = temps.CoreWatch(temps.get_cores()) # start watching cores for temperature issues\n\n### DEFINE STATES\nclass VolumeMode:\n def __init__(self):pass\n def enter(self):\n VolBar.mode = VolBar.modes['volume']\n VolAs.mixer.setmute(0)\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\nclass MuteMode:\n def __init__(self): pass\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\nclass BrightnessMode:\n def __init__(self): pass\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return(MuteMode)\n elif VolAs.ismuted() == False:\n return(VolumeMode)\n else: raise TypeError(\"_ismuted should be a bool value\")\n### DEFINE STATE MANAGER\nclass StateManager:\n def __init__(self,initial_state):\n self.state = initial_state\n def change_state(self,new_state): # request to change states\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n### CREATE A STATE MANAGER\nsm = StateManager(VolumeMode)\n\n### ---- TKINTER STUFF BEGINS ---- ###\nroot = Tk.Tk()\nclass Window(Tk.Frame):\n def __init__(self,master=None):\n Tk.Frame.__init__(self,master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root) # max size of the percentage bar in pixels\n self.barWidth = LivePercentage(0,self.displaySize['x']) # set width of bar\n def _init_window(self):\n m = self.master\n m.title(\"Please submit an issue to Github if you see this!\")\n self.barHeight = bar_height # set height of bar self._update_bar()\n barContainer = Tk.Frame(m)\n barContainer.configure(background=\"BLACK\")\n barContainer.pack(fill=Tk.BOTH,expand=1)\n self.bar = Tk.Frame(barContainer) # create the bar\n self._update_bar() # 
update bar values\n def _adjust_bar(event,movement):\n if type(movement) == int: # if movement is an integer\n #self.barMode = self.barModes['volume']\n notchMultiplier = 5 # impact of a single scroll notch on percentage\n newVol = VolAs.volume + movement*notchMultiplier\n VolAs.volume = clamp(newVol,0,100)\n else: raise TypeError(\"Value should be an integer! Not sure what happened!\")\n self._update_bar() # update the bar's graphical appearance\n self._update_volume() # update the system volume\n #TODO: support for Windows/Mac scrolling\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event,+1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event,-1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n def _middle_click(event):\n if sm.state == VolumeMode: # if unmuted\n sm.change_state(MuteMode) # change to muted\n self._update_bar()\n elif sm.state == MuteMode: # if unmuted\n sm.change_state(VolumeMode) # change to muted\n self._update_bar()\n def _key_pressed(event):\n print(\"key pressed\",event.key)\n def _key_released(event):\n print(\"key released\",event.key)\n def _brightness_up(): print(\"WIP:\"+\"UP\")\n def _brightness_down(): print(\"WIP:\"+\"DOWN\")\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n #print(\"brightness mode!\")\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print(\"brightness mode off!\")\n self.bar.pack(fill=Tk.Y,ipadx=5,ipady=5,side=Tk.LEFT)\n m.bind(\"<MouseWheel>\",_adjust_bar)\n m.bind(\"<Button-2>\",_middle_click)\n m.bind(\"<Button-4>\",_scroll_up)\n m.bind(\"<Button-5>\",_scroll_down)\n m.bind(\"<Button-3>\",_right_click)\n m.bind(\"<Control-Button-4>\",_brightness_up)\n m.bind(\"<Control-Button-5>\",_brightness_down)\n m.bind(\"<Double-Button-3>\",self._exit_app)\n barContainer.bind(\"<Enter>\",self._mouse_entered)\n barContainer.bind(\"<Leave>\",self._mouse_left)\n def _update_loop(self,ms_per_loop=1000):\n root.lift() # ensure window on top of others\n self._update_bar() # update bar graphics\n self.after(ms_per_loop,self._update_loop) # repeat _update_loop()\n def _update_bar(self):\n modeColor = VolBar.mode.color # set background based on mode color\n self.barWidth.setPerc(VolAs.volume) # set the width as a percentage\n newWidth = self.barWidth.getNum() # get a numerical version of the percentage\n self.bar.configure(background=modeColor,width=str(newWidth)) # update the bar with these settings\n def _update_volume(self):\n try: self.mixer.setvolume(VolAs.volume)\n except: call([\"amixer\",\"sset\",str(VolAs.device),str(VolAs.volume)+\"%\",\"-q\"])\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True: self.mixer.setmute(0)\n elif muted[0] == False: self.mixer.setmute(1)\n else: raise Exception(\"mixer's .getmute()[0] method should return True or False!\")\n def _mouse_entered(self,event): root.wm_attributes(\"-alpha\",default_opacity)\n def _mouse_left(self,event): root.wm_attributes(\"-alpha\",outside_zone_opacity)\n def _open_message(self):\n notification.notify(\n title=program_title,\n message=\"{} launched!\".format(program_title),\n app_name=program_title,\n 
app_icon=program_icon,\n timeout=5)\n def _exit_app(self,event):\n notification.notify(\n title=program_title,\n message=\"{} now closing...\".format(program_title),\n app_name=program_title,\n app_icon=program_icon,\n timeout=10)\n exit()\n \napp = Window(root)\ndispSize = VolAs._get_display_size(root)\noverlay_w = dispSize['x']\noverlay_h = app.barHeight\nwindowOffsets = {'x': 0,\n 'y': dispSize['y']-app.barHeight}\nroot.geometry(\"{}x{}+{}+{}\".format(overlay_w,overlay_h,\n windowOffsets['x'],windowOffsets['y'])) # define the size of the window\nroot.attributes(\"-topmost\",True) # force window to stay on top (doesn't work in full screen applications)\nroot.overrideredirect(1) # remove frame of window\nroot.wait_visibility(root) # required for window transparency\nroot.wm_attributes(\"-alpha\",outside_zone_opacity) # make window transparent\nroot.title(program_title)\n\nprint(sys.argv[0])\nif '__main__.py' in sys.argv[0]:\n app._update_loop() # must be before main loop\n root.mainloop()\n",
"import sys\nimport time\nimport math\nfrom _thread import start_new_thread\nimport tkinter as Tk\nfrom subprocess import call\nfrom os.path import realpath\nfrom plyer import notification\nimport alsaaudio as al\nimport volux.temperatures as temps\nfrom volux.dp_datatools import LivePercentage, clamp\nfrom volux.VolumeAssistant import VolumeAssistant\nfrom volux.VolumeBar import VolumeBar\nprogram_title = 'volux'\nprogram_icon = realpath('icon.png')\nsound_device = 'Master'\ndefault_mixer_name = 'Master'\ndefault_opacity = 0.5\noutside_zone_opacity = 0.1\nbar_height = 5\nVolAs = VolumeAssistant()\nVolBar = VolumeBar()\ncoreWatch = temps.CoreWatch(temps.get_cores())\n\n\nclass VolumeMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['volume']\n VolAs.mixer.setmute(0)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass MuteMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\nsm = StateManager(VolumeMode)\nroot = Tk.Tk()\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! 
Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\napp = Window(root)\ndispSize = VolAs._get_display_size(root)\noverlay_w = dispSize['x']\noverlay_h = app.barHeight\nwindowOffsets = {'x': 0, 'y': dispSize['y'] - app.barHeight}\nroot.geometry('{}x{}+{}+{}'.format(overlay_w, overlay_h, windowOffsets['x'],\n windowOffsets['y']))\nroot.attributes('-topmost', True)\nroot.overrideredirect(1)\nroot.wait_visibility(root)\nroot.wm_attributes('-alpha', outside_zone_opacity)\nroot.title(program_title)\nprint(sys.argv[0])\nif '__main__.py' in sys.argv[0]:\n app._update_loop()\n 
root.mainloop()\n",
"<import token>\nprogram_title = 'volux'\nprogram_icon = realpath('icon.png')\nsound_device = 'Master'\ndefault_mixer_name = 'Master'\ndefault_opacity = 0.5\noutside_zone_opacity = 0.1\nbar_height = 5\nVolAs = VolumeAssistant()\nVolBar = VolumeBar()\ncoreWatch = temps.CoreWatch(temps.get_cores())\n\n\nclass VolumeMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['volume']\n VolAs.mixer.setmute(0)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass MuteMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\nsm = StateManager(VolumeMode)\nroot = Tk.Tk()\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! 
Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\napp = Window(root)\ndispSize = VolAs._get_display_size(root)\noverlay_w = dispSize['x']\noverlay_h = app.barHeight\nwindowOffsets = {'x': 0, 'y': dispSize['y'] - app.barHeight}\nroot.geometry('{}x{}+{}+{}'.format(overlay_w, overlay_h, windowOffsets['x'],\n windowOffsets['y']))\nroot.attributes('-topmost', True)\nroot.overrideredirect(1)\nroot.wait_visibility(root)\nroot.wm_attributes('-alpha', outside_zone_opacity)\nroot.title(program_title)\nprint(sys.argv[0])\nif '__main__.py' in sys.argv[0]:\n app._update_loop()\n 
root.mainloop()\n",
"<import token>\n<assignment token>\n\n\nclass VolumeMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['volume']\n VolAs.mixer.setmute(0)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass MuteMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! 
Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\nroot.geometry('{}x{}+{}+{}'.format(overlay_w, overlay_h, windowOffsets['x'],\n windowOffsets['y']))\nroot.attributes('-topmost', True)\nroot.overrideredirect(1)\nroot.wait_visibility(root)\nroot.wm_attributes('-alpha', outside_zone_opacity)\nroot.title(program_title)\nprint(sys.argv[0])\nif '__main__.py' in sys.argv[0]:\n app._update_loop()\n root.mainloop()\n",
"<import token>\n<assignment token>\n\n\nclass VolumeMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['volume']\n VolAs.mixer.setmute(0)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass MuteMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! 
Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass VolumeMode:\n <function token>\n\n def enter(self):\n VolBar.mode = VolBar.modes['volume']\n VolAs.mixer.setmute(0)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass MuteMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! 
Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass VolumeMode:\n <function token>\n <function token>\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass MuteMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! 
Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass VolumeMode:\n <function token>\n <function token>\n <function token>\n\n\nclass MuteMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n 
root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass MuteMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, 
self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass MuteMode:\n <function token>\n\n def enter(self):\n VolBar.mode = VolBar.modes['muted']\n VolAs.mixer.setmute(1)\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, 
self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass MuteMode:\n <function token>\n <function token>\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n 
self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass MuteMode:\n <function token>\n <function token>\n <function token>\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n 
newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n\n def enter(self):\n VolBar.mode = VolBar.modes['brightness']\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n 
self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass BrightnessMode:\n\n def __init__(self):\n pass\n <function token>\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def 
_update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass BrightnessMode:\n <function token>\n <function token>\n\n def vacate(self):\n VolBar.mode = VolBar.modes['unknown']\n if VolAs.ismuted() == True:\n return MuteMode\n elif VolAs.ismuted() == False:\n return VolumeMode\n else:\n raise TypeError('_ismuted should be a bool value')\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def 
_update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass BrightnessMode:\n <function token>\n <function token>\n <function token>\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if 
muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass StateManager:\n\n def __init__(self, initial_state):\n self.state = initial_state\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n 
self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass StateManager:\n <function token>\n\n def change_state(self, new_state):\n self.state().vacate()\n new_state().enter()\n self.state = new_state\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n 
\"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass StateManager:\n <function token>\n <function token>\n\n\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, 
event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n\n def _init_window(self):\n m = self.master\n m.title('Please submit an issue to Github if you see this!')\n self.barHeight = bar_height\n barContainer = Tk.Frame(m)\n barContainer.configure(background='BLACK')\n barContainer.pack(fill=Tk.BOTH, expand=1)\n self.bar = Tk.Frame(barContainer)\n self._update_bar()\n\n def _adjust_bar(event, movement):\n if type(movement) == int:\n notchMultiplier = 5\n newVol = VolAs.volume + movement * notchMultiplier\n VolAs.volume = clamp(newVol, 0, 100)\n else:\n raise TypeError(\n 'Value should be an integer! Not sure what happened!')\n self._update_bar()\n self._update_volume()\n\n def _scroll_up(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, +1)\n elif sm.state == BrightnessMode:\n _brightness_up()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _scroll_down(event):\n if sm.state == VolumeMode:\n _adjust_bar(event, -1)\n elif sm.state == BrightnessMode:\n _brightness_down()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _middle_click(event):\n if sm.state == VolumeMode:\n sm.change_state(MuteMode)\n self._update_bar()\n elif sm.state == MuteMode:\n sm.change_state(VolumeMode)\n self._update_bar()\n\n def _key_pressed(event):\n print('key pressed', event.key)\n\n def _key_released(event):\n print('key released', event.key)\n\n def _brightness_up():\n print('WIP:' + 'UP')\n\n def _brightness_down():\n print('WIP:' + 'DOWN')\n\n def _right_click(event):\n if sm.state == BrightnessMode:\n sm.change_state(sm.state().vacate())\n else:\n sm.change_state(BrightnessMode)\n self._update_bar()\n\n def _brightness_mode_off():\n sm.state_change(sm.state.vacate())\n self.barMode = self.barModes['default']\n self._update_bar()\n print('brightness mode off!')\n self.bar.pack(fill=Tk.Y, ipadx=5, ipady=5, side=Tk.LEFT)\n m.bind('<MouseWheel>', _adjust_bar)\n m.bind('<Button-2>', _middle_click)\n m.bind('<Button-4>', _scroll_up)\n m.bind('<Button-5>', _scroll_down)\n m.bind('<Button-3>', _right_click)\n m.bind('<Control-Button-4>', _brightness_up)\n m.bind('<Control-Button-5>', _brightness_down)\n m.bind('<Double-Button-3>', self._exit_app)\n barContainer.bind('<Enter>', self._mouse_entered)\n barContainer.bind('<Leave>', self._mouse_left)\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', 
default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n <function token>\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n\n def _update_volume(self):\n try:\n self.mixer.setvolume(VolAs.volume)\n except:\n call(['amixer', 'sset', str(VolAs.device), str(VolAs.volume) +\n '%', '-q'])\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n\n def _init_objects(self):\n self.displaySize = VolAs._get_display_size(root)\n self.barWidth = LivePercentage(0, self.displaySize['x'])\n <function token>\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n <function token>\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n <function token>\n <function token>\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n <function token>\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n\n def _mouse_left(self, event):\n root.wm_attributes('-alpha', outside_zone_opacity)\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n <function token>\n <function token>\n\n def _update_loop(self, ms_per_loop=1000):\n root.lift()\n self._update_bar()\n self.after(ms_per_loop, self._update_loop)\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n <function token>\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n <function token>\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n <function token>\n <function token>\n <function token>\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n <function token>\n\n def _update_mute(self):\n muted = self.mixer.getmute()\n if muted[0] == True:\n self.mixer.setmute(0)\n elif muted[0] == False:\n self.mixer.setmute(1)\n else:\n raise Exception(\n \"mixer's .getmute()[0] method should return True or False!\")\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n <function token>\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n\n def __init__(self, master=None):\n Tk.Frame.__init__(self, master)\n self.master = master\n self._init_objects()\n self._init_window()\n self._open_message()\n <function token>\n <function token>\n <function token>\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n <function token>\n <function token>\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n <function token>\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n <function token>\n <function token>\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n <function token>\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n\n def _exit_app(self, event):\n notification.notify(title=program_title, message=\n '{} now closing...'.format(program_title), app_name=\n program_title, app_icon=program_icon, timeout=10)\n exit()\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n <function token>\n <function token>\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n <function token>\n\n def _open_message(self):\n notification.notify(title=program_title, message='{} launched!'.\n format(program_title), app_name=program_title, app_icon=\n program_icon, timeout=5)\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n <function token>\n <function token>\n\n def _mouse_entered(self, event):\n root.wm_attributes('-alpha', default_opacity)\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _update_bar(self):\n modeColor = VolBar.mode.color\n self.barWidth.setPerc(VolAs.volume)\n newWidth = self.barWidth.getNum()\n self.bar.configure(background=modeColor, width=str(newWidth))\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n\n\nclass Window(Tk.Frame):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<assignment token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
98,825 |
f80f8b6d63193e9a9007935484c7336d1dc6e983
|
__author__ = 'Wout & thijs'
import argparse
#from lda_images.lda_learner import *
from lda_images.new_lda_learner import *
# Given a dataset and a number of topics, train a neural network
# to map image representations onto topic distributions.
def main(params):
dataset = params['dataset']
topics = params['topics']
rate = params['rate']
iterations = params['iterations']
hidden_layers = params['hidden']
layers = params['layers']
pert = params['pert']
    networkLearner = LDANetworkLearner(dataset, topics, rate, hidden_layers, layers, pert)
networkLearner.learnNetwork(iterations)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', dest='dataset', default='flickr30k', help='dataset: flickr8k/flickr30k')
parser.add_argument('-t', '--topics', dest='topics', type=int, default=120, help='Number of topics to learn lda model')
    parser.add_argument('-i', '--iterations', dest='iterations', type=int, default=1000000, help='Number of iterations for training the network')
parser.add_argument('-r', '--rate', dest='rate', type=float, default=0.001, help='Training rate for the neural network')
parser.add_argument('-hidden', '--hidden', dest='hidden', type=int, default=256, help='Number of hidden neurons per layer')
parser.add_argument('-l', '--layers', dest='layers', type=int, default=1, help='Number of hidden layers')
    parser.add_argument('-pert', '--pert', dest='pert', type=int, default=0, help="=0 if you don't want to use the perturbed dataset")
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
main(params)
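# Illustrative invocation (a sketch only: the script's filename is not stated in this
# record, so "lda_network.py" below is an assumption; the flags match the parser above):
#   python lda_network.py --dataset flickr8k --topics 120 --rate 0.001 \
#       --hidden 256 --layers 1 --iterations 1000000 --pert 0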
|
[
"__author__ = 'Wout & thijs'\n\nimport argparse\n#from lda_images.lda_learner import *\nfrom lda_images.new_lda_learner import *\n\n# given a dataset and an amount of topics, train a neural network\n# to map image representations onto topic distribtutions\ndef main(params):\n dataset = params['dataset']\n topics = params['topics']\n rate = params['rate']\n iterations = params['iterations']\n hidden_layers = params['hidden']\n layers = params['layers']\n pert = params['pert']\n networkLearner = LDANetworkLearner(dataset, topics, rate, hidden_layers,layers, pert)\n networkLearner.learnNetwork(iterations)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--dataset', dest='dataset', default='flickr30k', help='dataset: flickr8k/flickr30k')\n parser.add_argument('-t', '--topics', dest='topics', type=int, default=120, help='Number of topics to learn lda model')\n parser.add_argument('-i', '--iterations', dest='iterations', type=int, default= 1000000, help='Number of iterations for training the network')\n parser.add_argument('-r', '--rate', dest='rate', type=float, default=0.001, help='Training rate for the neural network')\n parser.add_argument('-hidden', '--hidden', dest='hidden', type=int, default=256, help='Number of hidden neurons per layer')\n parser.add_argument('-l', '--layers', dest='layers', type=int, default=1, help='Number of hidden layers')\n parser.add_argument('-pert', '--pert', dest='pert', type=int, default=0, help=\"=0 if you dont want to use perturbed dataset\")\n args = parser.parse_args()\n params = vars(args) # convert to ordinary dict\n main(params)\n",
"__author__ = 'Wout & thijs'\nimport argparse\nfrom lda_images.new_lda_learner import *\n\n\ndef main(params):\n dataset = params['dataset']\n topics = params['topics']\n rate = params['rate']\n iterations = params['iterations']\n hidden_layers = params['hidden']\n layers = params['layers']\n pert = params['pert']\n networkLearner = LDANetworkLearner(dataset, topics, rate, hidden_layers,\n layers, pert)\n networkLearner.learnNetwork(iterations)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--dataset', dest='dataset', default=\n 'flickr30k', help='dataset: flickr8k/flickr30k')\n parser.add_argument('-t', '--topics', dest='topics', type=int, default=\n 120, help='Number of topics to learn lda model')\n parser.add_argument('-i', '--iterations', dest='iterations', type=int,\n default=1000000, help='Number of iterations for training the network')\n parser.add_argument('-r', '--rate', dest='rate', type=float, default=\n 0.001, help='Training rate for the neural network')\n parser.add_argument('-hidden', '--hidden', dest='hidden', type=int,\n default=256, help='Number of hidden neurons per layer')\n parser.add_argument('-l', '--layers', dest='layers', type=int, default=\n 1, help='Number of hidden layers')\n parser.add_argument('-pert', '--pert', dest='pert', type=int, default=0,\n help='=0 if you dont want to use perturbed dataset')\n args = parser.parse_args()\n params = vars(args)\n main(params)\n",
"__author__ = 'Wout & thijs'\n<import token>\n\n\ndef main(params):\n dataset = params['dataset']\n topics = params['topics']\n rate = params['rate']\n iterations = params['iterations']\n hidden_layers = params['hidden']\n layers = params['layers']\n pert = params['pert']\n networkLearner = LDANetworkLearner(dataset, topics, rate, hidden_layers,\n layers, pert)\n networkLearner.learnNetwork(iterations)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--dataset', dest='dataset', default=\n 'flickr30k', help='dataset: flickr8k/flickr30k')\n parser.add_argument('-t', '--topics', dest='topics', type=int, default=\n 120, help='Number of topics to learn lda model')\n parser.add_argument('-i', '--iterations', dest='iterations', type=int,\n default=1000000, help='Number of iterations for training the network')\n parser.add_argument('-r', '--rate', dest='rate', type=float, default=\n 0.001, help='Training rate for the neural network')\n parser.add_argument('-hidden', '--hidden', dest='hidden', type=int,\n default=256, help='Number of hidden neurons per layer')\n parser.add_argument('-l', '--layers', dest='layers', type=int, default=\n 1, help='Number of hidden layers')\n parser.add_argument('-pert', '--pert', dest='pert', type=int, default=0,\n help='=0 if you dont want to use perturbed dataset')\n args = parser.parse_args()\n params = vars(args)\n main(params)\n",
"<assignment token>\n<import token>\n\n\ndef main(params):\n dataset = params['dataset']\n topics = params['topics']\n rate = params['rate']\n iterations = params['iterations']\n hidden_layers = params['hidden']\n layers = params['layers']\n pert = params['pert']\n networkLearner = LDANetworkLearner(dataset, topics, rate, hidden_layers,\n layers, pert)\n networkLearner.learnNetwork(iterations)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--dataset', dest='dataset', default=\n 'flickr30k', help='dataset: flickr8k/flickr30k')\n parser.add_argument('-t', '--topics', dest='topics', type=int, default=\n 120, help='Number of topics to learn lda model')\n parser.add_argument('-i', '--iterations', dest='iterations', type=int,\n default=1000000, help='Number of iterations for training the network')\n parser.add_argument('-r', '--rate', dest='rate', type=float, default=\n 0.001, help='Training rate for the neural network')\n parser.add_argument('-hidden', '--hidden', dest='hidden', type=int,\n default=256, help='Number of hidden neurons per layer')\n parser.add_argument('-l', '--layers', dest='layers', type=int, default=\n 1, help='Number of hidden layers')\n parser.add_argument('-pert', '--pert', dest='pert', type=int, default=0,\n help='=0 if you dont want to use perturbed dataset')\n args = parser.parse_args()\n params = vars(args)\n main(params)\n",
"<assignment token>\n<import token>\n\n\ndef main(params):\n dataset = params['dataset']\n topics = params['topics']\n rate = params['rate']\n iterations = params['iterations']\n hidden_layers = params['hidden']\n layers = params['layers']\n pert = params['pert']\n networkLearner = LDANetworkLearner(dataset, topics, rate, hidden_layers,\n layers, pert)\n networkLearner.learnNetwork(iterations)\n\n\n<code token>\n",
"<assignment token>\n<import token>\n<function token>\n<code token>\n"
] | false |
98,826 |
8edfc752c0db06bbb65d50e707c57f6056d5e46a
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta
class ToDo:
Base = declarative_base()
class Table(Base):
"""The database model of a task"""
# noinspection SpellCheckingInspection
__tablename__ = 'task'
id = Column(Integer, primary_key=True)
task = Column(String, default='Unnamed task')
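        # Pass the callable itself so the default deadline is evaluated for each new row,
        # not once when the class is defined.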
        deadline = Column(Date, default=datetime.today)
def __repr__(self):
return f'{self.id}. {self.task}'
def __init__(self):
self.session = None
self.menu_choice = ''
self.init_database()
def init_database(self):
"""Creates and initializes a database"""
engine = create_engine('sqlite:///todo.db?check_same_thread=False')
self.Base.metadata.create_all(engine)
self.session = sessionmaker(bind=engine)()
def menu(self):
"""Prints menu items and accepts user choice"""
print('1) Today\'s tasks')
print('2) Week\'s tasks')
print('3) All tasks')
print('4) Missed tasks')
print('5) Add task')
print('6) Delete task')
print('0) Exit')
self.menu_choice = input()
def show_today_tasks(self):
"""Outputs all tasks for today"""
today = datetime.today()
tasks = self.session.query(self.Table).filter(self.Table.deadline == today.strftime('%Y-%m-%d')).all()
print(f'Today {today.strftime("%d %b")}:')
if tasks:
for n, task in enumerate(tasks, 1):
print(f'{n}. {task.task}')
else:
print('Nothing to do!')
print()
def show_weeks_tasks(self):
"""Outputs all tasks for next seven days"""
for day in [datetime.today() + timedelta(days=i) for i in range(7)]:
tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\
order_by(self.Table.deadline).all()
print(f'{day.strftime("%A")} {day.strftime("%d %b")}:')
if tasks:
for n, task in enumerate(tasks, 1):
print(f'{n}. {task.task}')
else:
print('Nothing to do!')
print()
def show_all_tasks(self):
"""Shows all tasks from the database"""
tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()
print('All tasks:')
if tasks:
for n, task in enumerate(tasks, 1):
print(f'{n}. {task.task}. {task.deadline.strftime("%d %b")}')
else:
print('Nothing to do!')
print()
def show_missed_tasks(self):
"""Shows all missed tasks from the database"""
tasks = self.session.query(self.Table).filter(self.Table.deadline < datetime.today().strftime('%Y-%m-%d')).\
order_by(self.Table.deadline).all()
print('Missed tasks:')
if tasks:
for n, task in enumerate(tasks, 1):
print(f'{n}. {task.task}. {task.deadline.strftime("%d %b")}')
else:
print('Nothing is missed!')
print()
def add_task(self):
"""Add a task to the database"""
print('Enter task')
text_task = input()
        print('Enter deadline (YYYY-MM-DD)')
new_task = self.Table(task=text_task, deadline=datetime.strptime(input(), '%Y-%m-%d'))
self.session.add(new_task)
self.session.commit()
print('The task has been added!')
print()
def delete_task(self):
"""Delete a chosen task from the database"""
tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()
if tasks:
            print('Choose the number of the task you want to delete:')
for n, task in enumerate(tasks, 1):
print(f'{n}. {task.task}. {task.deadline.strftime("%d %b")}')
self.session.query(self.Table).filter(self.Table.id == tasks[int(input())-1].id).delete()
self.session.commit()
else:
print('Nothing to delete!')
print()
def run(self):
"""Main logic of the program"""
while True:
self.menu()
if self.menu_choice == '1':
self.show_today_tasks()
elif self.menu_choice == '2':
self.show_weeks_tasks()
elif self.menu_choice == '3':
self.show_all_tasks()
elif self.menu_choice == '4':
self.show_missed_tasks()
elif self.menu_choice == '5':
self.add_task()
elif self.menu_choice == '6':
self.delete_task()
else:
print('Bye!')
break
if __name__ == '__main__':
todo = ToDo()
todo.run()
|
[
"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Date\nfrom sqlalchemy.orm import sessionmaker\nfrom datetime import datetime, timedelta\n\n\nclass ToDo:\n Base = declarative_base()\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n # noinspection SpellCheckingInspection\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n\n def init_database(self):\n \"\"\"Creates and initializes a database\"\"\"\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print('1) Today\\'s tasks')\n print('2) Week\\'s tasks')\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline == today.strftime('%Y-%m-%d')).all()\n print(f'Today {today.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_weeks_tasks(self):\n \"\"\"Outputs all tasks for next seven days\"\"\"\n for day in [datetime.today() + timedelta(days=i) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.deadline == day.strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print(f'{day.strftime(\"%A\")} {day.strftime(\"%d %b\")}:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_all_tasks(self):\n \"\"\"Shows all tasks from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}. {task.deadline.strftime(\"%d %b\")}')\n else:\n print('Nothing to do!')\n print()\n\n def show_missed_tasks(self):\n \"\"\"Shows all missed tasks from the database\"\"\"\n tasks = self.session.query(self.Table).filter(self.Table.deadline < datetime.today().strftime('%Y-%m-%d')).\\\n order_by(self.Table.deadline).all()\n print('Missed tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}. {task.deadline.strftime(\"%d %b\")}')\n else:\n print('Nothing is missed!')\n print()\n\n def add_task(self):\n \"\"\"Add a task to the database\"\"\"\n print('Enter task')\n text_task = input()\n print('Enter deadline')\n new_task = self.Table(task=text_task, deadline=datetime.strptime(input(), '%Y-%m-%d'))\n self.session.add(new_task)\n self.session.commit()\n print('The task has been added!')\n print()\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}. 
{task.deadline.strftime(\"%d %b\")}')\n self.session.query(self.Table).filter(self.Table.id == tasks[int(input())-1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\nif __name__ == '__main__':\n todo = ToDo()\n todo.run()",
"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Date\nfrom sqlalchemy.orm import sessionmaker\nfrom datetime import datetime, timedelta\n\n\nclass ToDo:\n Base = declarative_base()\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n\n def init_database(self):\n \"\"\"Creates and initializes a database\"\"\"\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline ==\n today.strftime('%Y-%m-%d')).all()\n print(f\"Today {today.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_weeks_tasks(self):\n \"\"\"Outputs all tasks for next seven days\"\"\"\n for day in [(datetime.today() + timedelta(days=i)) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.\n deadline == day.strftime('%Y-%m-%d')).order_by(self.Table.\n deadline).all()\n print(f\"{day.strftime('%A')} {day.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_all_tasks(self):\n \"\"\"Shows all tasks from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing to do!')\n print()\n\n def show_missed_tasks(self):\n \"\"\"Shows all missed tasks from the database\"\"\"\n tasks = self.session.query(self.Table).filter(self.Table.deadline <\n datetime.today().strftime('%Y-%m-%d')).order_by(self.Table.deadline\n ).all()\n print('Missed tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing is missed!')\n print()\n\n def add_task(self):\n \"\"\"Add a task to the database\"\"\"\n print('Enter task')\n text_task = input()\n print('Enter deadline')\n new_task = self.Table(task=text_task, deadline=datetime.strptime(\n input(), '%Y-%m-%d'))\n self.session.add(new_task)\n self.session.commit()\n print('The task has been added!')\n print()\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. 
{task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\nif __name__ == '__main__':\n todo = ToDo()\n todo.run()\n",
"<import token>\n\n\nclass ToDo:\n Base = declarative_base()\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n\n def init_database(self):\n \"\"\"Creates and initializes a database\"\"\"\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline ==\n today.strftime('%Y-%m-%d')).all()\n print(f\"Today {today.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_weeks_tasks(self):\n \"\"\"Outputs all tasks for next seven days\"\"\"\n for day in [(datetime.today() + timedelta(days=i)) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.\n deadline == day.strftime('%Y-%m-%d')).order_by(self.Table.\n deadline).all()\n print(f\"{day.strftime('%A')} {day.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_all_tasks(self):\n \"\"\"Shows all tasks from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing to do!')\n print()\n\n def show_missed_tasks(self):\n \"\"\"Shows all missed tasks from the database\"\"\"\n tasks = self.session.query(self.Table).filter(self.Table.deadline <\n datetime.today().strftime('%Y-%m-%d')).order_by(self.Table.deadline\n ).all()\n print('Missed tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing is missed!')\n print()\n\n def add_task(self):\n \"\"\"Add a task to the database\"\"\"\n print('Enter task')\n text_task = input()\n print('Enter deadline')\n new_task = self.Table(task=text_task, deadline=datetime.strptime(\n input(), '%Y-%m-%d'))\n self.session.add(new_task)\n self.session.commit()\n print('The task has been added!')\n print()\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. 
{task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\nif __name__ == '__main__':\n todo = ToDo()\n todo.run()\n",
"<import token>\n\n\nclass ToDo:\n Base = declarative_base()\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n\n def init_database(self):\n \"\"\"Creates and initializes a database\"\"\"\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline ==\n today.strftime('%Y-%m-%d')).all()\n print(f\"Today {today.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_weeks_tasks(self):\n \"\"\"Outputs all tasks for next seven days\"\"\"\n for day in [(datetime.today() + timedelta(days=i)) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.\n deadline == day.strftime('%Y-%m-%d')).order_by(self.Table.\n deadline).all()\n print(f\"{day.strftime('%A')} {day.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_all_tasks(self):\n \"\"\"Shows all tasks from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing to do!')\n print()\n\n def show_missed_tasks(self):\n \"\"\"Shows all missed tasks from the database\"\"\"\n tasks = self.session.query(self.Table).filter(self.Table.deadline <\n datetime.today().strftime('%Y-%m-%d')).order_by(self.Table.deadline\n ).all()\n print('Missed tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing is missed!')\n print()\n\n def add_task(self):\n \"\"\"Add a task to the database\"\"\"\n print('Enter task')\n text_task = input()\n print('Enter deadline')\n new_task = self.Table(task=text_task, deadline=datetime.strptime(\n input(), '%Y-%m-%d'))\n self.session.add(new_task)\n self.session.commit()\n print('The task has been added!')\n print()\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. 
{task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n\n def init_database(self):\n \"\"\"Creates and initializes a database\"\"\"\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline ==\n today.strftime('%Y-%m-%d')).all()\n print(f\"Today {today.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_weeks_tasks(self):\n \"\"\"Outputs all tasks for next seven days\"\"\"\n for day in [(datetime.today() + timedelta(days=i)) for i in range(7)]:\n tasks = self.session.query(self.Table).filter(self.Table.\n deadline == day.strftime('%Y-%m-%d')).order_by(self.Table.\n deadline).all()\n print(f\"{day.strftime('%A')} {day.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n\n def show_all_tasks(self):\n \"\"\"Shows all tasks from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing to do!')\n print()\n\n def show_missed_tasks(self):\n \"\"\"Shows all missed tasks from the database\"\"\"\n tasks = self.session.query(self.Table).filter(self.Table.deadline <\n datetime.today().strftime('%Y-%m-%d')).order_by(self.Table.deadline\n ).all()\n print('Missed tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing is missed!')\n print()\n\n def add_task(self):\n \"\"\"Add a task to the database\"\"\"\n print('Enter task')\n text_task = input()\n print('Enter deadline')\n new_task = self.Table(task=text_task, deadline=datetime.strptime(\n input(), '%Y-%m-%d'))\n self.session.add(new_task)\n self.session.commit()\n print('The task has been added!')\n print()\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. 
{task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n\n def init_database(self):\n \"\"\"Creates and initializes a database\"\"\"\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline ==\n today.strftime('%Y-%m-%d')).all()\n print(f\"Today {today.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n <function token>\n\n def show_all_tasks(self):\n \"\"\"Shows all tasks from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing to do!')\n print()\n\n def show_missed_tasks(self):\n \"\"\"Shows all missed tasks from the database\"\"\"\n tasks = self.session.query(self.Table).filter(self.Table.deadline <\n datetime.today().strftime('%Y-%m-%d')).order_by(self.Table.deadline\n ).all()\n print('Missed tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing is missed!')\n print()\n\n def add_task(self):\n \"\"\"Add a task to the database\"\"\"\n print('Enter task')\n text_task = input()\n print('Enter deadline')\n new_task = self.Table(task=text_task, deadline=datetime.strptime(\n input(), '%Y-%m-%d'))\n self.session.add(new_task)\n self.session.commit()\n print('The task has been added!')\n print()\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n\n def init_database(self):\n \"\"\"Creates and initializes a database\"\"\"\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline ==\n today.strftime('%Y-%m-%d')).all()\n print(f\"Today {today.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n <function token>\n\n def show_all_tasks(self):\n \"\"\"Shows all tasks from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing to do!')\n print()\n\n def show_missed_tasks(self):\n \"\"\"Shows all missed tasks from the database\"\"\"\n tasks = self.session.query(self.Table).filter(self.Table.deadline <\n datetime.today().strftime('%Y-%m-%d')).order_by(self.Table.deadline\n ).all()\n print('Missed tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing is missed!')\n print()\n <function token>\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n\n def init_database(self):\n \"\"\"Creates and initializes a database\"\"\"\n engine = create_engine('sqlite:///todo.db?check_same_thread=False')\n self.Base.metadata.create_all(engine)\n self.session = sessionmaker(bind=engine)()\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline ==\n today.strftime('%Y-%m-%d')).all()\n print(f\"Today {today.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n <function token>\n\n def show_all_tasks(self):\n \"\"\"Shows all tasks from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing to do!')\n print()\n <function token>\n <function token>\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n <function token>\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline ==\n today.strftime('%Y-%m-%d')).all()\n print(f\"Today {today.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n <function token>\n\n def show_all_tasks(self):\n \"\"\"Shows all tasks from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n else:\n print('Nothing to do!')\n print()\n <function token>\n <function token>\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n <function token>\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n\n def show_today_tasks(self):\n \"\"\"Outputs all tasks for today\"\"\"\n today = datetime.today()\n tasks = self.session.query(self.Table).filter(self.Table.deadline ==\n today.strftime('%Y-%m-%d')).all()\n print(f\"Today {today.strftime('%d %b')}:\")\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}')\n else:\n print('Nothing to do!')\n print()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n <function token>\n\n def menu(self):\n \"\"\"Prints menu items and accepts user choice\"\"\"\n print(\"1) Today's tasks\")\n print(\"2) Week's tasks\")\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n\n def run(self):\n \"\"\"Main logic of the program\"\"\"\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def delete_task(self):\n \"\"\"Delete a chosen task from the database\"\"\"\n tasks = self.session.query(self.Table).order_by(self.Table.deadline\n ).all()\n if tasks:\n print('Chose the number of the task you want to delete:')\n for n, task in enumerate(tasks, 1):\n print(f\"{n}. {task.task}. {task.deadline.strftime('%d %b')}\")\n self.session.query(self.Table).filter(self.Table.id == tasks[\n int(input()) - 1].id).delete()\n self.session.commit()\n else:\n print('Nothing to delete!')\n print()\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n\n def __init__(self):\n self.session = None\n self.menu_choice = ''\n self.init_database()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n\n\nclass ToDo:\n <assignment token>\n\n\n class Table(Base):\n \"\"\"The database model of a task\"\"\"\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Unnamed task')\n deadline = Column(Date, default=datetime.today())\n\n def __repr__(self):\n return f'{self.id}. {self.task}'\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
98,827 |
b1293088e5b8947c717b97d3c81c600a2a31cb24
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^register_user/$', views.register_user, name='register_user'),
url(r'^confirmation/$', views.confirmation, name='confirmation'),
url(r'^$', views.index, name='index'),
]
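
# Note (an assumption about the Django version in use): django.conf.urls.url() was deprecated in
# Django 3.1 and removed in 4.0. On current Django the equivalent urlconf would use re_path (or
# path); a minimal sketch of the same routes:
#
# from django.urls import re_path
#
# urlpatterns = [
#     re_path(r'^register_user/$', views.register_user, name='register_user'),
#     re_path(r'^confirmation/$', views.confirmation, name='confirmation'),
#     re_path(r'^$', views.index, name='index'),
# ]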
|
[
"from django.conf.urls import url\n\nfrom . import views\n\n\nurlpatterns = [\n url(r'^register_user/$', views.register_user, name='register_user'),\n url(r'^confirmation/$', views.confirmation, name='confirmation'),\n url(r'^$', views.index, name='index'),\n]\n",
"from django.conf.urls import url\nfrom . import views\nurlpatterns = [url('^register_user/$', views.register_user, name=\n 'register_user'), url('^confirmation/$', views.confirmation, name=\n 'confirmation'), url('^$', views.index, name='index')]\n",
"<import token>\nurlpatterns = [url('^register_user/$', views.register_user, name=\n 'register_user'), url('^confirmation/$', views.confirmation, name=\n 'confirmation'), url('^$', views.index, name='index')]\n",
"<import token>\n<assignment token>\n"
] | false |
98,828 |
1d7dbc3646a9fae0f5c438094d471a65192e0cfb
|
from PIL import Image
from PIL.ImageDraw import Draw  # ImageDraw  # Import the required libraries.
from PySide2 import QtGui
import math as mh
class Oper(object):
    # Compute the brightness of a pixel as the mean of its RGB channels
    def brightness(self, x, y):
R, G, B = self.pix[x, y]
return sum([R, G, B]) // 3 # 0 is dark (black) and 255 is bright (white)
    # Write the computed gradient value into the corresponding 3x3 block of the output array
    def changeColor(self, alpha, maska, x, y):
for i in [x - 2, x - 1, x]:
for j in [y - 3, y - 2, y - 1]:
self.picture[i][j] = alpha
# **************************************
    # Roberts cross operator
    def operRoberts(self, matrix, x, y):
Gx = matrix[x + 1][y + 1] - matrix[x][y]
Gy = matrix[x + 1][y] - matrix[x][y + 1]
# G = np.sqrt(sum([Gx ** 2, Gy ** 2]))
G = mh.fabs(Gx) + mh.fabs(Gy)
return G
# **************************************
# **************************************
    # Sobel operator
    def operSobel(self, matrix, x, y):
Gx = (matrix[x + 1][y - 1] + 2 * matrix[x + 1][y] + matrix[x + 1][y + 1]) - (
matrix[x - 1][y - 1] + 2 * matrix[x - 1][y] + matrix[x - 1][y + 1])
Gy = (matrix[x - 1][y + 1] + 2 * matrix[x][y + 1] + matrix[x + 1][y + 1]) - (
matrix[x - 1][y - 1] + 2 * matrix[x][y - 1] + matrix[x + 1][y - 1])
G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))
return G
# **************************************
# **************************************
    # Laplace operator
def operLaplas(self, matrix, x, y):
Gx = 4 * matrix[x][y] - matrix[x - 1][y] - matrix[x][y - 1] - matrix[x][y + 1] - matrix[x + 1][y]
Gy = 8 * matrix[x][y] - matrix[x - 1][y - 1] - matrix[x - 1][y] - matrix[x - 1][y + 1] - matrix[x][y - 1] - \
matrix[x][y + 1] - matrix[x + 1][y - 1] - matrix[x + 1][y] - matrix[x + 1][y + 1]
G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))
return G
# **************************************
    # Select which operator to apply (1: Laplace, 2: Roberts, 3: Sobel)
def choiceOper(self, num):
if (num == 1):
self.changeColor(self.operLaplas(self.mask, 1, 1), self.mask, self._wid, self._hei)
elif (num == 2):
self.changeColor(self.operRoberts(self.mask, 1, 1), self.mask, self._wid, self._hei)
elif(num == 3):
self.changeColor(self.operSobel(self.mask, 1, 1), self.mask, self._wid,self._hei)
def mainInOper(self, pict, num):
        self.image = pict  # Open the image.
        self.width = self.image.size[0]  # Determine the width.
        self.height = self.image.size[1]  # Determine the height.
        self.mask = []  # 3x3 mask holding pixel brightness values
        self.picture = []  # Array that stores the gradient value of every point
        # Build the 3x3 mask
for j in range(3):
mask2 = []
for i in range(3):
mask2.append(0)
self.mask.append(mask2)
        # Auxiliary array that stores the pixel brightness values for the new image
for j in range(self.width):
picture2 = []
for i in range(self.height):
picture2.append(0)
self.picture.append(picture2)
        # Crop the image so its dimensions are multiples of 3 (to fit the 3x3 mask)
if (self.width%3 != 0 and self.height%3 != 0):
self.imag = self.image.crop((0,0,self.width - self.width%3, self.height - self.height%3))
elif (self.width%3 != 0):
self.imag = self.image.crop((0, 0, self.width - self.width%3, self.height))
elif (self.height%3 != 0):
self.imag = self.image.crop((0, 0, self.width, self.height - self.height % 3))
else:
self.imag = self.image
        self.draw = Draw(self.imag)  # Create the drawing tool.
        self.width = self.imag.size[0]  # Determine the width.
        self.height = self.imag.size[1]  # Determine the height.
        self.pix = self.imag.load()  # Load the pixel values.
        print(self.imag.size)  # Image size
        self._hei = 0  # Index along the image height
        self._wid = 0  # Index along the image width
        self._i_wid = 0  # Index along the mask width (3x3)
        # Traverse the image, applying the selected operator's mask
while self._wid < self.width:
            self._j_hei = 0  # Index along the mask height (3x3)
while self._hei < self.height and self._j_hei < 3:
                self.mask[self._i_wid][self._j_hei] = self.brightness(self._wid, self._hei)  # store the pixel brightness in the mask
self._j_hei += 1
self._hei += 1
if (self._i_wid == 2):
if (self._hei == self.height):
                    # alph = math.atan(operRoberts(mask, 1, 1)) - gradient angle
                    self.choiceOper(num)
                    self._hei = 0
self._i_wid = 0
self._wid += 1
else:
self.choiceOper(num)
                    # alph = math.atan(operRoberts(mask, 1, 1)) - gradient angle
self._i_wid = 0
self._wid -= 2
else:
self._hei -= 3
self._i_wid += 1
self._wid += 1
if (self._hei == self.height):
self._hei = 0
        # Redraw the image with the new pixel values
for i in range(self.width):
for j in range(self.height):
self.draw.point((i, j), (int(self.picture[i][j]), int(self.picture[i][j]), int(self.picture[i][j]))) # (a, b, c))
return self.imag
# self.pixel = QtGui.QImage(pict)
#
# self.painter = QtGui.QPainter()
    # # Redraw the image with the new pixel values
# # for i in range(self.width):
# # for j in range(self.height):
# # self.painter.setPen(QtGui.QColor(int(self.picture[i][j]), int(self.picture[i][j]), int(self.picture[i][j])))
# # self.painter.drawImage(i, j,self.pix)
# # return self.painter
# for i in range(self.width):
# for j in range(self.height):
# print(self.picture[i][j])
# self.pixel.setPixel(i,j,0)#int(self.picture[i][j]))
# return self.pixel
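
# A minimal usage sketch (added for illustration, not part of the original script). It assumes an
# RGB image file named 'input.jpg' is available; the file names and the operator number
# (3 = Sobel, per choiceOper above) are illustrative only.
if __name__ == '__main__':
    source = Image.open('input.jpg').convert('RGB')  # load the image with Pillow
    edge_map = Oper().mainInOper(source, 3)          # run the Sobel edge detector
    edge_map.save('edges.jpg')                       # save the resulting edge image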
|
[
"from PIL import Image\nfrom PIL.ImageDraw import Draw#ImageDraw #Подключим необходимые библиотеки.\nfrom PySide2 import QtGui\nimport math as mh\n\nclass Oper(object):\n # Функция нахождения яркости пикселя\n def brightness(self,x, y):\n R, G, B = self.pix[x, y]\n return sum([R, G, B]) // 3 # 0 is dark (black) and 255 is bright (white)\n\n # Функция изменения цвета\n def changeColor(self,alpha, maska, x, y):\n for i in [x - 2, x - 1, x]:\n for j in [y - 3, y - 2, y - 1]:\n self.picture[i][j] = alpha\n\n # **************************************\n # Оператор Робертса\n def operRoberts(self,matrix, x, y):\n Gx = matrix[x + 1][y + 1] - matrix[x][y]\n Gy = matrix[x + 1][y] - matrix[x][y + 1]\n # G = np.sqrt(sum([Gx ** 2, Gy ** 2]))\n G = mh.fabs(Gx) + mh.fabs(Gy)\n return G\n\n # **************************************\n\n # **************************************\n # Оператор Собеля\n def operSobel(self,matrix, x, y):\n Gx = (matrix[x + 1][y - 1] + 2 * matrix[x + 1][y] + matrix[x + 1][y + 1]) - (\n matrix[x - 1][y - 1] + 2 * matrix[x - 1][y] + matrix[x - 1][y + 1])\n Gy = (matrix[x - 1][y + 1] + 2 * matrix[x][y + 1] + matrix[x + 1][y + 1]) - (\n matrix[x - 1][y - 1] + 2 * matrix[x][y - 1] + matrix[x + 1][y - 1])\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n\n # **************************************\n\n # **************************************\n # Оператор Лапласа\n def operLaplas(self, matrix, x, y):\n Gx = 4 * matrix[x][y] - matrix[x - 1][y] - matrix[x][y - 1] - matrix[x][y + 1] - matrix[x + 1][y]\n Gy = 8 * matrix[x][y] - matrix[x - 1][y - 1] - matrix[x - 1][y] - matrix[x - 1][y + 1] - matrix[x][y - 1] - \\\n matrix[x][y + 1] - matrix[x + 1][y - 1] - matrix[x + 1][y] - matrix[x + 1][y + 1]\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n\n # **************************************\n\n # Функция выбора оператора\n def choiceOper(self, num):\n if (num == 1):\n self.changeColor(self.operLaplas(self.mask, 1, 1), self.mask, self._wid, self._hei)\n elif (num == 2):\n self.changeColor(self.operRoberts(self.mask, 1, 1), self.mask, self._wid, self._hei)\n elif(num == 3):\n self.changeColor(self.operSobel(self.mask, 1, 1), self.mask, self._wid,self._hei)\n\n\n def mainInOper(self, pict, num):\n\n self.image = pict#pict#Открываем изображение.\n self.width = self.image.size[0] #Определяем ширину.\n self.height = self.image.size[1] #Определяем высоту.\n self.mask = []#Маска со значениями яркости пикселя\n self.picture = []#Массив для записи градиентов точек\n\n # Маска 3х3\n for j in range(3):\n mask2 = []\n for i in range(3):\n mask2.append(0)\n self.mask.append(mask2)\n\n # Вспомогательный массив, который хранит яркости пикселей для нового изображения\n for j in range(self.width):\n picture2 = []\n for i in range(self.height):\n picture2.append(0)\n self.picture.append(picture2)\n\n # Подгонка изображения для матрицы 3х3\n if (self.width%3 != 0 and self.height%3 != 0):\n self.imag = self.image.crop((0,0,self.width - self.width%3, self.height - self.height%3))\n elif (self.width%3 != 0):\n self.imag = self.image.crop((0, 0, self.width - self.width%3, self.height))\n elif (self.height%3 != 0):\n self.imag = self.image.crop((0, 0, self.width, self.height - self.height % 3))\n else:\n self.imag = self.image\n\n self.draw = Draw(self.imag)#Создаем инструмент для рисования.\n self.width = self.imag.size[0] #Определяем ширину.\n self.height = self.imag.size[1] #Определяем высоту.\n self.pix = self.imag.load() #Выгружаем значения пикселей.\n\n print(self.imag.size) # Размер изображения\n self._hei = 0 
# Индекс для прохода по длине\n self._wid = 0 # Индекс для прохода по ширине\n self._i_wid = 0 # Индекс для прохода по ширине по маске (3x3)\n\n # Обход изображения применяя к нему маску выбранного оператора\n while self._wid < self.width:\n self._j_hei = 0 # Индекс для прохода по длне по маске (3x3)\n while self._hei < self.height and self._j_hei < 3:\n self.mask[self._i_wid][self._j_hei] = self.brightness(self._wid, self._hei) # записываем значение яркости пикселя в маску\n self._j_hei += 1\n self._hei += 1\n if (self._i_wid == 2):\n if (self._hei == self.height):\n # alph = math.atan(operRoberts(mask, 1, 1)) - угол\n self.choiceOper(num)\n self._hei = 0;\n self._i_wid = 0\n self._wid += 1\n else:\n self.choiceOper(num)\n # alph = math.atan(operRoberts(mask, 1, 1)) - угол\n self._i_wid = 0\n self._wid -= 2\n else:\n self._hei -= 3\n self._i_wid += 1\n self._wid += 1\n\n if (self._hei == self.height):\n self._hei = 0\n\n #Перерисовывание изображения новыми пикселями\n for i in range(self.width):\n for j in range(self.height):\n self.draw.point((i, j), (int(self.picture[i][j]), int(self.picture[i][j]), int(self.picture[i][j]))) # (a, b, c))\n return self.imag\n\n\n # self.pixel = QtGui.QImage(pict)\n #\n # self.painter = QtGui.QPainter()\n # # Перерисовывание изображения новыми пикселями\n # # for i in range(self.width):\n # # for j in range(self.height):\n # # self.painter.setPen(QtGui.QColor(int(self.picture[i][j]), int(self.picture[i][j]), int(self.picture[i][j])))\n # # self.painter.drawImage(i, j,self.pix)\n # # return self.painter\n # for i in range(self.width):\n # for j in range(self.height):\n # print(self.picture[i][j])\n # self.pixel.setPixel(i,j,0)#int(self.picture[i][j]))\n # return self.pixel\n\n\n\n\n\n",
"from PIL import Image\nfrom PIL.ImageDraw import Draw\nfrom PySide2 import QtGui\nimport math as mh\n\n\nclass Oper(object):\n\n def brightness(self, x, y):\n R, G, B = self.pix[x, y]\n return sum([R, G, B]) // 3\n\n def changeColor(self, alpha, maska, x, y):\n for i in [x - 2, x - 1, x]:\n for j in [y - 3, y - 2, y - 1]:\n self.picture[i][j] = alpha\n\n def operRoberts(self, matrix, x, y):\n Gx = matrix[x + 1][y + 1] - matrix[x][y]\n Gy = matrix[x + 1][y] - matrix[x][y + 1]\n G = mh.fabs(Gx) + mh.fabs(Gy)\n return G\n\n def operSobel(self, matrix, x, y):\n Gx = matrix[x + 1][y - 1] + 2 * matrix[x + 1][y] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x - 1][y] + matrix[x - 1\n ][y + 1])\n Gy = matrix[x - 1][y + 1] + 2 * matrix[x][y + 1] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x][y - 1] + matrix[x + 1\n ][y - 1])\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n\n def operLaplas(self, matrix, x, y):\n Gx = 4 * matrix[x][y] - matrix[x - 1][y] - matrix[x][y - 1] - matrix[x\n ][y + 1] - matrix[x + 1][y]\n Gy = 8 * matrix[x][y] - matrix[x - 1][y - 1] - matrix[x - 1][y\n ] - matrix[x - 1][y + 1] - matrix[x][y - 1] - matrix[x][y + 1\n ] - matrix[x + 1][y - 1] - matrix[x + 1][y] - matrix[x + 1][y + 1]\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n\n def choiceOper(self, num):\n if num == 1:\n self.changeColor(self.operLaplas(self.mask, 1, 1), self.mask,\n self._wid, self._hei)\n elif num == 2:\n self.changeColor(self.operRoberts(self.mask, 1, 1), self.mask,\n self._wid, self._hei)\n elif num == 3:\n self.changeColor(self.operSobel(self.mask, 1, 1), self.mask,\n self._wid, self._hei)\n\n def mainInOper(self, pict, num):\n self.image = pict\n self.width = self.image.size[0]\n self.height = self.image.size[1]\n self.mask = []\n self.picture = []\n for j in range(3):\n mask2 = []\n for i in range(3):\n mask2.append(0)\n self.mask.append(mask2)\n for j in range(self.width):\n picture2 = []\n for i in range(self.height):\n picture2.append(0)\n self.picture.append(picture2)\n if self.width % 3 != 0 and self.height % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width - self.width % 3,\n self.height - self.height % 3))\n elif self.width % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width - self.width % 3,\n self.height))\n elif self.height % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width, self.height - \n self.height % 3))\n else:\n self.imag = self.image\n self.draw = Draw(self.imag)\n self.width = self.imag.size[0]\n self.height = self.imag.size[1]\n self.pix = self.imag.load()\n print(self.imag.size)\n self._hei = 0\n self._wid = 0\n self._i_wid = 0\n while self._wid < self.width:\n self._j_hei = 0\n while self._hei < self.height and self._j_hei < 3:\n self.mask[self._i_wid][self._j_hei] = self.brightness(self.\n _wid, self._hei)\n self._j_hei += 1\n self._hei += 1\n if self._i_wid == 2:\n if self._hei == self.height:\n self.choiceOper(num)\n self._hei = 0\n self._i_wid = 0\n self._wid += 1\n else:\n self.choiceOper(num)\n self._i_wid = 0\n self._wid -= 2\n else:\n self._hei -= 3\n self._i_wid += 1\n self._wid += 1\n if self._hei == self.height:\n self._hei = 0\n for i in range(self.width):\n for j in range(self.height):\n self.draw.point((i, j), (int(self.picture[i][j]), int(self.\n picture[i][j]), int(self.picture[i][j])))\n return self.imag\n",
"<import token>\n\n\nclass Oper(object):\n\n def brightness(self, x, y):\n R, G, B = self.pix[x, y]\n return sum([R, G, B]) // 3\n\n def changeColor(self, alpha, maska, x, y):\n for i in [x - 2, x - 1, x]:\n for j in [y - 3, y - 2, y - 1]:\n self.picture[i][j] = alpha\n\n def operRoberts(self, matrix, x, y):\n Gx = matrix[x + 1][y + 1] - matrix[x][y]\n Gy = matrix[x + 1][y] - matrix[x][y + 1]\n G = mh.fabs(Gx) + mh.fabs(Gy)\n return G\n\n def operSobel(self, matrix, x, y):\n Gx = matrix[x + 1][y - 1] + 2 * matrix[x + 1][y] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x - 1][y] + matrix[x - 1\n ][y + 1])\n Gy = matrix[x - 1][y + 1] + 2 * matrix[x][y + 1] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x][y - 1] + matrix[x + 1\n ][y - 1])\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n\n def operLaplas(self, matrix, x, y):\n Gx = 4 * matrix[x][y] - matrix[x - 1][y] - matrix[x][y - 1] - matrix[x\n ][y + 1] - matrix[x + 1][y]\n Gy = 8 * matrix[x][y] - matrix[x - 1][y - 1] - matrix[x - 1][y\n ] - matrix[x - 1][y + 1] - matrix[x][y - 1] - matrix[x][y + 1\n ] - matrix[x + 1][y - 1] - matrix[x + 1][y] - matrix[x + 1][y + 1]\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n\n def choiceOper(self, num):\n if num == 1:\n self.changeColor(self.operLaplas(self.mask, 1, 1), self.mask,\n self._wid, self._hei)\n elif num == 2:\n self.changeColor(self.operRoberts(self.mask, 1, 1), self.mask,\n self._wid, self._hei)\n elif num == 3:\n self.changeColor(self.operSobel(self.mask, 1, 1), self.mask,\n self._wid, self._hei)\n\n def mainInOper(self, pict, num):\n self.image = pict\n self.width = self.image.size[0]\n self.height = self.image.size[1]\n self.mask = []\n self.picture = []\n for j in range(3):\n mask2 = []\n for i in range(3):\n mask2.append(0)\n self.mask.append(mask2)\n for j in range(self.width):\n picture2 = []\n for i in range(self.height):\n picture2.append(0)\n self.picture.append(picture2)\n if self.width % 3 != 0 and self.height % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width - self.width % 3,\n self.height - self.height % 3))\n elif self.width % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width - self.width % 3,\n self.height))\n elif self.height % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width, self.height - \n self.height % 3))\n else:\n self.imag = self.image\n self.draw = Draw(self.imag)\n self.width = self.imag.size[0]\n self.height = self.imag.size[1]\n self.pix = self.imag.load()\n print(self.imag.size)\n self._hei = 0\n self._wid = 0\n self._i_wid = 0\n while self._wid < self.width:\n self._j_hei = 0\n while self._hei < self.height and self._j_hei < 3:\n self.mask[self._i_wid][self._j_hei] = self.brightness(self.\n _wid, self._hei)\n self._j_hei += 1\n self._hei += 1\n if self._i_wid == 2:\n if self._hei == self.height:\n self.choiceOper(num)\n self._hei = 0\n self._i_wid = 0\n self._wid += 1\n else:\n self.choiceOper(num)\n self._i_wid = 0\n self._wid -= 2\n else:\n self._hei -= 3\n self._i_wid += 1\n self._wid += 1\n if self._hei == self.height:\n self._hei = 0\n for i in range(self.width):\n for j in range(self.height):\n self.draw.point((i, j), (int(self.picture[i][j]), int(self.\n picture[i][j]), int(self.picture[i][j])))\n return self.imag\n",
"<import token>\n\n\nclass Oper(object):\n\n def brightness(self, x, y):\n R, G, B = self.pix[x, y]\n return sum([R, G, B]) // 3\n\n def changeColor(self, alpha, maska, x, y):\n for i in [x - 2, x - 1, x]:\n for j in [y - 3, y - 2, y - 1]:\n self.picture[i][j] = alpha\n\n def operRoberts(self, matrix, x, y):\n Gx = matrix[x + 1][y + 1] - matrix[x][y]\n Gy = matrix[x + 1][y] - matrix[x][y + 1]\n G = mh.fabs(Gx) + mh.fabs(Gy)\n return G\n\n def operSobel(self, matrix, x, y):\n Gx = matrix[x + 1][y - 1] + 2 * matrix[x + 1][y] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x - 1][y] + matrix[x - 1\n ][y + 1])\n Gy = matrix[x - 1][y + 1] + 2 * matrix[x][y + 1] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x][y - 1] + matrix[x + 1\n ][y - 1])\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n\n def operLaplas(self, matrix, x, y):\n Gx = 4 * matrix[x][y] - matrix[x - 1][y] - matrix[x][y - 1] - matrix[x\n ][y + 1] - matrix[x + 1][y]\n Gy = 8 * matrix[x][y] - matrix[x - 1][y - 1] - matrix[x - 1][y\n ] - matrix[x - 1][y + 1] - matrix[x][y - 1] - matrix[x][y + 1\n ] - matrix[x + 1][y - 1] - matrix[x + 1][y] - matrix[x + 1][y + 1]\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n <function token>\n\n def mainInOper(self, pict, num):\n self.image = pict\n self.width = self.image.size[0]\n self.height = self.image.size[1]\n self.mask = []\n self.picture = []\n for j in range(3):\n mask2 = []\n for i in range(3):\n mask2.append(0)\n self.mask.append(mask2)\n for j in range(self.width):\n picture2 = []\n for i in range(self.height):\n picture2.append(0)\n self.picture.append(picture2)\n if self.width % 3 != 0 and self.height % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width - self.width % 3,\n self.height - self.height % 3))\n elif self.width % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width - self.width % 3,\n self.height))\n elif self.height % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width, self.height - \n self.height % 3))\n else:\n self.imag = self.image\n self.draw = Draw(self.imag)\n self.width = self.imag.size[0]\n self.height = self.imag.size[1]\n self.pix = self.imag.load()\n print(self.imag.size)\n self._hei = 0\n self._wid = 0\n self._i_wid = 0\n while self._wid < self.width:\n self._j_hei = 0\n while self._hei < self.height and self._j_hei < 3:\n self.mask[self._i_wid][self._j_hei] = self.brightness(self.\n _wid, self._hei)\n self._j_hei += 1\n self._hei += 1\n if self._i_wid == 2:\n if self._hei == self.height:\n self.choiceOper(num)\n self._hei = 0\n self._i_wid = 0\n self._wid += 1\n else:\n self.choiceOper(num)\n self._i_wid = 0\n self._wid -= 2\n else:\n self._hei -= 3\n self._i_wid += 1\n self._wid += 1\n if self._hei == self.height:\n self._hei = 0\n for i in range(self.width):\n for j in range(self.height):\n self.draw.point((i, j), (int(self.picture[i][j]), int(self.\n picture[i][j]), int(self.picture[i][j])))\n return self.imag\n",
"<import token>\n\n\nclass Oper(object):\n <function token>\n\n def changeColor(self, alpha, maska, x, y):\n for i in [x - 2, x - 1, x]:\n for j in [y - 3, y - 2, y - 1]:\n self.picture[i][j] = alpha\n\n def operRoberts(self, matrix, x, y):\n Gx = matrix[x + 1][y + 1] - matrix[x][y]\n Gy = matrix[x + 1][y] - matrix[x][y + 1]\n G = mh.fabs(Gx) + mh.fabs(Gy)\n return G\n\n def operSobel(self, matrix, x, y):\n Gx = matrix[x + 1][y - 1] + 2 * matrix[x + 1][y] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x - 1][y] + matrix[x - 1\n ][y + 1])\n Gy = matrix[x - 1][y + 1] + 2 * matrix[x][y + 1] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x][y - 1] + matrix[x + 1\n ][y - 1])\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n\n def operLaplas(self, matrix, x, y):\n Gx = 4 * matrix[x][y] - matrix[x - 1][y] - matrix[x][y - 1] - matrix[x\n ][y + 1] - matrix[x + 1][y]\n Gy = 8 * matrix[x][y] - matrix[x - 1][y - 1] - matrix[x - 1][y\n ] - matrix[x - 1][y + 1] - matrix[x][y - 1] - matrix[x][y + 1\n ] - matrix[x + 1][y - 1] - matrix[x + 1][y] - matrix[x + 1][y + 1]\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n <function token>\n\n def mainInOper(self, pict, num):\n self.image = pict\n self.width = self.image.size[0]\n self.height = self.image.size[1]\n self.mask = []\n self.picture = []\n for j in range(3):\n mask2 = []\n for i in range(3):\n mask2.append(0)\n self.mask.append(mask2)\n for j in range(self.width):\n picture2 = []\n for i in range(self.height):\n picture2.append(0)\n self.picture.append(picture2)\n if self.width % 3 != 0 and self.height % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width - self.width % 3,\n self.height - self.height % 3))\n elif self.width % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width - self.width % 3,\n self.height))\n elif self.height % 3 != 0:\n self.imag = self.image.crop((0, 0, self.width, self.height - \n self.height % 3))\n else:\n self.imag = self.image\n self.draw = Draw(self.imag)\n self.width = self.imag.size[0]\n self.height = self.imag.size[1]\n self.pix = self.imag.load()\n print(self.imag.size)\n self._hei = 0\n self._wid = 0\n self._i_wid = 0\n while self._wid < self.width:\n self._j_hei = 0\n while self._hei < self.height and self._j_hei < 3:\n self.mask[self._i_wid][self._j_hei] = self.brightness(self.\n _wid, self._hei)\n self._j_hei += 1\n self._hei += 1\n if self._i_wid == 2:\n if self._hei == self.height:\n self.choiceOper(num)\n self._hei = 0\n self._i_wid = 0\n self._wid += 1\n else:\n self.choiceOper(num)\n self._i_wid = 0\n self._wid -= 2\n else:\n self._hei -= 3\n self._i_wid += 1\n self._wid += 1\n if self._hei == self.height:\n self._hei = 0\n for i in range(self.width):\n for j in range(self.height):\n self.draw.point((i, j), (int(self.picture[i][j]), int(self.\n picture[i][j]), int(self.picture[i][j])))\n return self.imag\n",
"<import token>\n\n\nclass Oper(object):\n <function token>\n\n def changeColor(self, alpha, maska, x, y):\n for i in [x - 2, x - 1, x]:\n for j in [y - 3, y - 2, y - 1]:\n self.picture[i][j] = alpha\n\n def operRoberts(self, matrix, x, y):\n Gx = matrix[x + 1][y + 1] - matrix[x][y]\n Gy = matrix[x + 1][y] - matrix[x][y + 1]\n G = mh.fabs(Gx) + mh.fabs(Gy)\n return G\n\n def operSobel(self, matrix, x, y):\n Gx = matrix[x + 1][y - 1] + 2 * matrix[x + 1][y] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x - 1][y] + matrix[x - 1\n ][y + 1])\n Gy = matrix[x - 1][y + 1] + 2 * matrix[x][y + 1] + matrix[x + 1][y + 1\n ] - (matrix[x - 1][y - 1] + 2 * matrix[x][y - 1] + matrix[x + 1\n ][y - 1])\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n\n def operLaplas(self, matrix, x, y):\n Gx = 4 * matrix[x][y] - matrix[x - 1][y] - matrix[x][y - 1] - matrix[x\n ][y + 1] - matrix[x + 1][y]\n Gy = 8 * matrix[x][y] - matrix[x - 1][y - 1] - matrix[x - 1][y\n ] - matrix[x - 1][y + 1] - matrix[x][y - 1] - matrix[x][y + 1\n ] - matrix[x + 1][y - 1] - matrix[x + 1][y] - matrix[x + 1][y + 1]\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Oper(object):\n <function token>\n\n def changeColor(self, alpha, maska, x, y):\n for i in [x - 2, x - 1, x]:\n for j in [y - 3, y - 2, y - 1]:\n self.picture[i][j] = alpha\n\n def operRoberts(self, matrix, x, y):\n Gx = matrix[x + 1][y + 1] - matrix[x][y]\n Gy = matrix[x + 1][y] - matrix[x][y + 1]\n G = mh.fabs(Gx) + mh.fabs(Gy)\n return G\n <function token>\n\n def operLaplas(self, matrix, x, y):\n Gx = 4 * matrix[x][y] - matrix[x - 1][y] - matrix[x][y - 1] - matrix[x\n ][y + 1] - matrix[x + 1][y]\n Gy = 8 * matrix[x][y] - matrix[x - 1][y - 1] - matrix[x - 1][y\n ] - matrix[x - 1][y + 1] - matrix[x][y - 1] - matrix[x][y + 1\n ] - matrix[x + 1][y - 1] - matrix[x + 1][y] - matrix[x + 1][y + 1]\n G = mh.sqrt(sum([Gx ** 2, Gy ** 2]))\n return G\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Oper(object):\n <function token>\n\n def changeColor(self, alpha, maska, x, y):\n for i in [x - 2, x - 1, x]:\n for j in [y - 3, y - 2, y - 1]:\n self.picture[i][j] = alpha\n\n def operRoberts(self, matrix, x, y):\n Gx = matrix[x + 1][y + 1] - matrix[x][y]\n Gy = matrix[x + 1][y] - matrix[x][y + 1]\n G = mh.fabs(Gx) + mh.fabs(Gy)\n return G\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Oper(object):\n <function token>\n <function token>\n\n def operRoberts(self, matrix, x, y):\n Gx = matrix[x + 1][y + 1] - matrix[x][y]\n Gy = matrix[x + 1][y] - matrix[x][y + 1]\n G = mh.fabs(Gx) + mh.fabs(Gy)\n return G\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Oper(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,829 |
28cb8a214669ce49a7050b09c6e6eb7757cc0e0f
|
# --------------------------- FUNCTIONS --------------------------
# Declare the function: the function name comes after def.
# The body must be indented after the colon, i.e. by 4 spaces.
# ----------------PROGRAM: returns the first two and the last two letters of a word--------------
def mix(pal):
    if len(pal) > 4:
        mix1 = pal[0:2] + pal[-2:]
        print(mix1)
    else:
        print('invalid input')
mix(input('Ingrese una palabra: '))
# ------------------------PROGRAM: returns your birth year and the year you turn 100---------------------
def age_hundred():
name = input('Escribe tu nombre: ')
age = int(input('escribe tu edad: '))
year = 2019 + (100 - age)
birth = 2019 - age
print('\n'f'Hola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}')
age_hundred()
#--------------PROGRAM: prints the even numbers of a list and counts how many even and odd numbers it has----------------------
# list of numbers
nums = input('Ingresa los números separados por comas:')
# convert the input into a list of integers
lista = list(int(n) for n in nums.split(','))
# initialise the counters
count, count2 = 0, 0
# classify every number as even or odd
for n in lista:
    if n % 2 == 0:
        count += 1
        print(n)
    else:
        count2 += 1  # fixed: the odd counter is count2; the original incremented count in both branches
print(f'Tenemos {count} números pares y {count2} números impares')
#------------------- SECOND VERSION OF THE CODE----------------
nums = input('Ingresa los números separados por comas:')
lista = list(int(n) for n in nums.split(','))
impar = []
par = []
for i in lista:
    if i % 2 == 0:
        par.append(i)
    else:
        impar.append(i)
print('la lista de numeros pares es: ', par)
print('La lista de numeros impares es: ', impar)
#-------------------- FUNCION : mayor entre tres números------------------------
def mayor_num(num1, num2, num3):
    if num1 >= num2 and num1 >= num3:
        return num1
    elif num2 >= num1 and num2 >= num3:
        return num2
    else:
        return num3
print(mayor_num(100,34,5))
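# (note, not in the original) the built-in max() gives the same result:
# max(100, 34, 5) == 100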
#-------------------- FUNCTION: replace the vowels of a given phrase------------------------
def encriptar(frase, caracter):
    encriptada = ''  # empty string that builds the result; remember that strings are immutable
    # walk through the phrase: consonants fall into the else branch and are appended unchanged,
    # vowels fall into the if branch and the chosen character is appended instead
for letra in frase:
if letra.lower() in 'aeiouáéíúó':
if letra.isupper():
encriptada = encriptada + caracter.upper()
else:
encriptada = encriptada + caracter
else:
encriptada = encriptada + letra
return encriptada
while True:
texto = input('Ingresa una frase:\n')
caracter_elegido = input('Elige el carácter que deseas:\n')
print(encriptar(texto, caracter_elegido))
opcion = input('''\n Ingresa (1) para encriptar otra frase o (2) para salir del programa:
>''')
if opcion == '2':
break
    if opcion == '1':
        print('---------o--------\n')

# the == '1' check is not strictly necessary: because this is an infinite loop, any answer
# other than '2' simply repeats; '2' is the only option that closes the loop
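# (illustrative example, not part of the original) expected behaviour of encriptar:
# encriptar('Hola Mundo', 'x') returns 'Hxlx Mxndx' (every vowel replaced, case preserved)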
#------------------- REVERSED PHRASE -------------------------
def reverse():
    user = input('type anything here')
    b = user.split()
    c = b[::-1]  # slicing is [start:stop:step]; a negative step walks the list backwards, and ':' alone means from start to end
    d = " ".join(c)
    print(d)
#------------------- DUPLICATES -------------------------
# Write a function that takes a list and returns a new list that contains
# all the elements of the first list minus duplicates.
def lista_duplicate():
lista = input('ingresa una lista de cosas:').split()
print(lista)
y = []
for letra in lista:
if letra not in y:
y.append(letra)
print(y)
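# (alternative sketch, not from the original) since Python 3.7 dict.fromkeys preserves
# insertion order, so duplicates can also be removed in one line:
# sin_duplicados = list(dict.fromkeys(['a', 'b', 'a', 'c']))  # ['a', 'b', 'c']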
#------------------- PASSWORD LENGTH GENERATOR -------------------------
# generate a random password of the requested length from letters, digits and punctuation
import string
import random
def pw_gen(size=8, chars=string.ascii_letters + string.digits + string.punctuation):
    return ''.join(random.choice(chars) for _ in range(size))
#print(pw_gen(int(input('How many characters in your password?'))))
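# (security note / sketch, assuming a security-sensitive use) the random module is not
# cryptographically secure; the stdlib secrets module is preferable for real passwords:
# import secrets
# ''.join(secrets.choice(chars) for _ in range(size))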
def pwd():
pwd = ""
count = 0
length = int(input("How many characters would you like in your password? "))
    while count != length:  # != means "not equal": keep looping while count differs from length
        upper = [random.choice(string.ascii_uppercase)]
        lower = [random.choice(string.ascii_lowercase)]
        num = [random.choice(string.digits)]
        symbol = [random.choice(string.punctuation)]
        everything = upper + lower + num + symbol
        pwd += random.choice(everything)
        count += 1
if count == length:
print(pwd)
#------------------- PRIME NUMBER CHECK -------------------------
# tells if a number is prime and returns the divisor list
def prime_number():
    # range() only works with integers; numpy.arange(start, stop, step) would be needed
    # for a float range, but for simplicity we work with integers here
    number = int(input('enter your number: '))
listRange = list(range(1, number + 1))
divisorList = []
for numb in listRange:
if number % numb == 0:
divisorList.append(numb)
if len(divisorList) == 2:
print(f'{number} is prime, only has {divisorList} as divisors')
else:
print(f'{number} is not a prime number and the divisors are {divisorList}')
prime_number()
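# (alternative sketch, not part of the original; the name es_primo is my own) checking
# divisors only up to the square root of n is enough to decide primality and is much
# faster for large numbers:
def es_primo(n):
    if n < 2:
        return False
    for d in range(2, int(n ** 0.5) + 1):
        if n % d == 0:
            return False
    return True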
# ---------------------- BINARY SEARCH----------------------------------------------
lista = [0, 88, 72, 21, 14, 16, 90, 35, 47, 6, 68, 12, 10, 54, 41]
lista.sort()  # sorts the list in ascending order
# find the midpoint
# check whether the midpoint holds the number we are looking for
# if the target is larger, move the start up
# if the target is smaller, move the end down
def busqueda_binaria(valor):
    inicio = 0
    final = len(lista) - 1  # index of the last element; remember list indices start at 0
    while inicio <= final:
        puntero = (inicio + final) // 2  # // is integer (floor) division
        if valor == lista[puntero]:
            return puntero
        elif valor > lista[puntero]:
            inicio = puntero + 1
        else:
            final = puntero - 1
    return None
def buscar_valor(valor):
res_busqueda = busqueda_binaria(valor)
if res_busqueda is None:
return f'el valor {valor} no se encontro '
else:
return f'el numero {valor} se encuentra en la posicion {res_busqueda}'
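# (alternative sketch, not from the original) the stdlib bisect module implements the
# same idea on the already-sorted lista:
# import bisect
# i = bisect.bisect_left(lista, valor)
# encontrado = i < len(lista) and lista[i] == valor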
while True:
respuesta = int(input('escribe el numero que deseas buscar: '))
print(buscar_valor(respuesta))
opcion = input('''\n Ingresa (1) buscar otro numero o (2) para salir del programa:
>''')
if opcion == '2':
break
    if opcion == '1':
print('---------o--------\n')
#------------------------------- TIC TAC TOE SQUARE ------------------------
def print_lines(a):
print(" ---" * a)
print("| " * (a + 1))
def print_lines2(a):
print(" ---" * a)
def draw_board(x):
    y = x
    while y:
if x>=1:
print_lines(y)
x-=1
elif x==0:
print_lines2(y)
break
print("hello user!!!! welcome ")
x=int(input("Enter the size of board u want to draw---> "))
draw_board(x)
|
[
"# --------------------------- FUNCIONES --------------------------\n\n# declara la funcion, despues de def va el nombre de la función\n# debe de estar identado despues de :, es decir 4 espacios\n\n# ----------------PROGRAMA: devuelve las dos primeras y dos últimas letras de una palabra--------------\n\n\ndef mix(pal):\n if len(pal) > 4:\n mix1 = pal [0:2] + pal [-2:]\n print(mix1)\n\n if len(pal) <= 4:\n print('invalid input')\n\nmix(input('Ingrese una palabra: '))\n\n\n# ------------------------PROGRAMA: devuelve el año nacimiento y cumpleaños # 100---------------------\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n\n print('\\n'f'Hola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}')\n\nage_hundred()\n\n\n\n#--------------PROGRAMA: devuelve los números pares y dice cuantos hay en una lista----------------------\n\n\n#lista de números\nnums = input('Ingresa los números separados por comas:')\n\n\n#convetir el input en una lista\nlista = list(int(n) for n in nums.split(','))\n\n#iniciar el conteo\ncount, count2 = 0, 0\n\n#describir las condiciones de pares e impares\nfor n in lista:\n if n %2 == 0:\n count += 1\n print(n)\n else:\n count += 1\n print(n)\n\nprint(f'Tenemos {count} números pares y {count2} números impares')\n\n#------------------- SEGUNDA OPCION DE CODIGO----------------\n\nnums = input('Ingresa los números separados por comas:')\nlista = list(int(n) for n in nums.split(','))\n\nimpar =[]\npar = []\n\nfor i in lista:\n if i %2 == 0:\n par.append(i)\n else:\n impar.append(i)\n\nprint ('la lista de numeros pares es: ', par)\nprint ('La lista de numeros impares es: ', impar)\n\n#-------------------- FUNCION : mayor entre tres números------------------------\n\ndef mayor_num(num1,num2,num3):\n if num1 >= num2 and num1 >= num3:\n return num1\n if num2 >= num3 and num2>=num3 :\n return num2\n else:\n return num3\n\nprint(mayor_num(100,34,5))\n\n#-------------------- FUNCION : reemplazar vocales para una determinada frase------------------------\n\n\ndef encriptar(frase,caracter):\n encriptada ='' # variable vacia que crea la frase dada, recordar que un string no es modificable\n\n # se mete a frase y si es consonante se va a else y me guarda la letra en encriptada, si es vocal se va a if y me escribe\n #lo que llevo en encriptada y me anexa una x.\n\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\nwhile True:\n texto = input('Ingresa una frase:\\n')\n caracter_elegido = input('Elige el carácter que deseas:\\n')\n print(encriptar(texto, caracter_elegido))\n opcion = input('''\\n Ingresa (1) para encriptar otra frase o (2) para salir del programa: \n >''')\n if opcion == '2':\n break\n if opcion =='1':\n print('---------o--------\\n')\n\n# no es necesario agregar el == 1 ya que como es un ciclo infinito, se tiene que si es distinto de 2 se vuelve a\n# repetir, es la unica condicion que si es 2 entonces se cierre\n\n#------------------- FRASE AL REVES -------------------------\n\ndef reverse():\n user=input('type anything here')\n b = user.split()\n c = b[::-1] #se usa [inicio fin y salto] si el salto es engativo se recorre hacia atras, : significa que es desde inicio a fin \n d = \" \".join(c)\n print(d)\n\n#------------------- DUPLICADOS -------------------------\n\n# Write 
a function that takes a list and returns a new list that contains\n# all the elements of the first list minus duplicates.\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n#------------------- PASSWORD LENGHT GENERATOR -------------------------\n# generate a password with length \"passlen\" with no duplicate characters in the password\n\nimport string\nimport random\ndef pw_gen(size = 8, chars=string.ascii_letters + string.digits + string.punctuation):\n\n\treturn ''.join(random.choice(chars) for _ in range(size))\n\n#print(pw_gen(int(input('How many characters in your password?'))))\n\ndef pwd():\n pwd = \"\"\n count = 0\n length = int(input(\"How many characters would you like in your password? \"))\n\n while count != length: # significa != no igual, mientras count sea distinto a length\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n#------------------- PASSWORD LENGHT GENERATOR -------------------------\n# tells if a number is prime and returns the divisor list\n\ndef prime_number():\n\n # range function only works with integers, we import the module numpy to work with floats range\n # the function arange (start, stop, step)\n import numpy\n # para simplicidad trabajaremos con enteros\n\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(f'{number} is not a prime number and the divisors are {divisorList}')\n\nprime_number()\n\n# ---------------------- BUSQUEDA BINARIA----------------------------------------------\n\nlista = [0, 88, 72, 21, 14, 16, 90, 35, 47, 6, 68, 12, 10, 54, 41]\nlista.sort() # organiza la lista de menor a mayor\n\n# buscar el punto medioo\n# comprobar que el punto medio es el numero buscado\n# numero menor, disminuimos el final\n# numero menor aumentamos el inicio\n\n\ndef busqueda_binaria(valor):\n inicio = 0\n final = len(lista)- 1 # el ultimo indice, acordarse que la lista comienza en 0\n\n while inicio<=final:\n puntero = (inicio + final)//2 # // parte entera de la division\n if valor ==lista[puntero]:\n return puntero\n elif valor > lista[puntero]:\n inicio = puntero + 1\n else:\n final = puntero - 1\n return None\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\nwhile True:\n respuesta = int(input('escribe el numero que deseas buscar: '))\n print(buscar_valor(respuesta))\n opcion = input('''\\n Ingresa (1) buscar otro numero o (2) para salir del programa: \n >''')\n if opcion == '2':\n break\n if opcion =='1':\n print('---------o--------\\n')\n\n#------------------------------- TIC TAC TOE SQUARE ------------------------\ndef print_lines(a):\n print(\" ---\" * a)\n print(\"| \" * (a + 1))\ndef print_lines2(a):\n print(\" ---\" * a)\ndef draw_board(x):\n y=x\n while(y):\n if x>=1:\n print_lines(y)\n x-=1\n elif x==0:\n print_lines2(y)\n 
break\n\nprint(\"hello user!!!! welcome \")\nx=int(input(\"Enter the size of board u want to draw---> \"))\ndraw_board(x)\n\n\n\n\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\nmix(input('Ingrese una palabra: '))\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\nage_hundred()\nnums = input('Ingresa los números separados por comas:')\nlista = list(int(n) for n in nums.split(','))\ncount, count2 = 0, 0\nfor n in lista:\n if n % 2 == 0:\n count += 1\n print(n)\n else:\n count += 1\n print(n)\nprint(f'Tenemos {count} números pares y {count2} números impares')\nnums = input('Ingresa los números separados por comas:')\nlista = list(int(n) for n in nums.split(','))\nimpar = []\npar = []\nfor i in lista:\n if i % 2 == 0:\n par.append(i)\n else:\n impar.append(i)\nprint('la lista de numeros pares es: ', par)\nprint('La lista de numeros impares es: ', impar)\n\n\ndef mayor_num(num1, num2, num3):\n if num1 >= num2 and num1 >= num3:\n return num1\n if num2 >= num3 and num2 >= num3:\n return num2\n else:\n return num3\n\n\nprint(mayor_num(100, 34, 5))\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\nwhile True:\n texto = input('Ingresa una frase:\\n')\n caracter_elegido = input('Elige el carácter que deseas:\\n')\n print(encriptar(texto, caracter_elegido))\n opcion = input(\n \"\"\"\n Ingresa (1) para encriptar otra frase o (2) para salir del programa: \n >\"\"\"\n )\n if opcion == '2':\n break\n if opcion == '1':\n print('---------o--------\\n')\n\n\ndef reverse():\n user = input('type anything here')\n b = user.split()\n c = b[::-1]\n d = ' '.join(c)\n print(d)\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\nimport string\nimport random\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? 
')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\ndef prime_number():\n import numpy\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(\n f'{number} is not a prime number and the divisors are {divisorList}'\n )\n\n\nprime_number()\nlista = [0, 88, 72, 21, 14, 16, 90, 35, 47, 6, 68, 12, 10, 54, 41]\nlista.sort()\n\n\ndef busqueda_binaria(valor):\n inicio = 0\n final = len(lista) - 1\n while inicio <= final:\n puntero = (inicio + final) // 2\n if valor == lista[puntero]:\n return puntero\n elif valor > lista[puntero]:\n inicio = puntero + 1\n else:\n final = puntero - 1\n return None\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\nwhile True:\n respuesta = int(input('escribe el numero que deseas buscar: '))\n print(buscar_valor(respuesta))\n opcion = input(\n '\\n Ingresa (1) buscar otro numero o (2) para salir del programa: \\n >'\n )\n if opcion == '2':\n break\n if opcion == '1':\n print('---------o--------\\n')\n\n\ndef print_lines(a):\n print(' ---' * a)\n print('| ' * (a + 1))\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\ndef draw_board(x):\n y = x\n while y:\n if x >= 1:\n print_lines(y)\n x -= 1\n elif x == 0:\n print_lines2(y)\n break\n\n\nprint('hello user!!!! welcome ')\nx = int(input('Enter the size of board u want to draw---> '))\ndraw_board(x)\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\nmix(input('Ingrese una palabra: '))\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\nage_hundred()\nnums = input('Ingresa los números separados por comas:')\nlista = list(int(n) for n in nums.split(','))\ncount, count2 = 0, 0\nfor n in lista:\n if n % 2 == 0:\n count += 1\n print(n)\n else:\n count += 1\n print(n)\nprint(f'Tenemos {count} números pares y {count2} números impares')\nnums = input('Ingresa los números separados por comas:')\nlista = list(int(n) for n in nums.split(','))\nimpar = []\npar = []\nfor i in lista:\n if i % 2 == 0:\n par.append(i)\n else:\n impar.append(i)\nprint('la lista de numeros pares es: ', par)\nprint('La lista de numeros impares es: ', impar)\n\n\ndef mayor_num(num1, num2, num3):\n if num1 >= num2 and num1 >= num3:\n return num1\n if num2 >= num3 and num2 >= num3:\n return num2\n else:\n return num3\n\n\nprint(mayor_num(100, 34, 5))\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\nwhile True:\n texto = input('Ingresa una frase:\\n')\n caracter_elegido = input('Elige el carácter que deseas:\\n')\n print(encriptar(texto, caracter_elegido))\n opcion = input(\n \"\"\"\n Ingresa (1) para encriptar otra frase o (2) para salir del programa: \n >\"\"\"\n )\n if opcion == '2':\n break\n if opcion == '1':\n print('---------o--------\\n')\n\n\ndef reverse():\n user = input('type anything here')\n b = user.split()\n c = b[::-1]\n d = ' '.join(c)\n print(d)\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? 
')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\ndef prime_number():\n import numpy\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(\n f'{number} is not a prime number and the divisors are {divisorList}'\n )\n\n\nprime_number()\nlista = [0, 88, 72, 21, 14, 16, 90, 35, 47, 6, 68, 12, 10, 54, 41]\nlista.sort()\n\n\ndef busqueda_binaria(valor):\n inicio = 0\n final = len(lista) - 1\n while inicio <= final:\n puntero = (inicio + final) // 2\n if valor == lista[puntero]:\n return puntero\n elif valor > lista[puntero]:\n inicio = puntero + 1\n else:\n final = puntero - 1\n return None\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\nwhile True:\n respuesta = int(input('escribe el numero que deseas buscar: '))\n print(buscar_valor(respuesta))\n opcion = input(\n '\\n Ingresa (1) buscar otro numero o (2) para salir del programa: \\n >'\n )\n if opcion == '2':\n break\n if opcion == '1':\n print('---------o--------\\n')\n\n\ndef print_lines(a):\n print(' ---' * a)\n print('| ' * (a + 1))\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\ndef draw_board(x):\n y = x\n while y:\n if x >= 1:\n print_lines(y)\n x -= 1\n elif x == 0:\n print_lines2(y)\n break\n\n\nprint('hello user!!!! welcome ')\nx = int(input('Enter the size of board u want to draw---> '))\ndraw_board(x)\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\nmix(input('Ingrese una palabra: '))\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\nage_hundred()\n<assignment token>\nfor n in lista:\n if n % 2 == 0:\n count += 1\n print(n)\n else:\n count += 1\n print(n)\nprint(f'Tenemos {count} números pares y {count2} números impares')\n<assignment token>\nfor i in lista:\n if i % 2 == 0:\n par.append(i)\n else:\n impar.append(i)\nprint('la lista de numeros pares es: ', par)\nprint('La lista de numeros impares es: ', impar)\n\n\ndef mayor_num(num1, num2, num3):\n if num1 >= num2 and num1 >= num3:\n return num1\n if num2 >= num3 and num2 >= num3:\n return num2\n else:\n return num3\n\n\nprint(mayor_num(100, 34, 5))\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\nwhile True:\n texto = input('Ingresa una frase:\\n')\n caracter_elegido = input('Elige el carácter que deseas:\\n')\n print(encriptar(texto, caracter_elegido))\n opcion = input(\n \"\"\"\n Ingresa (1) para encriptar otra frase o (2) para salir del programa: \n >\"\"\"\n )\n if opcion == '2':\n break\n if opcion == '1':\n print('---------o--------\\n')\n\n\ndef reverse():\n user = input('type anything here')\n b = user.split()\n c = b[::-1]\n d = ' '.join(c)\n print(d)\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? 
')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\ndef prime_number():\n import numpy\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(\n f'{number} is not a prime number and the divisors are {divisorList}'\n )\n\n\nprime_number()\n<assignment token>\nlista.sort()\n\n\ndef busqueda_binaria(valor):\n inicio = 0\n final = len(lista) - 1\n while inicio <= final:\n puntero = (inicio + final) // 2\n if valor == lista[puntero]:\n return puntero\n elif valor > lista[puntero]:\n inicio = puntero + 1\n else:\n final = puntero - 1\n return None\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\nwhile True:\n respuesta = int(input('escribe el numero que deseas buscar: '))\n print(buscar_valor(respuesta))\n opcion = input(\n '\\n Ingresa (1) buscar otro numero o (2) para salir del programa: \\n >'\n )\n if opcion == '2':\n break\n if opcion == '1':\n print('---------o--------\\n')\n\n\ndef print_lines(a):\n print(' ---' * a)\n print('| ' * (a + 1))\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\ndef draw_board(x):\n y = x\n while y:\n if x >= 1:\n print_lines(y)\n x -= 1\n elif x == 0:\n print_lines2(y)\n break\n\n\nprint('hello user!!!! welcome ')\n<assignment token>\ndraw_board(x)\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef mayor_num(num1, num2, num3):\n if num1 >= num2 and num1 >= num3:\n return num1\n if num2 >= num3 and num2 >= num3:\n return num2\n else:\n return num3\n\n\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n\n\ndef reverse():\n user = input('type anything here')\n b = user.split()\n c = b[::-1]\n d = ' '.join(c)\n print(d)\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? ')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\ndef prime_number():\n import numpy\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(\n f'{number} is not a prime number and the divisors are {divisorList}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n\n\ndef busqueda_binaria(valor):\n inicio = 0\n final = len(lista) - 1\n while inicio <= final:\n puntero = (inicio + final) // 2\n if valor == lista[puntero]:\n return puntero\n elif valor > lista[puntero]:\n inicio = puntero + 1\n else:\n final = puntero - 1\n return None\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n\n\ndef print_lines(a):\n print(' ---' * a)\n print('| ' * (a + 1))\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\ndef draw_board(x):\n y = x\n while y:\n if x >= 1:\n print_lines(y)\n x -= 1\n elif x == 0:\n print_lines2(y)\n break\n\n\n<code token>\n<assignment token>\n<code token>\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef mayor_num(num1, num2, num3):\n if num1 >= num2 and num1 >= num3:\n return num1\n if num2 >= num3 and num2 >= num3:\n return num2\n else:\n return num3\n\n\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n\n\ndef reverse():\n user = input('type anything here')\n b = user.split()\n c = b[::-1]\n d = ' '.join(c)\n print(d)\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? ')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\ndef prime_number():\n import numpy\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(\n f'{number} is not a prime number and the divisors are {divisorList}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n\n\ndef busqueda_binaria(valor):\n inicio = 0\n final = len(lista) - 1\n while inicio <= final:\n puntero = (inicio + final) // 2\n if valor == lista[puntero]:\n return puntero\n elif valor > lista[puntero]:\n inicio = puntero + 1\n else:\n final = puntero - 1\n return None\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n\n\ndef print_lines(a):\n print(' ---' * a)\n print('| ' * (a + 1))\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef mayor_num(num1, num2, num3):\n if num1 >= num2 and num1 >= num3:\n return num1\n if num2 >= num3 and num2 >= num3:\n return num2\n else:\n return num3\n\n\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n\n\ndef reverse():\n user = input('type anything here')\n b = user.split()\n c = b[::-1]\n d = ' '.join(c)\n print(d)\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? ')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\ndef prime_number():\n import numpy\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(\n f'{number} is not a prime number and the divisors are {divisorList}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n\n\ndef busqueda_binaria(valor):\n inicio = 0\n final = len(lista) - 1\n while inicio <= final:\n puntero = (inicio + final) // 2\n if valor == lista[puntero]:\n return puntero\n elif valor > lista[puntero]:\n inicio = puntero + 1\n else:\n final = puntero - 1\n return None\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n\n\ndef reverse():\n user = input('type anything here')\n b = user.split()\n c = b[::-1]\n d = ' '.join(c)\n print(d)\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? ')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\ndef prime_number():\n import numpy\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(\n f'{number} is not a prime number and the divisors are {divisorList}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n\n\ndef busqueda_binaria(valor):\n inicio = 0\n final = len(lista) - 1\n while inicio <= final:\n puntero = (inicio + final) // 2\n if valor == lista[puntero]:\n return puntero\n elif valor > lista[puntero]:\n inicio = puntero + 1\n else:\n final = puntero - 1\n return None\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n\n\ndef reverse():\n user = input('type anything here')\n b = user.split()\n c = b[::-1]\n d = ' '.join(c)\n print(d)\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? ')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\ndef prime_number():\n import numpy\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(\n f'{number} is not a prime number and the divisors are {divisorList}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n<function token>\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? ')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\ndef prime_number():\n import numpy\n number = int(input('enter your number: '))\n listRange = list(range(1, number + 1))\n divisorList = []\n for numb in listRange:\n if number % numb == 0:\n divisorList.append(numb)\n if len(divisorList) == 2:\n print(f'{number} is prime, only has {divisorList} as divisors')\n else:\n print(\n f'{number} is not a prime number and the divisors are {divisorList}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n<function token>\n\n\ndef lista_duplicate():\n lista = input('ingresa una lista de cosas:').split()\n print(lista)\n y = []\n for letra in lista:\n if letra not in y:\n y.append(letra)\n print(y)\n\n\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? ')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"def mix(pal):\n if len(pal) > 4:\n mix1 = pal[0:2] + pal[-2:]\n print(mix1)\n if len(pal) <= 4:\n print('invalid input')\n\n\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n<function token>\n<function token>\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? ')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n<function token>\n<function token>\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef pwd():\n pwd = ''\n count = 0\n length = int(input('How many characters would you like in your password? ')\n )\n while count != length:\n upper = [random.choice(string.ascii_uppercase)]\n lower = [random.choice(string.ascii_lowercase)]\n num = [random.choice(string.digits)]\n symbol = [random.choice(string.punctuation)]\n everything = upper + lower + num + symbol\n pwd += random.choice(everything)\n count += 1\n continue\n if count == length:\n print(pwd)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<code token>\n\n\ndef age_hundred():\n name = input('Escribe tu nombre: ')\n age = int(input('escribe tu edad: '))\n year = 2019 + (100 - age)\n birth = 2019 - age\n print(\n f'\\nHola {name}, naciste en {birth}, tu cumpleaños 100 sera en el año {year}'\n )\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n<function token>\n<function token>\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<code token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n<function token>\n<function token>\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef buscar_valor(valor):\n res_busqueda = busqueda_binaria(valor)\n if res_busqueda is None:\n return f'el valor {valor} no se encontro '\n else:\n return f'el numero {valor} se encuentra en la posicion {res_busqueda}'\n\n\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<code token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n<function token>\n<function token>\n<import token>\n\n\ndef pw_gen(size=8, chars=string.ascii_letters + string.digits + string.\n punctuation):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<code token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<code token>\n<function token>\n\n\ndef print_lines2(a):\n print(' ---' * a)\n\n\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<code token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n\n\ndef encriptar(frase, caracter):\n encriptada = ''\n for letra in frase:\n if letra.lower() in 'aeiouáéíúó':\n if letra.isupper():\n encriptada = encriptada + caracter.upper()\n else:\n encriptada = encriptada + caracter\n else:\n encriptada = encriptada + letra\n return encriptada\n\n\n<code token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<function token>\n<code token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<code token>\n<function token>\n<code token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<code token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,830 |
a31c66e48778d1c7b160ce5579f8ca92852de261
|
import arrow
import glob
import os

# article images are stored on the file system; items expire from the DB after 30 days,
# so clean up the image files after 31 days
# NOTE: on Unix, getctime is the inode/metadata change time (not creation time), which could cause problems
img_files = glob.glob('/home/ubuntu/images/*')  # '*' matches the files inside the directory, not the directory itself

for img in img_files:
    time_modified = os.path.getctime(img)
    if arrow.utcnow().timestamp - time_modified > 2678400:  # 31 days in seconds (arrow < 1.0 exposes .timestamp as a property)
        os.remove(img)
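# (alternative sketch, not from the original) the same age check without arrow, using
# only the standard library; getmtime is the last-modification time:
# import time
# if time.time() - os.path.getmtime(img) > 31 * 24 * 3600:
#     os.remove(img)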
|
[
"import arrow\nimport ctime\nimport glob\nimport os\n\n# article images are stored on file system. Items expire from DB after 30 days\n# clean up articles after 31\n\n# NOTE: ctime is last modification time, not creation time, could cause problems\n\nimg_files = glob.glob('/home/ubuntu/images/')\n\nfor img in img_files:\n time_modified = os.path.getctime(img)\n if arrow.utcnow().timestamp - time_modified > 2678400: # 31 days\n os.remove(img)\n",
"import arrow\nimport ctime\nimport glob\nimport os\nimg_files = glob.glob('/home/ubuntu/images/')\nfor img in img_files:\n time_modified = os.path.getctime(img)\n if arrow.utcnow().timestamp - time_modified > 2678400:\n os.remove(img)\n",
"<import token>\nimg_files = glob.glob('/home/ubuntu/images/')\nfor img in img_files:\n time_modified = os.path.getctime(img)\n if arrow.utcnow().timestamp - time_modified > 2678400:\n os.remove(img)\n",
"<import token>\n<assignment token>\nfor img in img_files:\n time_modified = os.path.getctime(img)\n if arrow.utcnow().timestamp - time_modified > 2678400:\n os.remove(img)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,831 |
967b84e33d24322ad4f084d925f92b0755b5961d
|
import os
import subprocess
import logbook
logger = logbook.Logger('connman-dispatcher')
def is_executable(path):
return all([os.path.isfile(path), os.access(path, os.X_OK)])
def execute_scripts_in_dirs(paths, state):
for path in sorted(paths):
if os.path.exists(path) and os.path.isdir(path):
execute_scripts_in_dir(path, state)
def execute_scripts_in_dir(path, state):
for script in sorted(os.listdir(path)):
full_scirpt_path = os.path.join(path, script)
if os.path.exists(full_scirpt_path):
if is_executable(full_scirpt_path):
logger.info('executing: %s %s' % (full_scirpt_path, state))
subprocess.Popen([full_scirpt_path, state])
else:
logger.error('%s is not executable file' % full_scirpt_path)
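# (illustrative, hypothetical path) a typical call from a dispatcher hook might look like:
# execute_scripts_in_dirs(['/etc/connman-dispatcher.d'], 'online')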
|
[
"import os\nimport subprocess\nimport logbook\nlogger = logbook.Logger('connman-dispatcher')\n\n\ndef is_executable(path):\n return all([os.path.isfile(path), os.access(path, os.X_OK)])\n\ndef execute_scripts_in_dirs(paths, state):\n for path in sorted(paths):\n if os.path.exists(path) and os.path.isdir(path):\n execute_scripts_in_dir(path, state)\n\ndef execute_scripts_in_dir(path, state):\n for script in sorted(os.listdir(path)):\n full_scirpt_path = os.path.join(path, script)\n if os.path.exists(full_scirpt_path):\n if is_executable(full_scirpt_path):\n logger.info('executing: %s %s' % (full_scirpt_path, state))\n subprocess.Popen([full_scirpt_path, state])\n else:\n logger.error('%s is not executable file' % full_scirpt_path)\n\n",
"import os\nimport subprocess\nimport logbook\nlogger = logbook.Logger('connman-dispatcher')\n\n\ndef is_executable(path):\n return all([os.path.isfile(path), os.access(path, os.X_OK)])\n\n\ndef execute_scripts_in_dirs(paths, state):\n for path in sorted(paths):\n if os.path.exists(path) and os.path.isdir(path):\n execute_scripts_in_dir(path, state)\n\n\ndef execute_scripts_in_dir(path, state):\n for script in sorted(os.listdir(path)):\n full_scirpt_path = os.path.join(path, script)\n if os.path.exists(full_scirpt_path):\n if is_executable(full_scirpt_path):\n logger.info('executing: %s %s' % (full_scirpt_path, state))\n subprocess.Popen([full_scirpt_path, state])\n else:\n logger.error('%s is not executable file' % full_scirpt_path)\n",
"<import token>\nlogger = logbook.Logger('connman-dispatcher')\n\n\ndef is_executable(path):\n return all([os.path.isfile(path), os.access(path, os.X_OK)])\n\n\ndef execute_scripts_in_dirs(paths, state):\n for path in sorted(paths):\n if os.path.exists(path) and os.path.isdir(path):\n execute_scripts_in_dir(path, state)\n\n\ndef execute_scripts_in_dir(path, state):\n for script in sorted(os.listdir(path)):\n full_scirpt_path = os.path.join(path, script)\n if os.path.exists(full_scirpt_path):\n if is_executable(full_scirpt_path):\n logger.info('executing: %s %s' % (full_scirpt_path, state))\n subprocess.Popen([full_scirpt_path, state])\n else:\n logger.error('%s is not executable file' % full_scirpt_path)\n",
"<import token>\n<assignment token>\n\n\ndef is_executable(path):\n return all([os.path.isfile(path), os.access(path, os.X_OK)])\n\n\ndef execute_scripts_in_dirs(paths, state):\n for path in sorted(paths):\n if os.path.exists(path) and os.path.isdir(path):\n execute_scripts_in_dir(path, state)\n\n\ndef execute_scripts_in_dir(path, state):\n for script in sorted(os.listdir(path)):\n full_scirpt_path = os.path.join(path, script)\n if os.path.exists(full_scirpt_path):\n if is_executable(full_scirpt_path):\n logger.info('executing: %s %s' % (full_scirpt_path, state))\n subprocess.Popen([full_scirpt_path, state])\n else:\n logger.error('%s is not executable file' % full_scirpt_path)\n",
"<import token>\n<assignment token>\n\n\ndef is_executable(path):\n return all([os.path.isfile(path), os.access(path, os.X_OK)])\n\n\n<function token>\n\n\ndef execute_scripts_in_dir(path, state):\n for script in sorted(os.listdir(path)):\n full_scirpt_path = os.path.join(path, script)\n if os.path.exists(full_scirpt_path):\n if is_executable(full_scirpt_path):\n logger.info('executing: %s %s' % (full_scirpt_path, state))\n subprocess.Popen([full_scirpt_path, state])\n else:\n logger.error('%s is not executable file' % full_scirpt_path)\n",
"<import token>\n<assignment token>\n\n\ndef is_executable(path):\n return all([os.path.isfile(path), os.access(path, os.X_OK)])\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,832 |
04e00ddcb0d179e5c3f06d8ac82461f9b413d638
|
"""
This class implements experiment_07
We create and save one model for the first occurrence of each day of the week in the month of September,
for each num_drivers value.
"""
from __future__ import division
import os
import logging
from pathos.pools import ProcessPool
import multiprocessing as mp
from copy import deepcopy
from jobs.rl_training import RunRLTrainingJob
from data.data_exporter import DataExporter
class Experiment07(object):
"""
Experiment07 class
"""
def __init__(self, config_):
"""
Constructor
:param config_:
:return:
"""
self.config = config_
self.data_exporter = DataExporter(self.config)
self.logger = logging.getLogger("cuda_logger")
self.expt_name = "expt_07"
self.config['RL_parameters']['experiment'] = self.expt_name
@staticmethod
def run_rl_training(config):
rl_trainer = RunRLTrainingJob(config)
data = rl_trainer.run()
return data
def run(self):
"""
Run experiment
"""
days = [
'Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_', 'Thursday_00_', 'Friday_00_', 'Saturday_00_',
'Sunday_01_', 'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_', 'Friday_01_', 'Saturday_01_',
'Sunday_02_', 'Monday_02_', 'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_', 'Saturday_02_',
'Sunday_03_', 'Monday_03_', 'Tuesday_03_', 'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_',
'Sunday_04_', 'Monday_04_', 'Tuesday_04_', 'Wednesday_04_', 'Thursday_04_', 'Friday_04_', 'Saturday_04_'
]
num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]
imbalance_thresholds = [2]
# Create a pool of processes
num_processes = mp.cpu_count()
self.logger.info("Processes: {}".format(num_processes))
pool = ProcessPool(nodes=num_processes)
configs = []
count = 0
for d in num_drivers:
for threshold in imbalance_thresholds:
for day in days:
self.config['RL_parameters']['num_drivers'] = d
self.config['RL_parameters']['num_strategic_drivers'] = d
self.config['RL_parameters']['imbalance_threshold'] = threshold
self.config['RL_parameters']['experiment'] = self.expt_name + "_" + str(count)
if os.path.isfile(self.config['app']['DATA_DIR'] + 'city_states/' + day + 'city_states.dill'):
self.config['RL_parameters']['city_states_filename'] = day + 'city_states.dill'
self.config['RL_parameters']['best_model_filename'] = (
day + str(d) + '_' + str(threshold) + '_model.dill')
configs.append(deepcopy(self.config))
count += 1
self.logger.info("Starting expt_07")
results = pool.amap(self.run_rl_training, configs).get()
pool.close()
pool.join()
pool.clear()
self.logger.info("Finished expt_07")
|
[
"\"\"\"\nThis class implements experiment_07\nWe create and save one model for first occurance of each day of the week in month of September\nfor each num_drivers value\n\"\"\"\n\nfrom __future__ import division\nimport os\nimport logging\nfrom pathos.pools import ProcessPool\nimport multiprocessing as mp\nfrom copy import deepcopy\nfrom jobs.rl_training import RunRLTrainingJob\nfrom data.data_exporter import DataExporter\n\n\nclass Experiment07(object):\n \"\"\"\n Experiment07 class\n \"\"\"\n\n def __init__(self, config_):\n \"\"\"\n Constructor\n :param config_:\n :return:\n \"\"\"\n self.config = config_\n self.data_exporter = DataExporter(self.config)\n self.logger = logging.getLogger(\"cuda_logger\")\n self.expt_name = \"expt_07\"\n self.config['RL_parameters']['experiment'] = self.expt_name\n\n @staticmethod\n def run_rl_training(config):\n rl_trainer = RunRLTrainingJob(config)\n data = rl_trainer.run()\n return data\n\n def run(self):\n \"\"\"\n Run experiment\n \"\"\"\n days = [\n 'Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_', 'Thursday_00_', 'Friday_00_', 'Saturday_00_',\n 'Sunday_01_', 'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_', 'Friday_01_', 'Saturday_01_',\n 'Sunday_02_', 'Monday_02_', 'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_', 'Saturday_02_',\n 'Sunday_03_', 'Monday_03_', 'Tuesday_03_', 'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_',\n 'Sunday_04_', 'Monday_04_', 'Tuesday_04_', 'Wednesday_04_', 'Thursday_04_', 'Friday_04_', 'Saturday_04_'\n ]\n\n num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]\n\n imbalance_thresholds = [2]\n\n # Create a pool of processes\n num_processes = mp.cpu_count()\n self.logger.info(\"Processes: {}\".format(num_processes))\n pool = ProcessPool(nodes=num_processes)\n\n configs = []\n count = 0\n\n for d in num_drivers:\n for threshold in imbalance_thresholds:\n for day in days:\n self.config['RL_parameters']['num_drivers'] = d\n self.config['RL_parameters']['num_strategic_drivers'] = d\n\n self.config['RL_parameters']['imbalance_threshold'] = threshold\n self.config['RL_parameters']['experiment'] = self.expt_name + \"_\" + str(count)\n if os.path.isfile(self.config['app']['DATA_DIR'] + 'city_states/' + day + 'city_states.dill'):\n self.config['RL_parameters']['city_states_filename'] = day + 'city_states.dill'\n self.config['RL_parameters']['best_model_filename'] = (\n day + str(d) + '_' + str(threshold) + '_model.dill')\n configs.append(deepcopy(self.config))\n count += 1\n\n self.logger.info(\"Starting expt_07\")\n\n results = pool.amap(self.run_rl_training, configs).get()\n pool.close()\n pool.join()\n pool.clear()\n\n self.logger.info(\"Finished expt_07\")\n",
"<docstring token>\nfrom __future__ import division\nimport os\nimport logging\nfrom pathos.pools import ProcessPool\nimport multiprocessing as mp\nfrom copy import deepcopy\nfrom jobs.rl_training import RunRLTrainingJob\nfrom data.data_exporter import DataExporter\n\n\nclass Experiment07(object):\n \"\"\"\n Experiment07 class\n \"\"\"\n\n def __init__(self, config_):\n \"\"\"\n Constructor\n :param config_:\n :return:\n \"\"\"\n self.config = config_\n self.data_exporter = DataExporter(self.config)\n self.logger = logging.getLogger('cuda_logger')\n self.expt_name = 'expt_07'\n self.config['RL_parameters']['experiment'] = self.expt_name\n\n @staticmethod\n def run_rl_training(config):\n rl_trainer = RunRLTrainingJob(config)\n data = rl_trainer.run()\n return data\n\n def run(self):\n \"\"\"\n Run experiment\n \"\"\"\n days = ['Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_',\n 'Thursday_00_', 'Friday_00_', 'Saturday_00_', 'Sunday_01_',\n 'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_',\n 'Friday_01_', 'Saturday_01_', 'Sunday_02_', 'Monday_02_',\n 'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_',\n 'Saturday_02_', 'Sunday_03_', 'Monday_03_', 'Tuesday_03_',\n 'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_',\n 'Sunday_04_', 'Monday_04_', 'Tuesday_04_', 'Wednesday_04_',\n 'Thursday_04_', 'Friday_04_', 'Saturday_04_']\n num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]\n imbalance_thresholds = [2]\n num_processes = mp.cpu_count()\n self.logger.info('Processes: {}'.format(num_processes))\n pool = ProcessPool(nodes=num_processes)\n configs = []\n count = 0\n for d in num_drivers:\n for threshold in imbalance_thresholds:\n for day in days:\n self.config['RL_parameters']['num_drivers'] = d\n self.config['RL_parameters']['num_strategic_drivers'] = d\n self.config['RL_parameters']['imbalance_threshold'\n ] = threshold\n self.config['RL_parameters']['experiment'\n ] = self.expt_name + '_' + str(count)\n if os.path.isfile(self.config['app']['DATA_DIR'] +\n 'city_states/' + day + 'city_states.dill'):\n self.config['RL_parameters']['city_states_filename'\n ] = day + 'city_states.dill'\n self.config['RL_parameters']['best_model_filename'\n ] = day + str(d) + '_' + str(threshold\n ) + '_model.dill'\n configs.append(deepcopy(self.config))\n count += 1\n self.logger.info('Starting expt_07')\n results = pool.amap(self.run_rl_training, configs).get()\n pool.close()\n pool.join()\n pool.clear()\n self.logger.info('Finished expt_07')\n",
"<docstring token>\n<import token>\n\n\nclass Experiment07(object):\n \"\"\"\n Experiment07 class\n \"\"\"\n\n def __init__(self, config_):\n \"\"\"\n Constructor\n :param config_:\n :return:\n \"\"\"\n self.config = config_\n self.data_exporter = DataExporter(self.config)\n self.logger = logging.getLogger('cuda_logger')\n self.expt_name = 'expt_07'\n self.config['RL_parameters']['experiment'] = self.expt_name\n\n @staticmethod\n def run_rl_training(config):\n rl_trainer = RunRLTrainingJob(config)\n data = rl_trainer.run()\n return data\n\n def run(self):\n \"\"\"\n Run experiment\n \"\"\"\n days = ['Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_',\n 'Thursday_00_', 'Friday_00_', 'Saturday_00_', 'Sunday_01_',\n 'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_',\n 'Friday_01_', 'Saturday_01_', 'Sunday_02_', 'Monday_02_',\n 'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_',\n 'Saturday_02_', 'Sunday_03_', 'Monday_03_', 'Tuesday_03_',\n 'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_',\n 'Sunday_04_', 'Monday_04_', 'Tuesday_04_', 'Wednesday_04_',\n 'Thursday_04_', 'Friday_04_', 'Saturday_04_']\n num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]\n imbalance_thresholds = [2]\n num_processes = mp.cpu_count()\n self.logger.info('Processes: {}'.format(num_processes))\n pool = ProcessPool(nodes=num_processes)\n configs = []\n count = 0\n for d in num_drivers:\n for threshold in imbalance_thresholds:\n for day in days:\n self.config['RL_parameters']['num_drivers'] = d\n self.config['RL_parameters']['num_strategic_drivers'] = d\n self.config['RL_parameters']['imbalance_threshold'\n ] = threshold\n self.config['RL_parameters']['experiment'\n ] = self.expt_name + '_' + str(count)\n if os.path.isfile(self.config['app']['DATA_DIR'] +\n 'city_states/' + day + 'city_states.dill'):\n self.config['RL_parameters']['city_states_filename'\n ] = day + 'city_states.dill'\n self.config['RL_parameters']['best_model_filename'\n ] = day + str(d) + '_' + str(threshold\n ) + '_model.dill'\n configs.append(deepcopy(self.config))\n count += 1\n self.logger.info('Starting expt_07')\n results = pool.amap(self.run_rl_training, configs).get()\n pool.close()\n pool.join()\n pool.clear()\n self.logger.info('Finished expt_07')\n",
"<docstring token>\n<import token>\n\n\nclass Experiment07(object):\n <docstring token>\n\n def __init__(self, config_):\n \"\"\"\n Constructor\n :param config_:\n :return:\n \"\"\"\n self.config = config_\n self.data_exporter = DataExporter(self.config)\n self.logger = logging.getLogger('cuda_logger')\n self.expt_name = 'expt_07'\n self.config['RL_parameters']['experiment'] = self.expt_name\n\n @staticmethod\n def run_rl_training(config):\n rl_trainer = RunRLTrainingJob(config)\n data = rl_trainer.run()\n return data\n\n def run(self):\n \"\"\"\n Run experiment\n \"\"\"\n days = ['Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_',\n 'Thursday_00_', 'Friday_00_', 'Saturday_00_', 'Sunday_01_',\n 'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_',\n 'Friday_01_', 'Saturday_01_', 'Sunday_02_', 'Monday_02_',\n 'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_',\n 'Saturday_02_', 'Sunday_03_', 'Monday_03_', 'Tuesday_03_',\n 'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_',\n 'Sunday_04_', 'Monday_04_', 'Tuesday_04_', 'Wednesday_04_',\n 'Thursday_04_', 'Friday_04_', 'Saturday_04_']\n num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]\n imbalance_thresholds = [2]\n num_processes = mp.cpu_count()\n self.logger.info('Processes: {}'.format(num_processes))\n pool = ProcessPool(nodes=num_processes)\n configs = []\n count = 0\n for d in num_drivers:\n for threshold in imbalance_thresholds:\n for day in days:\n self.config['RL_parameters']['num_drivers'] = d\n self.config['RL_parameters']['num_strategic_drivers'] = d\n self.config['RL_parameters']['imbalance_threshold'\n ] = threshold\n self.config['RL_parameters']['experiment'\n ] = self.expt_name + '_' + str(count)\n if os.path.isfile(self.config['app']['DATA_DIR'] +\n 'city_states/' + day + 'city_states.dill'):\n self.config['RL_parameters']['city_states_filename'\n ] = day + 'city_states.dill'\n self.config['RL_parameters']['best_model_filename'\n ] = day + str(d) + '_' + str(threshold\n ) + '_model.dill'\n configs.append(deepcopy(self.config))\n count += 1\n self.logger.info('Starting expt_07')\n results = pool.amap(self.run_rl_training, configs).get()\n pool.close()\n pool.join()\n pool.clear()\n self.logger.info('Finished expt_07')\n",
"<docstring token>\n<import token>\n\n\nclass Experiment07(object):\n <docstring token>\n <function token>\n\n @staticmethod\n def run_rl_training(config):\n rl_trainer = RunRLTrainingJob(config)\n data = rl_trainer.run()\n return data\n\n def run(self):\n \"\"\"\n Run experiment\n \"\"\"\n days = ['Sunday_00_', 'Monday_00_', 'Tuesday_00_', 'Wednesday_00_',\n 'Thursday_00_', 'Friday_00_', 'Saturday_00_', 'Sunday_01_',\n 'Monday_01_', 'Tuesday_01_', 'Wednesday_01_', 'Thursday_01_',\n 'Friday_01_', 'Saturday_01_', 'Sunday_02_', 'Monday_02_',\n 'Tuesday_02_', 'Wednesday_02_', 'Thursday_02_', 'Friday_02_',\n 'Saturday_02_', 'Sunday_03_', 'Monday_03_', 'Tuesday_03_',\n 'Wednesday_03_', 'Thursday_03_', 'Friday_03_', 'Saturday_03_',\n 'Sunday_04_', 'Monday_04_', 'Tuesday_04_', 'Wednesday_04_',\n 'Thursday_04_', 'Friday_04_', 'Saturday_04_']\n num_drivers = [4000, 5000, 6000, 7000, 8000, 9000, 10000]\n imbalance_thresholds = [2]\n num_processes = mp.cpu_count()\n self.logger.info('Processes: {}'.format(num_processes))\n pool = ProcessPool(nodes=num_processes)\n configs = []\n count = 0\n for d in num_drivers:\n for threshold in imbalance_thresholds:\n for day in days:\n self.config['RL_parameters']['num_drivers'] = d\n self.config['RL_parameters']['num_strategic_drivers'] = d\n self.config['RL_parameters']['imbalance_threshold'\n ] = threshold\n self.config['RL_parameters']['experiment'\n ] = self.expt_name + '_' + str(count)\n if os.path.isfile(self.config['app']['DATA_DIR'] +\n 'city_states/' + day + 'city_states.dill'):\n self.config['RL_parameters']['city_states_filename'\n ] = day + 'city_states.dill'\n self.config['RL_parameters']['best_model_filename'\n ] = day + str(d) + '_' + str(threshold\n ) + '_model.dill'\n configs.append(deepcopy(self.config))\n count += 1\n self.logger.info('Starting expt_07')\n results = pool.amap(self.run_rl_training, configs).get()\n pool.close()\n pool.join()\n pool.clear()\n self.logger.info('Finished expt_07')\n",
"<docstring token>\n<import token>\n\n\nclass Experiment07(object):\n <docstring token>\n <function token>\n\n @staticmethod\n def run_rl_training(config):\n rl_trainer = RunRLTrainingJob(config)\n data = rl_trainer.run()\n return data\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass Experiment07(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
98,833 |
9a1da2b69658d4724996d42bbe791595c6e1950a
|
import schedule
import time
def job(message='stuff'):
print("I'm working on:", message)
schedule.every(5).seconds.do(job)
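# Hedged variant (not in the original snippet): the same job can also be
# registered with its own argument and a different interval; the message text
# below is an illustrative assumption.
schedule.every(1).minutes.do(job, message='minute tick')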
while True:
schedule.run_pending()
time.sleep(1)
|
[
"import schedule\r\nimport time\r\nimport schedule\r\nimport time\r\n\r\ndef job(message='stuff'):\r\n print(\"I'm working on:\", message)\r\n\r\nschedule.every(5).seconds.do(job)\r\n\r\nwhile True:\r\n schedule.run_pending()\r\n time.sleep(1)\r\n",
"import schedule\nimport time\nimport schedule\nimport time\n\n\ndef job(message='stuff'):\n print(\"I'm working on:\", message)\n\n\nschedule.every(5).seconds.do(job)\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n",
"<import token>\n\n\ndef job(message='stuff'):\n print(\"I'm working on:\", message)\n\n\nschedule.every(5).seconds.do(job)\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n",
"<import token>\n\n\ndef job(message='stuff'):\n print(\"I'm working on:\", message)\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
98,834 |
5a69814b887874cb39d2beb8ff2695ecf643a87f
|
class BlankClass(object):
'''This is a Blank class for CS162.'''
pass
t = BlankClass()
class ClassWithAttr(object):
x1 = 1
x2 = 2
my_attr = ClassWithAttr()
my_attr.x3 = 3
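# Hedged illustration (not in the original snippet): class attributes act as
# shared defaults, while assignments on the instance shadow them for that
# instance only.
print(my_attr.x1, my_attr.x2, my_attr.x3)  # 1 2 3 (x1 and x2 come from the class)
my_attr.x1 = 10                            # shadows ClassWithAttr.x1 on this instance
print(my_attr.x1, ClassWithAttr.x1)        # 10 1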
|
[
"class BlankClass(object):\n '''This is a Blank class for CS162.'''\n pass\nt = BlankClass()\n\nclass ClassWithAttr(object):\n x1 = 1\n x2 = 2\n\nmy_attr = ClassWithAttr()\nmy_attr.x3 = 3\n\n",
"class BlankClass(object):\n \"\"\"This is a Blank class for CS162.\"\"\"\n pass\n\n\nt = BlankClass()\n\n\nclass ClassWithAttr(object):\n x1 = 1\n x2 = 2\n\n\nmy_attr = ClassWithAttr()\nmy_attr.x3 = 3\n",
"class BlankClass(object):\n \"\"\"This is a Blank class for CS162.\"\"\"\n pass\n\n\n<assignment token>\n\n\nclass ClassWithAttr(object):\n x1 = 1\n x2 = 2\n\n\n<assignment token>\n",
"class BlankClass(object):\n <docstring token>\n pass\n\n\n<assignment token>\n\n\nclass ClassWithAttr(object):\n x1 = 1\n x2 = 2\n\n\n<assignment token>\n",
"<class token>\n<assignment token>\n\n\nclass ClassWithAttr(object):\n x1 = 1\n x2 = 2\n\n\n<assignment token>\n",
"<class token>\n<assignment token>\n\n\nclass ClassWithAttr(object):\n <assignment token>\n <assignment token>\n\n\n<assignment token>\n",
"<class token>\n<assignment token>\n<class token>\n<assignment token>\n"
] | false |
98,835 |
75ae1a5a186cd39eda2a5b79cb35187f5523ad2f
|
#!/usr/bin/python
from re import sub
import numpy as np
import pandas as pd
import os
from . import private
from . import synth
from . import weedout
from . import rundir_num
MOOG_path = '{}/.pymoog/moog_nosm/moog_nosm_NOV2019/'.format(os.environ['HOME'])
MOOG_run_path = '{}/.pymoog/rundir/'.format(os.environ['HOME'])
MOOG_file_path = '{}/.pymoog/files/'.format(os.environ['HOME'])
## Convert the element column to element species
def save_linelist(linelist_all, sub_ll_name, wav_start=None, wav_end=None, header=None, negative=False):
'''
Save the linelist in MOOG format into specified position.
Parameters
----------
linelist_all : pandas.Dataframe
The Dataframe of linelist in MOOG format
sub_ll_name : str
The name of the line list to be saved into.
wav_start : float
Start wavelength of the line list.
    wav_end : float
        End wavelength of the line list.
type : str, = 'vald'
Type of the line list. Now only 'vald' is supported.
negative : bool
Switch to permit negative wavelength.
'''
# Crop the line list according to wavelength, if needed.
if not(negative):
index = linelist_all['wavelength'] > 0
else:
index = np.abs(linelist_all['wavelength']) >= 0
if wav_start != None:
index = index & (linelist_all['wavelength'] > wav_start)
if wav_end != None:
index = index & (linelist_all['wavelength'] < wav_end)
sub_linelist = linelist_all[index]
sub_linelist.reset_index(drop=True, inplace=True)
# Judge if the length of the line list is 0; if so raise an error.
if len(sub_linelist) == 0:
        raise ValueError('The length of line list is 0. Consider enlarging the wavelength range or check the input line list.')
    # Decide which format to save the linelist in according to the C6 value.
if np.any(abs(sub_linelist['C6'].values) > 1e-25):
output_format = '%10.3f%10.5f%10.4f%10.3f%10.3f%10.3f%10.3f'
elif np.any(abs(sub_linelist['C6'].values) < 1e-25):
output_format = '%10.3f%10.5f%10.4f%10.3f%10.2E%10.3f%10.3f'
# Remove the last column if no EW values.
if len(sub_linelist.columns) == 6:
output_format = output_format[:-6]
np.savetxt(sub_ll_name, np.array(sub_linelist), fmt=output_format)
run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g', sub_ll_name])
if header == None:
header = 'Linelist'
run_status = private.subprocess.run(['sed', '-i', '1 i\{}'.format(header), sub_ll_name])
def read_linelist(linelist_name, loggf_cut=None, mode='ascii'):
'''
Read the post-processed linelist.
Parameters
----------
linelist_name : str
The MOOG format line list
loggf_cut : float, optional
Cut on loggf (only save for the lines with loggf > loggf_cut)
mode : str, default 'ascii'
Reading mode for reading line-list. The efficiency of 'npy' mode is much higher than 'ascii' mode.
'''
available_line_list = ['ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'apogee', 'kurucz', 'kurucz_winered']
if linelist_name[-5:] != '.list' and linelist_name in available_line_list:
# Read built-in line list
if linelist_name == 'ges':
linelist_name = 'ges_hfs_iso'
if mode == 'npy':
linelist_name = MOOG_file_path + 'linelist/{}/{}.npy'.format(linelist_name.split('_')[0], linelist_name)
elif mode == 'ascii':
linelist_name = MOOG_file_path + 'linelist/{}/{}.list'.format(linelist_name.split('_')[0], linelist_name)
else:
raise ValueError('mode must be "npy" or "ascii".')
elif linelist_name[-5:] == '.list':
pass
else:
raise ValueError("Built in line list type not recognized. Please use one of the following:\n 'ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'kurucz', 'kurucz_winered' or 'apogee'.")
if mode == 'npy':
linelist_array = np.load(linelist_name, allow_pickle=True)
linelist = pd.DataFrame(linelist_array, columns=['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'])
elif mode == 'ascii':
linelist = pd.read_fwf(linelist_name, colspecs=[(0,11), (11,21), (21,31), (31,41), (41,51), (51,61), (61,71)], names=['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'], skiprows=1)
    # MOOG seems to crash if there is a line with EP larger than 50 eV, so such lines are removed.
    # Needs to be tested for other line lists.
linelist = linelist[(linelist['EP'] <= 50)]
if loggf_cut != None:
linelist = linelist[(linelist['loggf'] >= loggf_cut)]
linelist.reset_index(drop=True, inplace=True)
return linelist
def find_lines(linelist_keep, linelist_all):
line_index_keep = []
for i in linelist_keep.index:
indice = (np.abs(linelist_all['wavelength'] - linelist_keep.loc[i, 'wavelength']) < 0.001)
for col in ['id', 'EP', 'loggf']:
indice = indice & (np.abs(linelist_all[col] - linelist_keep.loc[i, col]) < 0.001)
if len(linelist_all[indice]) == 0:
raise ValueError('No match line found.')
line_index_keep.append(linelist_all[indice].index.values[0])
return line_index_keep
def find_single_dominant_line(line_wav_input, teff, logg, fe_h, resolution, r_d_blend_thre=0.1, line_list='ges', weedout_switch=False, search_half_width=0.5, linelist_serach=False, abun_change=None):
# Establish the linelist
linelist_all = read_linelist(line_list)
linelist_all = linelist_all[np.abs(linelist_all['wavelength']-line_wav_input) < search_half_width]
# Calculate the blending ratio
s = synth.synth(teff, logg, fe_h, line_wav_input-search_half_width-1, line_wav_input+search_half_width+1, resolution, line_list=line_list)
s.prepare_file(abun_change=abun_change)
# Whole spectra
s.run_moog()
s.read_spectra(unlock=False)
wav_all, flux_all = s.wav, s.flux
# weedout lines
if weedout_switch != False:
w = weedout.weedout(teff, logg, fe_h, line_wav_input-search_half_width, line_wav_input+search_half_width, line_list=line_list)
w.prepare_file()
w.run_moog()
# Target line exclude
if weedout_switch:
linelist_keep = read_linelist(w.rundir_path + 'keep.list')
else:
linelist_keep = linelist_all
# Unlock runs
s.unlock()
if weedout_switch != False:
w.unlock()
line_index_keep = find_lines(linelist_keep, linelist_all)
r_blend_depth_list = []
for line_index in line_index_keep:
s = synth.synth(teff, logg, fe_h, line_wav_input-search_half_width-1, line_wav_input+search_half_width+1,
resolution, line_list='ges')
s.prepare_file(abun_change=abun_change)
linelist_exclude = linelist_all.drop(line_index).reset_index(drop=True)
save_linelist(linelist_exclude, s.rundir_path + 'line.list')
s.run_moog()
s.read_spectra(unlock=False)
wav_exclude, flux_exclude = s.wav, s.flux
# Target line only
linelist_target = linelist_all.loc[line_index:line_index].reset_index(drop=True)
line_wavlength = linelist_target.loc[0, 'wavelength']
line_loggf = linelist_target.loc[0, 'loggf']
line_EP = linelist_target.loc[0, 'EP']
if abun_change is not None:
s.prepare_file(abun_change=abun_change)
else:
s.prepare_file()
save_linelist(linelist_target, s.rundir_path + 'line.list')
s.run_moog()
s.read_spectra()
wav_target, flux_target = s.wav, s.flux
# Calculate the EW and blending fraction
EW = (np.sum(1-flux_all)*0.02 - np.sum(1-flux_exclude)*0.02) * 1000
depth = 1 - np.min(flux_all[np.abs(wav_all-line_wavlength) <= 0.03])
r_blend_depth = (1-flux_exclude[np.argmin(np.abs(wav_exclude-line_wavlength))]) / (1-flux_all[np.argmin(np.abs(wav_all-line_wavlength))])
r_blend_depth_list.append(r_blend_depth)
linelist_keep['r_blend_depth'] = r_blend_depth_list
if len(line_index_keep) > 0:
try:
target_line_index = np.abs(linelist_keep.loc[linelist_keep['r_blend_depth'] < 0.1, 'wavelength'] - line_wav_input).sort_values().index[0]
target_line = linelist_keep.loc[target_line_index:target_line_index].reset_index(drop=True)
except IndexError:
# No dominant line is found
target_line = pd.DataFrame(np.array([np.nan]*8)).T
target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW', 'r_blend_depth']
else:
# No line is found
target_line = pd.DataFrame(np.array([np.nan]*8)).T
target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW', 'r_blend_depth']
if linelist_serach:
return target_line, linelist_keep
else:
return target_line
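# Hedged usage sketch (not part of pymoog itself): read one of the built-in
# line lists, crop it to a narrow window, and write it back out in MOOG format.
# The wavelength window and the output filename are illustrative assumptions.
if __name__ == '__main__':
    ges_lines = read_linelist('ges')  # resolves to the built-in ges_hfs_iso list
    save_linelist(ges_lines, 'crop.list', wav_start=5000.0, wav_end=5005.0,
                  header='GES lines, 5000-5005 A')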
|
[
"#!/usr/bin/python\nfrom re import sub\nimport numpy as np\nimport pandas as pd\nimport os\nfrom . import private\nfrom . import synth\nfrom . import weedout\nfrom . import rundir_num\n\nMOOG_path = '{}/.pymoog/moog_nosm/moog_nosm_NOV2019/'.format(os.environ['HOME'])\nMOOG_run_path = '{}/.pymoog/rundir/'.format(os.environ['HOME'])\nMOOG_file_path = '{}/.pymoog/files/'.format(os.environ['HOME'])\n\n## Convert the element column to element specics\n\n \ndef save_linelist(linelist_all, sub_ll_name, wav_start=None, wav_end=None, header=None, negative=False):\n '''\n Save the linelist in MOOG format into specified position.\n \n Parameters\n ----------\n linelist_all : pandas.Dataframe\n The Dataframe of linelist in MOOG format\n sub_ll_name : str\n The name of the line list to be saved into.\n wav_start : float\n Start wavelength of the line list.\n end_start : float\n End wavelength of the line list.\n type : str, = 'vald'\n Type of the line list. Now only 'vald' is supported.\n negative : bool\n Switch to permit negative wavelength. \n '''\n \n # Crop the line list according to wavelength, if needed.\n if not(negative):\n index = linelist_all['wavelength'] > 0\n else:\n index = np.abs(linelist_all['wavelength']) >= 0\n if wav_start != None:\n index = index & (linelist_all['wavelength'] > wav_start)\n if wav_end != None:\n index = index & (linelist_all['wavelength'] < wav_end) \n \n sub_linelist = linelist_all[index]\n sub_linelist.reset_index(drop=True, inplace=True)\n \n # Judge if the length of the line list is 0; if so raise an error.\n if len(sub_linelist) == 0:\n raise ValueError('The length of line list is 0. Consider enalrge the wavelength or check the input line list.')\n \n # Decidcde which format to save the linelist according to C6 value.\n if np.any(abs(sub_linelist['C6'].values) > 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.3f%10.3f%10.3f'\n elif np.any(abs(sub_linelist['C6'].values) < 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.2E%10.3f%10.3f'\n \n # Remove the last column if no EW values.\n if len(sub_linelist.columns) == 6:\n output_format = output_format[:-6]\n np.savetxt(sub_ll_name, np.array(sub_linelist), fmt=output_format)\n run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g', sub_ll_name])\n if header == None:\n header = 'Linelist'\n run_status = private.subprocess.run(['sed', '-i', '1 i\\{}'.format(header), sub_ll_name])\n\ndef read_linelist(linelist_name, loggf_cut=None, mode='ascii'):\n '''\n Read the post-processed linelist.\n \n Parameters\n ----------\n linelist_name : str\n The MOOG format line list\n loggf_cut : float, optional\n Cut on loggf (only save for the lines with loggf > loggf_cut)\n mode : str, default 'ascii'\n Reading mode for reading line-list. 
The efficiency of 'npy' mode is much higher than 'ascii' mode.\n '''\n \n available_line_list = ['ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'apogee', 'kurucz', 'kurucz_winered']\n \n if linelist_name[-5:] != '.list' and linelist_name in available_line_list:\n # Read built-in line list\n if linelist_name == 'ges':\n linelist_name = 'ges_hfs_iso'\n if mode == 'npy':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.npy'.format(linelist_name.split('_')[0], linelist_name)\n elif mode == 'ascii':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.list'.format(linelist_name.split('_')[0], linelist_name)\n else:\n raise ValueError('mode must be \"npy\" or \"ascii\".')\n elif linelist_name[-5:] == '.list':\n pass\n else:\n raise ValueError(\"Built in line list type not recognized. Please use one of the following:\\n 'ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'kurucz', 'kurucz_winered' or 'apogee'.\")\n \n if mode == 'npy':\n linelist_array = np.load(linelist_name, allow_pickle=True)\n linelist = pd.DataFrame(linelist_array, columns=['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'])\n elif mode == 'ascii':\n linelist = pd.read_fwf(linelist_name, colspecs=[(0,11), (11,21), (21,31), (31,41), (41,51), (51,61), (61,71)], names=['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'], skiprows=1)\n # MOOG seems to crash if there is line with EP larger than 50eV, so they are removed.\n # Need to be test for other line lists\n linelist = linelist[(linelist['EP'] <= 50)]\n if loggf_cut != None:\n linelist = linelist[(linelist['loggf'] >= loggf_cut)]\n linelist.reset_index(drop=True, inplace=True)\n return linelist\n\ndef find_lines(linelist_keep, linelist_all):\n line_index_keep = []\n for i in linelist_keep.index:\n indice = (np.abs(linelist_all['wavelength'] - linelist_keep.loc[i, 'wavelength']) < 0.001)\n for col in ['id', 'EP', 'loggf']:\n indice = indice & (np.abs(linelist_all[col] - linelist_keep.loc[i, col]) < 0.001)\n if len(linelist_all[indice]) == 0:\n raise ValueError('No match line found.')\n line_index_keep.append(linelist_all[indice].index.values[0])\n return line_index_keep\n\n\ndef find_single_dominant_line(line_wav_input, teff, logg, fe_h, resolution, r_d_blend_thre=0.1, line_list='ges', weedout_switch=False, search_half_width=0.5, linelist_serach=False, abun_change=None):\n\n # Establish the linelist\n linelist_all = read_linelist(line_list)\n linelist_all = linelist_all[np.abs(linelist_all['wavelength']-line_wav_input) < search_half_width]\n\n # Calculate the blending ratio\n s = synth.synth(teff, logg, fe_h, line_wav_input-search_half_width-1, line_wav_input+search_half_width+1, resolution, line_list=line_list)\n s.prepare_file(abun_change=abun_change)\n # Whole spectra \n s.run_moog()\n s.read_spectra(unlock=False)\n wav_all, flux_all = s.wav, s.flux\n\n # weedout lines\n if weedout_switch != False:\n w = weedout.weedout(teff, logg, fe_h, line_wav_input-search_half_width, line_wav_input+search_half_width, line_list=line_list)\n w.prepare_file()\n w.run_moog()\n \n # Target line exclude\n if weedout_switch:\n linelist_keep = read_linelist(w.rundir_path + 'keep.list')\n else:\n linelist_keep = linelist_all\n \n # Unlock runs\n s.unlock()\n if weedout_switch != False:\n w.unlock()\n \n line_index_keep = find_lines(linelist_keep, linelist_all)\n\n r_blend_depth_list = []\n for line_index in line_index_keep:\n s = synth.synth(teff, logg, fe_h, line_wav_input-search_half_width-1, 
line_wav_input+search_half_width+1, \n resolution, line_list='ges')\n s.prepare_file(abun_change=abun_change)\n linelist_exclude = linelist_all.drop(line_index).reset_index(drop=True)\n save_linelist(linelist_exclude, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_exclude, flux_exclude = s.wav, s.flux\n\n # Target line only\n linelist_target = linelist_all.loc[line_index:line_index].reset_index(drop=True)\n line_wavlength = linelist_target.loc[0, 'wavelength']\n line_loggf = linelist_target.loc[0, 'loggf']\n line_EP = linelist_target.loc[0, 'EP']\n if abun_change is not None:\n s.prepare_file(abun_change=abun_change)\n else:\n s.prepare_file()\n save_linelist(linelist_target, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra()\n wav_target, flux_target = s.wav, s.flux\n\n # Calculate the EW and blending fraction\n EW = (np.sum(1-flux_all)*0.02 - np.sum(1-flux_exclude)*0.02) * 1000\n depth = 1 - np.min(flux_all[np.abs(wav_all-line_wavlength) <= 0.03])\n r_blend_depth = (1-flux_exclude[np.argmin(np.abs(wav_exclude-line_wavlength))]) / (1-flux_all[np.argmin(np.abs(wav_all-line_wavlength))])\n\n r_blend_depth_list.append(r_blend_depth)\n\n linelist_keep['r_blend_depth'] = r_blend_depth_list\n\n if len(line_index_keep) > 0:\n try:\n target_line_index = np.abs(linelist_keep.loc[linelist_keep['r_blend_depth'] < 0.1, 'wavelength'] - line_wav_input).sort_values().index[0]\n target_line = linelist_keep.loc[target_line_index:target_line_index].reset_index(drop=True)\n except IndexError:\n # No dominant line is found\n target_line = pd.DataFrame(np.array([np.nan]*8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW', 'r_blend_depth']\n else:\n # No line is found\n target_line = pd.DataFrame(np.array([np.nan]*8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW', 'r_blend_depth']\n\n if linelist_serach:\n return target_line, linelist_keep\n else:\n return target_line",
"from re import sub\nimport numpy as np\nimport pandas as pd\nimport os\nfrom . import private\nfrom . import synth\nfrom . import weedout\nfrom . import rundir_num\nMOOG_path = '{}/.pymoog/moog_nosm/moog_nosm_NOV2019/'.format(os.environ['HOME']\n )\nMOOG_run_path = '{}/.pymoog/rundir/'.format(os.environ['HOME'])\nMOOG_file_path = '{}/.pymoog/files/'.format(os.environ['HOME'])\n\n\ndef save_linelist(linelist_all, sub_ll_name, wav_start=None, wav_end=None,\n header=None, negative=False):\n \"\"\"\n Save the linelist in MOOG format into specified position.\n \n Parameters\n ----------\n linelist_all : pandas.Dataframe\n The Dataframe of linelist in MOOG format\n sub_ll_name : str\n The name of the line list to be saved into.\n wav_start : float\n Start wavelength of the line list.\n end_start : float\n End wavelength of the line list.\n type : str, = 'vald'\n Type of the line list. Now only 'vald' is supported.\n negative : bool\n Switch to permit negative wavelength. \n \"\"\"\n if not negative:\n index = linelist_all['wavelength'] > 0\n else:\n index = np.abs(linelist_all['wavelength']) >= 0\n if wav_start != None:\n index = index & (linelist_all['wavelength'] > wav_start)\n if wav_end != None:\n index = index & (linelist_all['wavelength'] < wav_end)\n sub_linelist = linelist_all[index]\n sub_linelist.reset_index(drop=True, inplace=True)\n if len(sub_linelist) == 0:\n raise ValueError(\n 'The length of line list is 0. Consider enalrge the wavelength or check the input line list.'\n )\n if np.any(abs(sub_linelist['C6'].values) > 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.3f%10.3f%10.3f'\n elif np.any(abs(sub_linelist['C6'].values) < 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.2E%10.3f%10.3f'\n if len(sub_linelist.columns) == 6:\n output_format = output_format[:-6]\n np.savetxt(sub_ll_name, np.array(sub_linelist), fmt=output_format)\n run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g',\n sub_ll_name])\n if header == None:\n header = 'Linelist'\n run_status = private.subprocess.run(['sed', '-i', '1 i\\\\{}'.format(\n header), sub_ll_name])\n\n\ndef read_linelist(linelist_name, loggf_cut=None, mode='ascii'):\n \"\"\"\n Read the post-processed linelist.\n \n Parameters\n ----------\n linelist_name : str\n The MOOG format line list\n loggf_cut : float, optional\n Cut on loggf (only save for the lines with loggf > loggf_cut)\n mode : str, default 'ascii'\n Reading mode for reading line-list. The efficiency of 'npy' mode is much higher than 'ascii' mode.\n \"\"\"\n available_line_list = ['ges', 'ges_hfs_iso', 'ges_nohfs_noiso',\n 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'apogee',\n 'kurucz', 'kurucz_winered']\n if linelist_name[-5:] != '.list' and linelist_name in available_line_list:\n if linelist_name == 'ges':\n linelist_name = 'ges_hfs_iso'\n if mode == 'npy':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.npy'.format(\n linelist_name.split('_')[0], linelist_name)\n elif mode == 'ascii':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.list'.format(\n linelist_name.split('_')[0], linelist_name)\n else:\n raise ValueError('mode must be \"npy\" or \"ascii\".')\n elif linelist_name[-5:] == '.list':\n pass\n else:\n raise ValueError(\n \"\"\"Built in line list type not recognized. 
Please use one of the following:\n 'ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'kurucz', 'kurucz_winered' or 'apogee'.\"\"\"\n )\n if mode == 'npy':\n linelist_array = np.load(linelist_name, allow_pickle=True)\n linelist = pd.DataFrame(linelist_array, columns=['wavelength', 'id',\n 'EP', 'loggf', 'C6', 'D0', 'EW'])\n elif mode == 'ascii':\n linelist = pd.read_fwf(linelist_name, colspecs=[(0, 11), (11, 21),\n (21, 31), (31, 41), (41, 51), (51, 61), (61, 71)], names=[\n 'wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'], skiprows=1)\n linelist = linelist[linelist['EP'] <= 50]\n if loggf_cut != None:\n linelist = linelist[linelist['loggf'] >= loggf_cut]\n linelist.reset_index(drop=True, inplace=True)\n return linelist\n\n\ndef find_lines(linelist_keep, linelist_all):\n line_index_keep = []\n for i in linelist_keep.index:\n indice = np.abs(linelist_all['wavelength'] - linelist_keep.loc[i,\n 'wavelength']) < 0.001\n for col in ['id', 'EP', 'loggf']:\n indice = indice & (np.abs(linelist_all[col] - linelist_keep.loc\n [i, col]) < 0.001)\n if len(linelist_all[indice]) == 0:\n raise ValueError('No match line found.')\n line_index_keep.append(linelist_all[indice].index.values[0])\n return line_index_keep\n\n\ndef find_single_dominant_line(line_wav_input, teff, logg, fe_h, resolution,\n r_d_blend_thre=0.1, line_list='ges', weedout_switch=False,\n search_half_width=0.5, linelist_serach=False, abun_change=None):\n linelist_all = read_linelist(line_list)\n linelist_all = linelist_all[np.abs(linelist_all['wavelength'] -\n line_wav_input) < search_half_width]\n s = synth.synth(teff, logg, fe_h, line_wav_input - search_half_width - \n 1, line_wav_input + search_half_width + 1, resolution, line_list=\n line_list)\n s.prepare_file(abun_change=abun_change)\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_all, flux_all = s.wav, s.flux\n if weedout_switch != False:\n w = weedout.weedout(teff, logg, fe_h, line_wav_input -\n search_half_width, line_wav_input + search_half_width,\n line_list=line_list)\n w.prepare_file()\n w.run_moog()\n if weedout_switch:\n linelist_keep = read_linelist(w.rundir_path + 'keep.list')\n else:\n linelist_keep = linelist_all\n s.unlock()\n if weedout_switch != False:\n w.unlock()\n line_index_keep = find_lines(linelist_keep, linelist_all)\n r_blend_depth_list = []\n for line_index in line_index_keep:\n s = synth.synth(teff, logg, fe_h, line_wav_input -\n search_half_width - 1, line_wav_input + search_half_width + 1,\n resolution, line_list='ges')\n s.prepare_file(abun_change=abun_change)\n linelist_exclude = linelist_all.drop(line_index).reset_index(drop=True)\n save_linelist(linelist_exclude, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_exclude, flux_exclude = s.wav, s.flux\n linelist_target = linelist_all.loc[line_index:line_index].reset_index(\n drop=True)\n line_wavlength = linelist_target.loc[0, 'wavelength']\n line_loggf = linelist_target.loc[0, 'loggf']\n line_EP = linelist_target.loc[0, 'EP']\n if abun_change is not None:\n s.prepare_file(abun_change=abun_change)\n else:\n s.prepare_file()\n save_linelist(linelist_target, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra()\n wav_target, flux_target = s.wav, s.flux\n EW = (np.sum(1 - flux_all) * 0.02 - np.sum(1 - flux_exclude) * 0.02\n ) * 1000\n depth = 1 - np.min(flux_all[np.abs(wav_all - line_wavlength) <= 0.03])\n r_blend_depth = (1 - flux_exclude[np.argmin(np.abs(wav_exclude -\n line_wavlength))]) / (1 - 
flux_all[np.argmin(np.abs(wav_all -\n line_wavlength))])\n r_blend_depth_list.append(r_blend_depth)\n linelist_keep['r_blend_depth'] = r_blend_depth_list\n if len(line_index_keep) > 0:\n try:\n target_line_index = np.abs(linelist_keep.loc[linelist_keep[\n 'r_blend_depth'] < 0.1, 'wavelength'] - line_wav_input\n ).sort_values().index[0]\n target_line = linelist_keep.loc[target_line_index:target_line_index\n ].reset_index(drop=True)\n except IndexError:\n target_line = pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n else:\n target_line = pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n if linelist_serach:\n return target_line, linelist_keep\n else:\n return target_line\n",
"<import token>\nMOOG_path = '{}/.pymoog/moog_nosm/moog_nosm_NOV2019/'.format(os.environ['HOME']\n )\nMOOG_run_path = '{}/.pymoog/rundir/'.format(os.environ['HOME'])\nMOOG_file_path = '{}/.pymoog/files/'.format(os.environ['HOME'])\n\n\ndef save_linelist(linelist_all, sub_ll_name, wav_start=None, wav_end=None,\n header=None, negative=False):\n \"\"\"\n Save the linelist in MOOG format into specified position.\n \n Parameters\n ----------\n linelist_all : pandas.Dataframe\n The Dataframe of linelist in MOOG format\n sub_ll_name : str\n The name of the line list to be saved into.\n wav_start : float\n Start wavelength of the line list.\n end_start : float\n End wavelength of the line list.\n type : str, = 'vald'\n Type of the line list. Now only 'vald' is supported.\n negative : bool\n Switch to permit negative wavelength. \n \"\"\"\n if not negative:\n index = linelist_all['wavelength'] > 0\n else:\n index = np.abs(linelist_all['wavelength']) >= 0\n if wav_start != None:\n index = index & (linelist_all['wavelength'] > wav_start)\n if wav_end != None:\n index = index & (linelist_all['wavelength'] < wav_end)\n sub_linelist = linelist_all[index]\n sub_linelist.reset_index(drop=True, inplace=True)\n if len(sub_linelist) == 0:\n raise ValueError(\n 'The length of line list is 0. Consider enalrge the wavelength or check the input line list.'\n )\n if np.any(abs(sub_linelist['C6'].values) > 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.3f%10.3f%10.3f'\n elif np.any(abs(sub_linelist['C6'].values) < 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.2E%10.3f%10.3f'\n if len(sub_linelist.columns) == 6:\n output_format = output_format[:-6]\n np.savetxt(sub_ll_name, np.array(sub_linelist), fmt=output_format)\n run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g',\n sub_ll_name])\n if header == None:\n header = 'Linelist'\n run_status = private.subprocess.run(['sed', '-i', '1 i\\\\{}'.format(\n header), sub_ll_name])\n\n\ndef read_linelist(linelist_name, loggf_cut=None, mode='ascii'):\n \"\"\"\n Read the post-processed linelist.\n \n Parameters\n ----------\n linelist_name : str\n The MOOG format line list\n loggf_cut : float, optional\n Cut on loggf (only save for the lines with loggf > loggf_cut)\n mode : str, default 'ascii'\n Reading mode for reading line-list. The efficiency of 'npy' mode is much higher than 'ascii' mode.\n \"\"\"\n available_line_list = ['ges', 'ges_hfs_iso', 'ges_nohfs_noiso',\n 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'apogee',\n 'kurucz', 'kurucz_winered']\n if linelist_name[-5:] != '.list' and linelist_name in available_line_list:\n if linelist_name == 'ges':\n linelist_name = 'ges_hfs_iso'\n if mode == 'npy':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.npy'.format(\n linelist_name.split('_')[0], linelist_name)\n elif mode == 'ascii':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.list'.format(\n linelist_name.split('_')[0], linelist_name)\n else:\n raise ValueError('mode must be \"npy\" or \"ascii\".')\n elif linelist_name[-5:] == '.list':\n pass\n else:\n raise ValueError(\n \"\"\"Built in line list type not recognized. 
Please use one of the following:\n 'ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'kurucz', 'kurucz_winered' or 'apogee'.\"\"\"\n )\n if mode == 'npy':\n linelist_array = np.load(linelist_name, allow_pickle=True)\n linelist = pd.DataFrame(linelist_array, columns=['wavelength', 'id',\n 'EP', 'loggf', 'C6', 'D0', 'EW'])\n elif mode == 'ascii':\n linelist = pd.read_fwf(linelist_name, colspecs=[(0, 11), (11, 21),\n (21, 31), (31, 41), (41, 51), (51, 61), (61, 71)], names=[\n 'wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'], skiprows=1)\n linelist = linelist[linelist['EP'] <= 50]\n if loggf_cut != None:\n linelist = linelist[linelist['loggf'] >= loggf_cut]\n linelist.reset_index(drop=True, inplace=True)\n return linelist\n\n\ndef find_lines(linelist_keep, linelist_all):\n line_index_keep = []\n for i in linelist_keep.index:\n indice = np.abs(linelist_all['wavelength'] - linelist_keep.loc[i,\n 'wavelength']) < 0.001\n for col in ['id', 'EP', 'loggf']:\n indice = indice & (np.abs(linelist_all[col] - linelist_keep.loc\n [i, col]) < 0.001)\n if len(linelist_all[indice]) == 0:\n raise ValueError('No match line found.')\n line_index_keep.append(linelist_all[indice].index.values[0])\n return line_index_keep\n\n\ndef find_single_dominant_line(line_wav_input, teff, logg, fe_h, resolution,\n r_d_blend_thre=0.1, line_list='ges', weedout_switch=False,\n search_half_width=0.5, linelist_serach=False, abun_change=None):\n linelist_all = read_linelist(line_list)\n linelist_all = linelist_all[np.abs(linelist_all['wavelength'] -\n line_wav_input) < search_half_width]\n s = synth.synth(teff, logg, fe_h, line_wav_input - search_half_width - \n 1, line_wav_input + search_half_width + 1, resolution, line_list=\n line_list)\n s.prepare_file(abun_change=abun_change)\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_all, flux_all = s.wav, s.flux\n if weedout_switch != False:\n w = weedout.weedout(teff, logg, fe_h, line_wav_input -\n search_half_width, line_wav_input + search_half_width,\n line_list=line_list)\n w.prepare_file()\n w.run_moog()\n if weedout_switch:\n linelist_keep = read_linelist(w.rundir_path + 'keep.list')\n else:\n linelist_keep = linelist_all\n s.unlock()\n if weedout_switch != False:\n w.unlock()\n line_index_keep = find_lines(linelist_keep, linelist_all)\n r_blend_depth_list = []\n for line_index in line_index_keep:\n s = synth.synth(teff, logg, fe_h, line_wav_input -\n search_half_width - 1, line_wav_input + search_half_width + 1,\n resolution, line_list='ges')\n s.prepare_file(abun_change=abun_change)\n linelist_exclude = linelist_all.drop(line_index).reset_index(drop=True)\n save_linelist(linelist_exclude, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_exclude, flux_exclude = s.wav, s.flux\n linelist_target = linelist_all.loc[line_index:line_index].reset_index(\n drop=True)\n line_wavlength = linelist_target.loc[0, 'wavelength']\n line_loggf = linelist_target.loc[0, 'loggf']\n line_EP = linelist_target.loc[0, 'EP']\n if abun_change is not None:\n s.prepare_file(abun_change=abun_change)\n else:\n s.prepare_file()\n save_linelist(linelist_target, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra()\n wav_target, flux_target = s.wav, s.flux\n EW = (np.sum(1 - flux_all) * 0.02 - np.sum(1 - flux_exclude) * 0.02\n ) * 1000\n depth = 1 - np.min(flux_all[np.abs(wav_all - line_wavlength) <= 0.03])\n r_blend_depth = (1 - flux_exclude[np.argmin(np.abs(wav_exclude -\n line_wavlength))]) / (1 - 
flux_all[np.argmin(np.abs(wav_all -\n line_wavlength))])\n r_blend_depth_list.append(r_blend_depth)\n linelist_keep['r_blend_depth'] = r_blend_depth_list\n if len(line_index_keep) > 0:\n try:\n target_line_index = np.abs(linelist_keep.loc[linelist_keep[\n 'r_blend_depth'] < 0.1, 'wavelength'] - line_wav_input\n ).sort_values().index[0]\n target_line = linelist_keep.loc[target_line_index:target_line_index\n ].reset_index(drop=True)\n except IndexError:\n target_line = pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n else:\n target_line = pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n if linelist_serach:\n return target_line, linelist_keep\n else:\n return target_line\n",
"<import token>\n<assignment token>\n\n\ndef save_linelist(linelist_all, sub_ll_name, wav_start=None, wav_end=None,\n header=None, negative=False):\n \"\"\"\n Save the linelist in MOOG format into specified position.\n \n Parameters\n ----------\n linelist_all : pandas.Dataframe\n The Dataframe of linelist in MOOG format\n sub_ll_name : str\n The name of the line list to be saved into.\n wav_start : float\n Start wavelength of the line list.\n end_start : float\n End wavelength of the line list.\n type : str, = 'vald'\n Type of the line list. Now only 'vald' is supported.\n negative : bool\n Switch to permit negative wavelength. \n \"\"\"\n if not negative:\n index = linelist_all['wavelength'] > 0\n else:\n index = np.abs(linelist_all['wavelength']) >= 0\n if wav_start != None:\n index = index & (linelist_all['wavelength'] > wav_start)\n if wav_end != None:\n index = index & (linelist_all['wavelength'] < wav_end)\n sub_linelist = linelist_all[index]\n sub_linelist.reset_index(drop=True, inplace=True)\n if len(sub_linelist) == 0:\n raise ValueError(\n 'The length of line list is 0. Consider enalrge the wavelength or check the input line list.'\n )\n if np.any(abs(sub_linelist['C6'].values) > 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.3f%10.3f%10.3f'\n elif np.any(abs(sub_linelist['C6'].values) < 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.2E%10.3f%10.3f'\n if len(sub_linelist.columns) == 6:\n output_format = output_format[:-6]\n np.savetxt(sub_ll_name, np.array(sub_linelist), fmt=output_format)\n run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g',\n sub_ll_name])\n if header == None:\n header = 'Linelist'\n run_status = private.subprocess.run(['sed', '-i', '1 i\\\\{}'.format(\n header), sub_ll_name])\n\n\ndef read_linelist(linelist_name, loggf_cut=None, mode='ascii'):\n \"\"\"\n Read the post-processed linelist.\n \n Parameters\n ----------\n linelist_name : str\n The MOOG format line list\n loggf_cut : float, optional\n Cut on loggf (only save for the lines with loggf > loggf_cut)\n mode : str, default 'ascii'\n Reading mode for reading line-list. The efficiency of 'npy' mode is much higher than 'ascii' mode.\n \"\"\"\n available_line_list = ['ges', 'ges_hfs_iso', 'ges_nohfs_noiso',\n 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'apogee',\n 'kurucz', 'kurucz_winered']\n if linelist_name[-5:] != '.list' and linelist_name in available_line_list:\n if linelist_name == 'ges':\n linelist_name = 'ges_hfs_iso'\n if mode == 'npy':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.npy'.format(\n linelist_name.split('_')[0], linelist_name)\n elif mode == 'ascii':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.list'.format(\n linelist_name.split('_')[0], linelist_name)\n else:\n raise ValueError('mode must be \"npy\" or \"ascii\".')\n elif linelist_name[-5:] == '.list':\n pass\n else:\n raise ValueError(\n \"\"\"Built in line list type not recognized. 
Please use one of the following:\n 'ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'kurucz', 'kurucz_winered' or 'apogee'.\"\"\"\n )\n if mode == 'npy':\n linelist_array = np.load(linelist_name, allow_pickle=True)\n linelist = pd.DataFrame(linelist_array, columns=['wavelength', 'id',\n 'EP', 'loggf', 'C6', 'D0', 'EW'])\n elif mode == 'ascii':\n linelist = pd.read_fwf(linelist_name, colspecs=[(0, 11), (11, 21),\n (21, 31), (31, 41), (41, 51), (51, 61), (61, 71)], names=[\n 'wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'], skiprows=1)\n linelist = linelist[linelist['EP'] <= 50]\n if loggf_cut != None:\n linelist = linelist[linelist['loggf'] >= loggf_cut]\n linelist.reset_index(drop=True, inplace=True)\n return linelist\n\n\ndef find_lines(linelist_keep, linelist_all):\n line_index_keep = []\n for i in linelist_keep.index:\n indice = np.abs(linelist_all['wavelength'] - linelist_keep.loc[i,\n 'wavelength']) < 0.001\n for col in ['id', 'EP', 'loggf']:\n indice = indice & (np.abs(linelist_all[col] - linelist_keep.loc\n [i, col]) < 0.001)\n if len(linelist_all[indice]) == 0:\n raise ValueError('No match line found.')\n line_index_keep.append(linelist_all[indice].index.values[0])\n return line_index_keep\n\n\ndef find_single_dominant_line(line_wav_input, teff, logg, fe_h, resolution,\n r_d_blend_thre=0.1, line_list='ges', weedout_switch=False,\n search_half_width=0.5, linelist_serach=False, abun_change=None):\n linelist_all = read_linelist(line_list)\n linelist_all = linelist_all[np.abs(linelist_all['wavelength'] -\n line_wav_input) < search_half_width]\n s = synth.synth(teff, logg, fe_h, line_wav_input - search_half_width - \n 1, line_wav_input + search_half_width + 1, resolution, line_list=\n line_list)\n s.prepare_file(abun_change=abun_change)\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_all, flux_all = s.wav, s.flux\n if weedout_switch != False:\n w = weedout.weedout(teff, logg, fe_h, line_wav_input -\n search_half_width, line_wav_input + search_half_width,\n line_list=line_list)\n w.prepare_file()\n w.run_moog()\n if weedout_switch:\n linelist_keep = read_linelist(w.rundir_path + 'keep.list')\n else:\n linelist_keep = linelist_all\n s.unlock()\n if weedout_switch != False:\n w.unlock()\n line_index_keep = find_lines(linelist_keep, linelist_all)\n r_blend_depth_list = []\n for line_index in line_index_keep:\n s = synth.synth(teff, logg, fe_h, line_wav_input -\n search_half_width - 1, line_wav_input + search_half_width + 1,\n resolution, line_list='ges')\n s.prepare_file(abun_change=abun_change)\n linelist_exclude = linelist_all.drop(line_index).reset_index(drop=True)\n save_linelist(linelist_exclude, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_exclude, flux_exclude = s.wav, s.flux\n linelist_target = linelist_all.loc[line_index:line_index].reset_index(\n drop=True)\n line_wavlength = linelist_target.loc[0, 'wavelength']\n line_loggf = linelist_target.loc[0, 'loggf']\n line_EP = linelist_target.loc[0, 'EP']\n if abun_change is not None:\n s.prepare_file(abun_change=abun_change)\n else:\n s.prepare_file()\n save_linelist(linelist_target, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra()\n wav_target, flux_target = s.wav, s.flux\n EW = (np.sum(1 - flux_all) * 0.02 - np.sum(1 - flux_exclude) * 0.02\n ) * 1000\n depth = 1 - np.min(flux_all[np.abs(wav_all - line_wavlength) <= 0.03])\n r_blend_depth = (1 - flux_exclude[np.argmin(np.abs(wav_exclude -\n line_wavlength))]) / (1 - 
flux_all[np.argmin(np.abs(wav_all -\n line_wavlength))])\n r_blend_depth_list.append(r_blend_depth)\n linelist_keep['r_blend_depth'] = r_blend_depth_list\n if len(line_index_keep) > 0:\n try:\n target_line_index = np.abs(linelist_keep.loc[linelist_keep[\n 'r_blend_depth'] < 0.1, 'wavelength'] - line_wav_input\n ).sort_values().index[0]\n target_line = linelist_keep.loc[target_line_index:target_line_index\n ].reset_index(drop=True)\n except IndexError:\n target_line = pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n else:\n target_line = pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n if linelist_serach:\n return target_line, linelist_keep\n else:\n return target_line\n",
"<import token>\n<assignment token>\n\n\ndef save_linelist(linelist_all, sub_ll_name, wav_start=None, wav_end=None,\n header=None, negative=False):\n \"\"\"\n Save the linelist in MOOG format into specified position.\n \n Parameters\n ----------\n linelist_all : pandas.Dataframe\n The Dataframe of linelist in MOOG format\n sub_ll_name : str\n The name of the line list to be saved into.\n wav_start : float\n Start wavelength of the line list.\n end_start : float\n End wavelength of the line list.\n type : str, = 'vald'\n Type of the line list. Now only 'vald' is supported.\n negative : bool\n Switch to permit negative wavelength. \n \"\"\"\n if not negative:\n index = linelist_all['wavelength'] > 0\n else:\n index = np.abs(linelist_all['wavelength']) >= 0\n if wav_start != None:\n index = index & (linelist_all['wavelength'] > wav_start)\n if wav_end != None:\n index = index & (linelist_all['wavelength'] < wav_end)\n sub_linelist = linelist_all[index]\n sub_linelist.reset_index(drop=True, inplace=True)\n if len(sub_linelist) == 0:\n raise ValueError(\n 'The length of line list is 0. Consider enalrge the wavelength or check the input line list.'\n )\n if np.any(abs(sub_linelist['C6'].values) > 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.3f%10.3f%10.3f'\n elif np.any(abs(sub_linelist['C6'].values) < 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.2E%10.3f%10.3f'\n if len(sub_linelist.columns) == 6:\n output_format = output_format[:-6]\n np.savetxt(sub_ll_name, np.array(sub_linelist), fmt=output_format)\n run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g',\n sub_ll_name])\n if header == None:\n header = 'Linelist'\n run_status = private.subprocess.run(['sed', '-i', '1 i\\\\{}'.format(\n header), sub_ll_name])\n\n\ndef read_linelist(linelist_name, loggf_cut=None, mode='ascii'):\n \"\"\"\n Read the post-processed linelist.\n \n Parameters\n ----------\n linelist_name : str\n The MOOG format line list\n loggf_cut : float, optional\n Cut on loggf (only save for the lines with loggf > loggf_cut)\n mode : str, default 'ascii'\n Reading mode for reading line-list. The efficiency of 'npy' mode is much higher than 'ascii' mode.\n \"\"\"\n available_line_list = ['ges', 'ges_hfs_iso', 'ges_nohfs_noiso',\n 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'apogee',\n 'kurucz', 'kurucz_winered']\n if linelist_name[-5:] != '.list' and linelist_name in available_line_list:\n if linelist_name == 'ges':\n linelist_name = 'ges_hfs_iso'\n if mode == 'npy':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.npy'.format(\n linelist_name.split('_')[0], linelist_name)\n elif mode == 'ascii':\n linelist_name = MOOG_file_path + 'linelist/{}/{}.list'.format(\n linelist_name.split('_')[0], linelist_name)\n else:\n raise ValueError('mode must be \"npy\" or \"ascii\".')\n elif linelist_name[-5:] == '.list':\n pass\n else:\n raise ValueError(\n \"\"\"Built in line list type not recognized. 
Please use one of the following:\n 'ges', 'ges_hfs_iso', 'ges_nohfs_noiso', 'vald_3000_24000', 'vald_winered', 'mb99_j', 'mb99_k', 'kurucz', 'kurucz_winered' or 'apogee'.\"\"\"\n )\n if mode == 'npy':\n linelist_array = np.load(linelist_name, allow_pickle=True)\n linelist = pd.DataFrame(linelist_array, columns=['wavelength', 'id',\n 'EP', 'loggf', 'C6', 'D0', 'EW'])\n elif mode == 'ascii':\n linelist = pd.read_fwf(linelist_name, colspecs=[(0, 11), (11, 21),\n (21, 31), (31, 41), (41, 51), (51, 61), (61, 71)], names=[\n 'wavelength', 'id', 'EP', 'loggf', 'C6', 'D0', 'EW'], skiprows=1)\n linelist = linelist[linelist['EP'] <= 50]\n if loggf_cut != None:\n linelist = linelist[linelist['loggf'] >= loggf_cut]\n linelist.reset_index(drop=True, inplace=True)\n return linelist\n\n\n<function token>\n\n\ndef find_single_dominant_line(line_wav_input, teff, logg, fe_h, resolution,\n r_d_blend_thre=0.1, line_list='ges', weedout_switch=False,\n search_half_width=0.5, linelist_serach=False, abun_change=None):\n linelist_all = read_linelist(line_list)\n linelist_all = linelist_all[np.abs(linelist_all['wavelength'] -\n line_wav_input) < search_half_width]\n s = synth.synth(teff, logg, fe_h, line_wav_input - search_half_width - \n 1, line_wav_input + search_half_width + 1, resolution, line_list=\n line_list)\n s.prepare_file(abun_change=abun_change)\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_all, flux_all = s.wav, s.flux\n if weedout_switch != False:\n w = weedout.weedout(teff, logg, fe_h, line_wav_input -\n search_half_width, line_wav_input + search_half_width,\n line_list=line_list)\n w.prepare_file()\n w.run_moog()\n if weedout_switch:\n linelist_keep = read_linelist(w.rundir_path + 'keep.list')\n else:\n linelist_keep = linelist_all\n s.unlock()\n if weedout_switch != False:\n w.unlock()\n line_index_keep = find_lines(linelist_keep, linelist_all)\n r_blend_depth_list = []\n for line_index in line_index_keep:\n s = synth.synth(teff, logg, fe_h, line_wav_input -\n search_half_width - 1, line_wav_input + search_half_width + 1,\n resolution, line_list='ges')\n s.prepare_file(abun_change=abun_change)\n linelist_exclude = linelist_all.drop(line_index).reset_index(drop=True)\n save_linelist(linelist_exclude, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_exclude, flux_exclude = s.wav, s.flux\n linelist_target = linelist_all.loc[line_index:line_index].reset_index(\n drop=True)\n line_wavlength = linelist_target.loc[0, 'wavelength']\n line_loggf = linelist_target.loc[0, 'loggf']\n line_EP = linelist_target.loc[0, 'EP']\n if abun_change is not None:\n s.prepare_file(abun_change=abun_change)\n else:\n s.prepare_file()\n save_linelist(linelist_target, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra()\n wav_target, flux_target = s.wav, s.flux\n EW = (np.sum(1 - flux_all) * 0.02 - np.sum(1 - flux_exclude) * 0.02\n ) * 1000\n depth = 1 - np.min(flux_all[np.abs(wav_all - line_wavlength) <= 0.03])\n r_blend_depth = (1 - flux_exclude[np.argmin(np.abs(wav_exclude -\n line_wavlength))]) / (1 - flux_all[np.argmin(np.abs(wav_all -\n line_wavlength))])\n r_blend_depth_list.append(r_blend_depth)\n linelist_keep['r_blend_depth'] = r_blend_depth_list\n if len(line_index_keep) > 0:\n try:\n target_line_index = np.abs(linelist_keep.loc[linelist_keep[\n 'r_blend_depth'] < 0.1, 'wavelength'] - line_wav_input\n ).sort_values().index[0]\n target_line = linelist_keep.loc[target_line_index:target_line_index\n ].reset_index(drop=True)\n except IndexError:\n target_line = 
pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n else:\n target_line = pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n if linelist_serach:\n return target_line, linelist_keep\n else:\n return target_line\n",
"<import token>\n<assignment token>\n\n\ndef save_linelist(linelist_all, sub_ll_name, wav_start=None, wav_end=None,\n header=None, negative=False):\n \"\"\"\n Save the linelist in MOOG format into specified position.\n \n Parameters\n ----------\n linelist_all : pandas.Dataframe\n The Dataframe of linelist in MOOG format\n sub_ll_name : str\n The name of the line list to be saved into.\n wav_start : float\n Start wavelength of the line list.\n end_start : float\n End wavelength of the line list.\n type : str, = 'vald'\n Type of the line list. Now only 'vald' is supported.\n negative : bool\n Switch to permit negative wavelength. \n \"\"\"\n if not negative:\n index = linelist_all['wavelength'] > 0\n else:\n index = np.abs(linelist_all['wavelength']) >= 0\n if wav_start != None:\n index = index & (linelist_all['wavelength'] > wav_start)\n if wav_end != None:\n index = index & (linelist_all['wavelength'] < wav_end)\n sub_linelist = linelist_all[index]\n sub_linelist.reset_index(drop=True, inplace=True)\n if len(sub_linelist) == 0:\n raise ValueError(\n 'The length of line list is 0. Consider enalrge the wavelength or check the input line list.'\n )\n if np.any(abs(sub_linelist['C6'].values) > 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.3f%10.3f%10.3f'\n elif np.any(abs(sub_linelist['C6'].values) < 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.2E%10.3f%10.3f'\n if len(sub_linelist.columns) == 6:\n output_format = output_format[:-6]\n np.savetxt(sub_ll_name, np.array(sub_linelist), fmt=output_format)\n run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g',\n sub_ll_name])\n if header == None:\n header = 'Linelist'\n run_status = private.subprocess.run(['sed', '-i', '1 i\\\\{}'.format(\n header), sub_ll_name])\n\n\n<function token>\n<function token>\n\n\ndef find_single_dominant_line(line_wav_input, teff, logg, fe_h, resolution,\n r_d_blend_thre=0.1, line_list='ges', weedout_switch=False,\n search_half_width=0.5, linelist_serach=False, abun_change=None):\n linelist_all = read_linelist(line_list)\n linelist_all = linelist_all[np.abs(linelist_all['wavelength'] -\n line_wav_input) < search_half_width]\n s = synth.synth(teff, logg, fe_h, line_wav_input - search_half_width - \n 1, line_wav_input + search_half_width + 1, resolution, line_list=\n line_list)\n s.prepare_file(abun_change=abun_change)\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_all, flux_all = s.wav, s.flux\n if weedout_switch != False:\n w = weedout.weedout(teff, logg, fe_h, line_wav_input -\n search_half_width, line_wav_input + search_half_width,\n line_list=line_list)\n w.prepare_file()\n w.run_moog()\n if weedout_switch:\n linelist_keep = read_linelist(w.rundir_path + 'keep.list')\n else:\n linelist_keep = linelist_all\n s.unlock()\n if weedout_switch != False:\n w.unlock()\n line_index_keep = find_lines(linelist_keep, linelist_all)\n r_blend_depth_list = []\n for line_index in line_index_keep:\n s = synth.synth(teff, logg, fe_h, line_wav_input -\n search_half_width - 1, line_wav_input + search_half_width + 1,\n resolution, line_list='ges')\n s.prepare_file(abun_change=abun_change)\n linelist_exclude = linelist_all.drop(line_index).reset_index(drop=True)\n save_linelist(linelist_exclude, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra(unlock=False)\n wav_exclude, flux_exclude = s.wav, s.flux\n linelist_target = linelist_all.loc[line_index:line_index].reset_index(\n drop=True)\n line_wavlength = linelist_target.loc[0, 'wavelength']\n line_loggf = linelist_target.loc[0, 'loggf']\n 
line_EP = linelist_target.loc[0, 'EP']\n if abun_change is not None:\n s.prepare_file(abun_change=abun_change)\n else:\n s.prepare_file()\n save_linelist(linelist_target, s.rundir_path + 'line.list')\n s.run_moog()\n s.read_spectra()\n wav_target, flux_target = s.wav, s.flux\n EW = (np.sum(1 - flux_all) * 0.02 - np.sum(1 - flux_exclude) * 0.02\n ) * 1000\n depth = 1 - np.min(flux_all[np.abs(wav_all - line_wavlength) <= 0.03])\n r_blend_depth = (1 - flux_exclude[np.argmin(np.abs(wav_exclude -\n line_wavlength))]) / (1 - flux_all[np.argmin(np.abs(wav_all -\n line_wavlength))])\n r_blend_depth_list.append(r_blend_depth)\n linelist_keep['r_blend_depth'] = r_blend_depth_list\n if len(line_index_keep) > 0:\n try:\n target_line_index = np.abs(linelist_keep.loc[linelist_keep[\n 'r_blend_depth'] < 0.1, 'wavelength'] - line_wav_input\n ).sort_values().index[0]\n target_line = linelist_keep.loc[target_line_index:target_line_index\n ].reset_index(drop=True)\n except IndexError:\n target_line = pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n else:\n target_line = pd.DataFrame(np.array([np.nan] * 8)).T\n target_line.columns = ['wavelength', 'id', 'EP', 'loggf', 'C6',\n 'D0', 'EW', 'r_blend_depth']\n if linelist_serach:\n return target_line, linelist_keep\n else:\n return target_line\n",
"<import token>\n<assignment token>\n\n\ndef save_linelist(linelist_all, sub_ll_name, wav_start=None, wav_end=None,\n header=None, negative=False):\n \"\"\"\n Save the linelist in MOOG format into specified position.\n \n Parameters\n ----------\n linelist_all : pandas.Dataframe\n The Dataframe of linelist in MOOG format\n sub_ll_name : str\n The name of the line list to be saved into.\n wav_start : float\n Start wavelength of the line list.\n end_start : float\n End wavelength of the line list.\n type : str, = 'vald'\n Type of the line list. Now only 'vald' is supported.\n negative : bool\n Switch to permit negative wavelength. \n \"\"\"\n if not negative:\n index = linelist_all['wavelength'] > 0\n else:\n index = np.abs(linelist_all['wavelength']) >= 0\n if wav_start != None:\n index = index & (linelist_all['wavelength'] > wav_start)\n if wav_end != None:\n index = index & (linelist_all['wavelength'] < wav_end)\n sub_linelist = linelist_all[index]\n sub_linelist.reset_index(drop=True, inplace=True)\n if len(sub_linelist) == 0:\n raise ValueError(\n 'The length of line list is 0. Consider enalrge the wavelength or check the input line list.'\n )\n if np.any(abs(sub_linelist['C6'].values) > 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.3f%10.3f%10.3f'\n elif np.any(abs(sub_linelist['C6'].values) < 1e-25):\n output_format = '%10.3f%10.5f%10.4f%10.3f%10.2E%10.3f%10.3f'\n if len(sub_linelist.columns) == 6:\n output_format = output_format[:-6]\n np.savetxt(sub_ll_name, np.array(sub_linelist), fmt=output_format)\n run_status = private.subprocess.run(['sed', '-i', 's/nan/ /g',\n sub_ll_name])\n if header == None:\n header = 'Linelist'\n run_status = private.subprocess.run(['sed', '-i', '1 i\\\\{}'.format(\n header), sub_ll_name])\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,836 |
4b26f9bab0eba64c747318d42a0a45b01f44a2ea
|
IMAGE_SIZE = 400
RADII_CHANGE = 0.13
PRESSURE_CHANGE = 11.11
FUEL_EFF_DROP = 0.2
IBM_URL = "https://us-south.ml.cloud.ibm.com"
IBM_CONNECT_API_KEY = "5qs02cWZrWcw8JaCk04Fg2CK7s2TItb-d64sHuCo5NAg"
ML_DEPLOYMENT = {'entity': {'asset': {'id': 'af323e8b-8fc1-4d5d-9687-659648ea835a'},
'custom': {},
'deployed_asset_type': 'model',
'hardware_spec': {'id': 'Not_Applicable', 'name': 'S', 'num_nodes': 1},
'name': 'RandomForestSklearn Deployment',
'online': {},
'space_id': '221febe8-1eee-4e50-92b3-19d97d5770e8',
'status': {'online_url': {
'url': 'https://us-south.ml.cloud.ibm.com/ml/v4/deployments/73f1fc9a-dc68-473c-8bab-14e2c4807638/predictions'},
'state': 'ready'}
},
'metadata': {'created_at': '2021-06-18T07:41:38.641Z',
'id': '73f1fc9a-dc68-473c-8bab-14e2c4807638',
'modified_at': '2021-06-18T07:41:38.641Z',
'name': 'RandomForestSklearn Deployment',
'owner': 'IBMid-665000NICM',
'space_id': '221febe8-1eee-4e50-92b3-19d97d5770e8'}}
|
[
"IMAGE_SIZE = 400\n\nRADII_CHANGE = 0.13\nPRESSURE_CHANGE = 11.11\nFUEL_EFF_DROP = 0.2\n\nIBM_URL = \"https://us-south.ml.cloud.ibm.com\"\nIBM_CONNECT_API_KEY = \"5qs02cWZrWcw8JaCk04Fg2CK7s2TItb-d64sHuCo5NAg\"\n\n\nML_DEPLOYMENT = {'entity': {'asset': {'id': 'af323e8b-8fc1-4d5d-9687-659648ea835a'},\n 'custom': {},\n 'deployed_asset_type': 'model',\n 'hardware_spec': {'id': 'Not_Applicable', 'name': 'S', 'num_nodes': 1},\n 'name': 'RandomForestSklearn Deployment',\n 'online': {},\n 'space_id': '221febe8-1eee-4e50-92b3-19d97d5770e8',\n 'status': {'online_url': {\n 'url': 'https://us-south.ml.cloud.ibm.com/ml/v4/deployments/73f1fc9a-dc68-473c-8bab-14e2c4807638/predictions'},\n 'state': 'ready'}\n },\n 'metadata': {'created_at': '2021-06-18T07:41:38.641Z',\n 'id': '73f1fc9a-dc68-473c-8bab-14e2c4807638',\n 'modified_at': '2021-06-18T07:41:38.641Z',\n 'name': 'RandomForestSklearn Deployment',\n 'owner': 'IBMid-665000NICM',\n 'space_id': '221febe8-1eee-4e50-92b3-19d97d5770e8'}}\n",
"IMAGE_SIZE = 400\nRADII_CHANGE = 0.13\nPRESSURE_CHANGE = 11.11\nFUEL_EFF_DROP = 0.2\nIBM_URL = 'https://us-south.ml.cloud.ibm.com'\nIBM_CONNECT_API_KEY = '5qs02cWZrWcw8JaCk04Fg2CK7s2TItb-d64sHuCo5NAg'\nML_DEPLOYMENT = {'entity': {'asset': {'id':\n 'af323e8b-8fc1-4d5d-9687-659648ea835a'}, 'custom': {},\n 'deployed_asset_type': 'model', 'hardware_spec': {'id':\n 'Not_Applicable', 'name': 'S', 'num_nodes': 1}, 'name':\n 'RandomForestSklearn Deployment', 'online': {}, 'space_id':\n '221febe8-1eee-4e50-92b3-19d97d5770e8', 'status': {'online_url': {'url':\n 'https://us-south.ml.cloud.ibm.com/ml/v4/deployments/73f1fc9a-dc68-473c-8bab-14e2c4807638/predictions'\n }, 'state': 'ready'}}, 'metadata': {'created_at':\n '2021-06-18T07:41:38.641Z', 'id':\n '73f1fc9a-dc68-473c-8bab-14e2c4807638', 'modified_at':\n '2021-06-18T07:41:38.641Z', 'name': 'RandomForestSklearn Deployment',\n 'owner': 'IBMid-665000NICM', 'space_id':\n '221febe8-1eee-4e50-92b3-19d97d5770e8'}}\n",
"<assignment token>\n"
] | false |
98,837 |
542970527a0b021ef6c85c0a9257a03024a262ed
|
import os
import re
import json
import sys
import math
import shutil
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from skimage import exposure
from PIL import Image
from lib import utils
def save_im(im, f_name=None):
fig = plt.figure()
if f_name is None:
return plt.imshow(im)
plt.imsave(f_name, im)
plt.clf()
plt.close()
def voxel(vox, color=None, f_name=None):
vox = vox.transpose(2, 0, 1)
color = color.transpose(2, 0, 1)
if color is None or len(np.unique(color)) <= 2:
color = 'red'
else:
color_map = plt.get_cmap('coolwarm')
color = color_map(color)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.voxels(vox, facecolors=color, edgecolor='k')
ax.view_init(30, 45)
if f_name is None:
return fig.show()
fig.savefig(f_name, bbox_inches='tight')
fig.clf()
plt.close()
def label(y, f_name=None):
return voxel(np.argmax(y, axis=-1), f_name=f_name)
def softmax(y_hat, f_name=None):
return voxel(np.argmax(y_hat, axis=-1), y_hat[:, :, :, 1], f_name=f_name)
def scaled(im, axis, f_name=None):
ret_im = exposure.rescale_intensity(utils.montage(im, axis))
return save_im(ret_im, f_name)
def multichannel(im, f_name=None):
mulitchannel_montage = utils.montage_multichannel(im)
return save_im(mulitchannel_montage, f_name)
def sequence(im, f_name=None):
sequence_montage = utils.montage_sequence(im)
return save_im(sequence_montage, f_name)
def create_video(im_list):
pass
|
[
"import os\nimport re\nimport json\nimport sys\nimport math\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom skimage import exposure\nfrom PIL import Image\nfrom lib import utils\n\n\ndef save_im(im, f_name=None):\n fig = plt.figure()\n if f_name is None:\n return plt.imshow(im)\n plt.imsave(f_name, im)\n plt.clf()\n plt.close()\n\n\ndef voxel(vox, color=None, f_name=None):\n vox = vox.transpose(2, 0, 1)\n color = color.transpose(2, 0, 1)\n if color is None or len(np.unique(color)) <= 2:\n color = 'red'\n else:\n color_map = plt.get_cmap('coolwarm')\n color = color_map(color)\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.voxels(vox, facecolors=color, edgecolor='k')\n ax.view_init(30, 45)\n\n if f_name is None:\n return fig.show()\n\n fig.savefig(f_name, bbox_inches='tight')\n fig.clf()\n plt.close()\n\n\ndef label(y, f_name=None):\n return voxel(np.argmax(y, axis=-1), f_name=f_name)\n\n\ndef softmax(y_hat, f_name=None):\n return voxel(np.argmax(y_hat, axis=-1), y_hat[:, :, :, 1], f_name=f_name)\n\n\ndef scaled(im, axis, f_name=None):\n ret_im = exposure.rescale_intensity(utils.montage(im, axis))\n return save_im(ret_im, f_name)\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef sequence(im, f_name=None):\n sequence_montage = utils.montage_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\ndef create_video(im_list):\n pass\n",
"import os\nimport re\nimport json\nimport sys\nimport math\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom skimage import exposure\nfrom PIL import Image\nfrom lib import utils\n\n\ndef save_im(im, f_name=None):\n fig = plt.figure()\n if f_name is None:\n return plt.imshow(im)\n plt.imsave(f_name, im)\n plt.clf()\n plt.close()\n\n\ndef voxel(vox, color=None, f_name=None):\n vox = vox.transpose(2, 0, 1)\n color = color.transpose(2, 0, 1)\n if color is None or len(np.unique(color)) <= 2:\n color = 'red'\n else:\n color_map = plt.get_cmap('coolwarm')\n color = color_map(color)\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.voxels(vox, facecolors=color, edgecolor='k')\n ax.view_init(30, 45)\n if f_name is None:\n return fig.show()\n fig.savefig(f_name, bbox_inches='tight')\n fig.clf()\n plt.close()\n\n\ndef label(y, f_name=None):\n return voxel(np.argmax(y, axis=-1), f_name=f_name)\n\n\ndef softmax(y_hat, f_name=None):\n return voxel(np.argmax(y_hat, axis=-1), y_hat[:, :, :, 1], f_name=f_name)\n\n\ndef scaled(im, axis, f_name=None):\n ret_im = exposure.rescale_intensity(utils.montage(im, axis))\n return save_im(ret_im, f_name)\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef sequence(im, f_name=None):\n sequence_montage = utils.montage_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\ndef create_video(im_list):\n pass\n",
"<import token>\n\n\ndef save_im(im, f_name=None):\n fig = plt.figure()\n if f_name is None:\n return plt.imshow(im)\n plt.imsave(f_name, im)\n plt.clf()\n plt.close()\n\n\ndef voxel(vox, color=None, f_name=None):\n vox = vox.transpose(2, 0, 1)\n color = color.transpose(2, 0, 1)\n if color is None or len(np.unique(color)) <= 2:\n color = 'red'\n else:\n color_map = plt.get_cmap('coolwarm')\n color = color_map(color)\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.voxels(vox, facecolors=color, edgecolor='k')\n ax.view_init(30, 45)\n if f_name is None:\n return fig.show()\n fig.savefig(f_name, bbox_inches='tight')\n fig.clf()\n plt.close()\n\n\ndef label(y, f_name=None):\n return voxel(np.argmax(y, axis=-1), f_name=f_name)\n\n\ndef softmax(y_hat, f_name=None):\n return voxel(np.argmax(y_hat, axis=-1), y_hat[:, :, :, 1], f_name=f_name)\n\n\ndef scaled(im, axis, f_name=None):\n ret_im = exposure.rescale_intensity(utils.montage(im, axis))\n return save_im(ret_im, f_name)\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef sequence(im, f_name=None):\n sequence_montage = utils.montage_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\ndef create_video(im_list):\n pass\n",
"<import token>\n<function token>\n\n\ndef voxel(vox, color=None, f_name=None):\n vox = vox.transpose(2, 0, 1)\n color = color.transpose(2, 0, 1)\n if color is None or len(np.unique(color)) <= 2:\n color = 'red'\n else:\n color_map = plt.get_cmap('coolwarm')\n color = color_map(color)\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.voxels(vox, facecolors=color, edgecolor='k')\n ax.view_init(30, 45)\n if f_name is None:\n return fig.show()\n fig.savefig(f_name, bbox_inches='tight')\n fig.clf()\n plt.close()\n\n\ndef label(y, f_name=None):\n return voxel(np.argmax(y, axis=-1), f_name=f_name)\n\n\ndef softmax(y_hat, f_name=None):\n return voxel(np.argmax(y_hat, axis=-1), y_hat[:, :, :, 1], f_name=f_name)\n\n\ndef scaled(im, axis, f_name=None):\n ret_im = exposure.rescale_intensity(utils.montage(im, axis))\n return save_im(ret_im, f_name)\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef sequence(im, f_name=None):\n sequence_montage = utils.montage_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\ndef create_video(im_list):\n pass\n",
"<import token>\n<function token>\n\n\ndef voxel(vox, color=None, f_name=None):\n vox = vox.transpose(2, 0, 1)\n color = color.transpose(2, 0, 1)\n if color is None or len(np.unique(color)) <= 2:\n color = 'red'\n else:\n color_map = plt.get_cmap('coolwarm')\n color = color_map(color)\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.voxels(vox, facecolors=color, edgecolor='k')\n ax.view_init(30, 45)\n if f_name is None:\n return fig.show()\n fig.savefig(f_name, bbox_inches='tight')\n fig.clf()\n plt.close()\n\n\n<function token>\n\n\ndef softmax(y_hat, f_name=None):\n return voxel(np.argmax(y_hat, axis=-1), y_hat[:, :, :, 1], f_name=f_name)\n\n\ndef scaled(im, axis, f_name=None):\n ret_im = exposure.rescale_intensity(utils.montage(im, axis))\n return save_im(ret_im, f_name)\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef sequence(im, f_name=None):\n sequence_montage = utils.montage_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\ndef create_video(im_list):\n pass\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef softmax(y_hat, f_name=None):\n return voxel(np.argmax(y_hat, axis=-1), y_hat[:, :, :, 1], f_name=f_name)\n\n\ndef scaled(im, axis, f_name=None):\n ret_im = exposure.rescale_intensity(utils.montage(im, axis))\n return save_im(ret_im, f_name)\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef sequence(im, f_name=None):\n sequence_montage = utils.montage_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\ndef create_video(im_list):\n pass\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef softmax(y_hat, f_name=None):\n return voxel(np.argmax(y_hat, axis=-1), y_hat[:, :, :, 1], f_name=f_name)\n\n\n<function token>\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef sequence(im, f_name=None):\n sequence_montage = utils.montage_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\ndef create_video(im_list):\n pass\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef softmax(y_hat, f_name=None):\n return voxel(np.argmax(y_hat, axis=-1), y_hat[:, :, :, 1], f_name=f_name)\n\n\n<function token>\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef sequence(im, f_name=None):\n sequence_montage = utils.montage_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\ndef sequence(im, f_name=None):\n sequence_montage = utils.montage_sequence(im)\n return save_im(sequence_montage, f_name)\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef multichannel(im, f_name=None):\n mulitchannel_montage = utils.montage_multichannel(im)\n return save_im(mulitchannel_montage, f_name)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,838 |
6ed4ef91f1d24f30806dacec206d7fcb4a9fdba7
|
import os
import importlib
import glob
all_pages = {}
def load_pages():
my_path = os.path.dirname(os.path.abspath(__file__))
for mod_file in glob.glob(os.path.join(my_path, '*.py')):
parent_mod, mod_name = mod_file.split('/')[-2:]
mod_name, _ = os.path.splitext(mod_name)
if mod_name in ('__init__', 'base', 'hel_buildings'):
continue
mod = importlib.import_module('.'.join([parent_mod, mod_name]))
page = mod.page
if page.path:
all_pages[page.path] = page
def get_page_for_path(path):
return all_pages.get(path)
def get_page_for_emission_sector(sector1, sector2):
if not sector2:
sector2 = None
for page in all_pages.values():
if not page.emission_sector:
continue
if (sector1, sector2) == tuple(page.emission_sector):
return page
return None
|
[
"import os\nimport importlib\nimport glob\n\n\nall_pages = {}\n\n\ndef load_pages():\n my_path = os.path.dirname(os.path.abspath(__file__))\n for mod_file in glob.glob(os.path.join(my_path, '*.py')):\n parent_mod, mod_name = mod_file.split('/')[-2:]\n mod_name, _ = os.path.splitext(mod_name)\n if mod_name in ('__init__', 'base', 'hel_buildings'):\n continue\n\n mod = importlib.import_module('.'.join([parent_mod, mod_name]))\n page = mod.page\n if page.path:\n all_pages[page.path] = page\n\n\ndef get_page_for_path(path):\n return all_pages.get(path)\n\n\ndef get_page_for_emission_sector(sector1, sector2):\n if not sector2:\n sector2 = None\n for page in all_pages.values():\n if not page.emission_sector:\n continue\n if (sector1, sector2) == tuple(page.emission_sector):\n return page\n return None\n",
"import os\nimport importlib\nimport glob\nall_pages = {}\n\n\ndef load_pages():\n my_path = os.path.dirname(os.path.abspath(__file__))\n for mod_file in glob.glob(os.path.join(my_path, '*.py')):\n parent_mod, mod_name = mod_file.split('/')[-2:]\n mod_name, _ = os.path.splitext(mod_name)\n if mod_name in ('__init__', 'base', 'hel_buildings'):\n continue\n mod = importlib.import_module('.'.join([parent_mod, mod_name]))\n page = mod.page\n if page.path:\n all_pages[page.path] = page\n\n\ndef get_page_for_path(path):\n return all_pages.get(path)\n\n\ndef get_page_for_emission_sector(sector1, sector2):\n if not sector2:\n sector2 = None\n for page in all_pages.values():\n if not page.emission_sector:\n continue\n if (sector1, sector2) == tuple(page.emission_sector):\n return page\n return None\n",
"<import token>\nall_pages = {}\n\n\ndef load_pages():\n my_path = os.path.dirname(os.path.abspath(__file__))\n for mod_file in glob.glob(os.path.join(my_path, '*.py')):\n parent_mod, mod_name = mod_file.split('/')[-2:]\n mod_name, _ = os.path.splitext(mod_name)\n if mod_name in ('__init__', 'base', 'hel_buildings'):\n continue\n mod = importlib.import_module('.'.join([parent_mod, mod_name]))\n page = mod.page\n if page.path:\n all_pages[page.path] = page\n\n\ndef get_page_for_path(path):\n return all_pages.get(path)\n\n\ndef get_page_for_emission_sector(sector1, sector2):\n if not sector2:\n sector2 = None\n for page in all_pages.values():\n if not page.emission_sector:\n continue\n if (sector1, sector2) == tuple(page.emission_sector):\n return page\n return None\n",
"<import token>\n<assignment token>\n\n\ndef load_pages():\n my_path = os.path.dirname(os.path.abspath(__file__))\n for mod_file in glob.glob(os.path.join(my_path, '*.py')):\n parent_mod, mod_name = mod_file.split('/')[-2:]\n mod_name, _ = os.path.splitext(mod_name)\n if mod_name in ('__init__', 'base', 'hel_buildings'):\n continue\n mod = importlib.import_module('.'.join([parent_mod, mod_name]))\n page = mod.page\n if page.path:\n all_pages[page.path] = page\n\n\ndef get_page_for_path(path):\n return all_pages.get(path)\n\n\ndef get_page_for_emission_sector(sector1, sector2):\n if not sector2:\n sector2 = None\n for page in all_pages.values():\n if not page.emission_sector:\n continue\n if (sector1, sector2) == tuple(page.emission_sector):\n return page\n return None\n",
"<import token>\n<assignment token>\n\n\ndef load_pages():\n my_path = os.path.dirname(os.path.abspath(__file__))\n for mod_file in glob.glob(os.path.join(my_path, '*.py')):\n parent_mod, mod_name = mod_file.split('/')[-2:]\n mod_name, _ = os.path.splitext(mod_name)\n if mod_name in ('__init__', 'base', 'hel_buildings'):\n continue\n mod = importlib.import_module('.'.join([parent_mod, mod_name]))\n page = mod.page\n if page.path:\n all_pages[page.path] = page\n\n\ndef get_page_for_path(path):\n return all_pages.get(path)\n\n\n<function token>\n",
"<import token>\n<assignment token>\n\n\ndef load_pages():\n my_path = os.path.dirname(os.path.abspath(__file__))\n for mod_file in glob.glob(os.path.join(my_path, '*.py')):\n parent_mod, mod_name = mod_file.split('/')[-2:]\n mod_name, _ = os.path.splitext(mod_name)\n if mod_name in ('__init__', 'base', 'hel_buildings'):\n continue\n mod = importlib.import_module('.'.join([parent_mod, mod_name]))\n page = mod.page\n if page.path:\n all_pages[page.path] = page\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,839 |
501875b5b3964dfbace8393cd78ad78927acc493
|
for i in 5:
print i
|
[
"for i in 5:\n\tprint i\n"
] | true |
98,840 |
bbf7e64d54d873ed16525c452491fb21549047cb
|
#! /usr/bin/env python
#coding=utf-8
# there is no need to do recheck. most error are caused by time out
import urllib
import urllib2
import os
import re
import time
import sys
import math
def getProb(phraseList):
phraseListNew = list([urllib.quote(gram) for gram in phraseList])
phrasesStr = "\n".join(phraseListNew)
#probStr = urllib2.urlopen(urllib2.Request('http://web-ngram.research.microsoft.com/rest/lookup.svc/bing-body/apr10/5/jp?u=6c5bffbd-e43c-44ab-8c69-acf0439a7a6b',phrasesStr)).read()
#probStr = urllib2.urlopen(urllib2.Request('http://weblm.research.microsoft.com/weblm/rest.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',phrasesStr)).read()
try:
#probStr = urllib2.urlopen(urllib2.Request('http://web-ngram.research.microsoft.com/rest/lookup.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',phrasesStr)).read()
#probStr = urllib2.urlopen(urllib2.Request('http://web-ngram.research.microsoft.com/rest/lookup.svc/bing-body/apr10/5/jp?u=6c5bffbd-e43c-44ab-8c69-acf0439a7a6b',phrasesStr)).read()
probStr = urllib2.urlopen(urllib2.Request('http://weblm.research.microsoft.com/weblm/rest.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',phrasesStr)).read()
# print probStr[-10:]
except:# Exception as error_detail:
#time.sleep(1)
#return getProb(phraseList)
# print "error", error_detail #sys.exc_info()[0]
return list(["-1" for i in phraseList])
probArr = probStr.strip().split("\r\n")
return probArr
def getProb_redmond(phraseList):
phraseListNew = list([urllib.quote(gram) for gram in phraseList])
phrasesStr = "\n".join(phraseListNew)
try:
probStr = urllib2.urlopen(urllib2.Request('http://weblm.research.microsoft.com/weblm/rest.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',phrasesStr)).read()
except:
return list(["-1" for i in phraseList])
probArr = probStr.split("\r\n")
return probArr
def getProb_beijing(phraseList):
phraseListNew = list([urllib.quote(gram) for gram in phraseList])
phrasesStr = "\n".join(phraseListNew)
try:
probStr = urllib2.urlopen(urllib2.Request('http://msraml-s003/ngram-lm/rest.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',gramStr)).read()
except:
return list(["-1" for i in phraseList])
probArr = probStr.split("\r\n")
return probArr
def getProbDebug(phraseList):
probsAll = []
for i in range(0, len(phraseList)):
probArr = getProb(phraseList[i])
if probArr is None:
probArr = ["-1"]
print "##In Debug(<10): Error: " + str(i) + "'s gram: " + str(subList)
probsAll.append(probArr[0])
return probsAll
## main
if __name__ == "__main__":
print "Program starts at time:" + str(time.asctime())
if len(sys.argv) == 2:
probFilePath = sys.argv[1]
else:
print "Usage: python ngramProb_recheck.py probFilePath (/home/yxqin/corpus/data_stock201504/segment/grams_qtwe/qngrams_01_prob)"
print "there is no need to do recheck. most error are caused by time out"
sys.exit(1)
probFile = file(probFilePath)
newProbFile = file(probFilePath + "_newprob", "w")
print "## Reading file " + probFile.name
phraseList = []
lineIdx = 0
N = 1000 # gramNum in each request
contentArr = probFile.readlines()
contentArr = [line[:-1] for line in contentArr]
probFile.close()
newContentArr = [line[:line.find(" ")].strip()+" "+line[line.find(" ")+1:] for line in contentArr]
# newContentArr = []
# for i in range(len(contentArr)/N +1):
# st = N*i
# end = N*(i+1)
# if end > len(contentArr):
# end = len(contentArr)
#
# scoreList = [item[:item.find(" ")].strip() for item in contentArr[st:end]]
# phraseList = [item[item.find(" ")+1:] for item in contentArr[st:end]]
#
# errorBatch = [1 for item in scoreList if item == "-1"]
# if sum(errorBatch) == len(scoreList):
# print "errorBatch: st, end", st, end, len(phraseList), phraseList[-5:]
# probArr = []
# for j in range(10):
# sub_phraseList = phraseList[j*N/10:(j+1)*N/10]
# sub_probArr = getProb(sub_phraseList)
# probArr.extend(sub_probArr)
#
# print "get prob done", probArr[:5]
# if len(probArr) != len(phraseList):
# print "Error! prob output number not equal phrase number"
# sys.exit(0)
# for j in range(st, end):
# newContentArr.append(probArr[j%N] + " " + phraseList[j%N])
# else:
# for idx in range(len(scoreList)):
# newContentArr.append(scoreList[idx] + " " + phraseList[idx])
# if st % 10000 == 0:
# print "**", st, "lines are processed.", len(newContentArr)
newProbFile.write("\n".join(newContentArr))
newProbFile.close()
print "## New Probs are written to file " + newProbFile.name
print "Program ends at time:" + str(time.asctime())
|
[
"#! /usr/bin/env python\n#coding=utf-8\n\n# there is no need to do recheck. most error are caused by time out\n\nimport urllib\nimport urllib2\nimport os\nimport re\nimport time\nimport sys\nimport math\n\ndef getProb(phraseList):\n phraseListNew = list([urllib.quote(gram) for gram in phraseList])\n phrasesStr = \"\\n\".join(phraseListNew) \n #probStr = urllib2.urlopen(urllib2.Request('http://web-ngram.research.microsoft.com/rest/lookup.svc/bing-body/apr10/5/jp?u=6c5bffbd-e43c-44ab-8c69-acf0439a7a6b',phrasesStr)).read()\n #probStr = urllib2.urlopen(urllib2.Request('http://weblm.research.microsoft.com/weblm/rest.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',phrasesStr)).read()\n\n try:\n #probStr = urllib2.urlopen(urllib2.Request('http://web-ngram.research.microsoft.com/rest/lookup.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',phrasesStr)).read()\n #probStr = urllib2.urlopen(urllib2.Request('http://web-ngram.research.microsoft.com/rest/lookup.svc/bing-body/apr10/5/jp?u=6c5bffbd-e43c-44ab-8c69-acf0439a7a6b',phrasesStr)).read()\n probStr = urllib2.urlopen(urllib2.Request('http://weblm.research.microsoft.com/weblm/rest.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',phrasesStr)).read()\n# print probStr[-10:]\n except:# Exception as error_detail:\n #time.sleep(1)\n #return getProb(phraseList)\n# print \"error\", error_detail #sys.exc_info()[0]\n\n return list([\"-1\" for i in phraseList])\n\n probArr = probStr.strip().split(\"\\r\\n\")\n return probArr\n\ndef getProb_redmond(phraseList):\n phraseListNew = list([urllib.quote(gram) for gram in phraseList])\n phrasesStr = \"\\n\".join(phraseListNew) \n try:\n probStr = urllib2.urlopen(urllib2.Request('http://weblm.research.microsoft.com/weblm/rest.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',phrasesStr)).read()\n except:\n return list([\"-1\" for i in phraseList])\n probArr = probStr.split(\"\\r\\n\")\n return probArr\n\ndef getProb_beijing(phraseList):\n phraseListNew = list([urllib.quote(gram) for gram in phraseList])\n phrasesStr = \"\\n\".join(phraseListNew) \n try:\n probStr = urllib2.urlopen(urllib2.Request('http://msraml-s003/ngram-lm/rest.svc/bing-body/apr10/5/jp?u=6ad01338-a036-4184-acc5-380e9aad7fb4',gramStr)).read()\n except:\n return list([\"-1\" for i in phraseList])\n probArr = probStr.split(\"\\r\\n\")\n return probArr\n\ndef getProbDebug(phraseList):\n probsAll = []\n for i in range(0, len(phraseList)):\n probArr = getProb(phraseList[i])\n if probArr is None:\n probArr = [\"-1\"]\n print \"##In Debug(<10): Error: \" + str(i) + \"'s gram: \" + str(subList)\n probsAll.append(probArr[0])\n return probsAll \n\n## main\n\nif __name__ == \"__main__\":\n print \"Program starts at time:\" + str(time.asctime())\n\n if len(sys.argv) == 2:\n probFilePath = sys.argv[1]\n else:\n print \"Usage: python ngramProb_recheck.py probFilePath (/home/yxqin/corpus/data_stock201504/segment/grams_qtwe/qngrams_01_prob)\"\n print \"there is no need to do recheck. 
most error are caused by time out\"\n\n sys.exit(1)\n\n probFile = file(probFilePath)\n newProbFile = file(probFilePath + \"_newprob\", \"w\")\n\n print \"## Reading file \" + probFile.name\n phraseList = []\n lineIdx = 0\n N = 1000 # gramNum in each request\n\n contentArr = probFile.readlines()\n contentArr = [line[:-1] for line in contentArr]\n probFile.close()\n\n newContentArr = [line[:line.find(\" \")].strip()+\" \"+line[line.find(\" \")+1:] for line in contentArr]\n\n# newContentArr = []\n# for i in range(len(contentArr)/N +1):\n# st = N*i\n# end = N*(i+1)\n# if end > len(contentArr):\n# end = len(contentArr)\n#\n# scoreList = [item[:item.find(\" \")].strip() for item in contentArr[st:end]]\n# phraseList = [item[item.find(\" \")+1:] for item in contentArr[st:end]]\n#\n# errorBatch = [1 for item in scoreList if item == \"-1\"]\n# if sum(errorBatch) == len(scoreList):\n# print \"errorBatch: st, end\", st, end, len(phraseList), phraseList[-5:]\n# probArr = []\n# for j in range(10):\n# sub_phraseList = phraseList[j*N/10:(j+1)*N/10]\n# sub_probArr = getProb(sub_phraseList)\n# probArr.extend(sub_probArr)\n# \n# print \"get prob done\", probArr[:5]\n# if len(probArr) != len(phraseList):\n# print \"Error! prob output number not equal phrase number\"\n# sys.exit(0)\n# for j in range(st, end):\n# newContentArr.append(probArr[j%N] + \" \" + phraseList[j%N])\n# else:\n# for idx in range(len(scoreList)):\n# newContentArr.append(scoreList[idx] + \" \" + phraseList[idx])\n# if st % 10000 == 0:\n# print \"**\", st, \"lines are processed.\", len(newContentArr) \n\n\n newProbFile.write(\"\\n\".join(newContentArr))\n newProbFile.close()\n\n print \"## New Probs are written to file \" + newProbFile.name\n print \"Program ends at time:\" + str(time.asctime())\n\n"
] | true |
98,841 |
218015c20c5ba5fc56591ec195d24ee1fbf4de6c
|
"""
Exercise 2.9
"""
import pylab as pl
import math
pl.ion()
#给定初始条件,计算轨迹
class cannon_shell:
def __init__(self, init_v = 0, init_theta = 0, time_step = 0):
self.x = [0]
self.y = [0]
self.init_theta = init_theta
self.vx = [init_v * math.cos(self.init_theta / 180 * math.pi) / 1000]
self.vy = [init_v * math.sin(self.init_theta / 180 * math.pi) / 1000]
self.dt = time_step
self.C = 0
self.g = 9.8E-3
def launch(self):
i = 0
while(True):
self.C = 4E-2 * math.pow(1 - 6.5 * self.y[i] / 288.15, 2.5)
self.x.append(self.x[i] + self.vx[i] * self.dt)
self.y.append(self.y[i] + self.vy[i] * self.dt)
self.vx.append(self.vx[i] - self.C * math.hypot(self.vx[i], self.vy[i]) * self.vx[i] * self.dt)
self.vy.append(self.vy[i] - self.g * self.dt - self.C * math.hypot(self.vx[i], self.vy[i]) * self.vy[i] * self.dt)
i += 1
if self.y[i] < 0:
break
#利用所得x轴两侧最近的两点近似计算曲线与x轴的交点
self.x[i] = -self.y[i-1] * (self.x[i] - self.x[i-1]) / (self.y[i] - self.y[i-1]) + self.x[i-1]
self.y[i] = 0
#求给定初速度下的最大落地距离及对应发射角度
class maximum_range(cannon_shell):
def find(self):
max_range = 0
temp_max = 0
init_theta = 0
print("\n--------------")
print("正在计算此初速度下最大落地距离,预计需几十秒,请耐心等待...\n")
while(True):
cannon_shell.__init__(self, user_input.init_v, init_theta, user_input.time_step)
cannon_shell.launch(self)
temp_max = self.x[-1]
if (max_range <= temp_max):
max_range = temp_max
init_theta += 0.1
else:
init_theta -= 0.1
break
print("初速度:", user_input.init_v, "m/s")
print("计算间隔:", user_input.time_step, "s")
print("在此初速度下最大落地距离为: %.4f km"%max_range)
print("最大落地距离对应的发射角为: %.1f °"%init_theta)
#绘制运动轨迹
class show_results:
def show_results_1(self):
pl.figure(1)
pl.title('Cannon Shell')
pl.xlabel('x / $km$')
pl.ylabel('y / $km$')
pl.grid()
pl.show()
def show_results_2(self):
pl.figure(1)
pl.plot(self.x, self.y,label = "angle: %.1f °"%self.init_theta)
pl.draw()
pl.legend()
print("\n初速度:", user_input.init_v, "m/s")
print("计算间隔:", user_input.time_step, "s")
print("发射角度:", self.init_theta, "°")
print("落地距离:%.4f km"%self.x[-1], "\n")
#用户输入初始值
class user_input:
num_str_in = input("请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\n")
num = [float(n) for n in num_str_in.split()]
init_v = num[0]
time_step = num[1]
#用户输入不同的初始角度值,并输出结果曲线
class user_output:
start = cannon_shell()
show_results.show_results_1(start)
while(True):
init_theta = float(input("--------------\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\n"))
if init_theta != 999:
start = cannon_shell(user_input.init_v, init_theta, user_input.time_step)
start.launch()
show_results.show_results_2(start)
else:
break
start = maximum_range(user_input.init_v, init_theta, user_input.time_step)
start.find()
#运行程序
user_input()
user_output()
end = input("\n\n\n按下回车结束程序...")
|
[
"\"\"\"\r\nExercise 2.9\r\n\"\"\"\r\nimport pylab as pl\r\nimport math\r\npl.ion()\r\n#给定初始条件,计算轨迹\r\nclass cannon_shell:\r\n def __init__(self, init_v = 0, init_theta = 0, time_step = 0):\r\n self.x = [0]\r\n self.y = [0]\r\n self.init_theta = init_theta\r\n self.vx = [init_v * math.cos(self.init_theta / 180 * math.pi) / 1000]\r\n self.vy = [init_v * math.sin(self.init_theta / 180 * math.pi) / 1000]\r\n self.dt = time_step\r\n self.C = 0\r\n self.g = 9.8E-3\r\n def launch(self):\r\n i = 0\r\n while(True):\r\n self.C = 4E-2 * math.pow(1 - 6.5 * self.y[i] / 288.15, 2.5)\r\n self.x.append(self.x[i] + self.vx[i] * self.dt)\r\n self.y.append(self.y[i] + self.vy[i] * self.dt)\r\n self.vx.append(self.vx[i] - self.C * math.hypot(self.vx[i], self.vy[i]) * self.vx[i] * self.dt)\r\n self.vy.append(self.vy[i] - self.g * self.dt - self.C * math.hypot(self.vx[i], self.vy[i]) * self.vy[i] * self.dt)\r\n i += 1\r\n if self.y[i] < 0:\r\n break\r\n#利用所得x轴两侧最近的两点近似计算曲线与x轴的交点\r\n self.x[i] = -self.y[i-1] * (self.x[i] - self.x[i-1]) / (self.y[i] - self.y[i-1]) + self.x[i-1]\r\n self.y[i] = 0\r\n#求给定初速度下的最大落地距离及对应发射角度\r\nclass maximum_range(cannon_shell):\r\n def find(self):\r\n max_range = 0\r\n temp_max = 0\r\n init_theta = 0\r\n print(\"\\n--------------\")\r\n print(\"正在计算此初速度下最大落地距离,预计需几十秒,请耐心等待...\\n\")\r\n while(True):\r\n cannon_shell.__init__(self, user_input.init_v, init_theta, user_input.time_step)\r\n cannon_shell.launch(self)\r\n temp_max = self.x[-1]\r\n if (max_range <= temp_max):\r\n max_range = temp_max\r\n init_theta += 0.1\r\n else:\r\n init_theta -= 0.1\r\n break\r\n print(\"初速度:\", user_input.init_v, \"m/s\")\r\n print(\"计算间隔:\", user_input.time_step, \"s\")\r\n print(\"在此初速度下最大落地距离为: %.4f km\"%max_range)\r\n print(\"最大落地距离对应的发射角为: %.1f °\"%init_theta)\r\n#绘制运动轨迹\r\nclass show_results:\r\n def show_results_1(self):\r\n pl.figure(1)\r\n pl.title('Cannon Shell')\r\n pl.xlabel('x / $km$')\r\n pl.ylabel('y / $km$')\r\n pl.grid()\r\n pl.show()\r\n def show_results_2(self):\r\n pl.figure(1)\r\n pl.plot(self.x, self.y,label = \"angle: %.1f °\"%self.init_theta)\r\n pl.draw()\r\n pl.legend()\r\n print(\"\\n初速度:\", user_input.init_v, \"m/s\")\r\n print(\"计算间隔:\", user_input.time_step, \"s\")\r\n print(\"发射角度:\", self.init_theta, \"°\")\r\n print(\"落地距离:%.4f km\"%self.x[-1], \"\\n\")\r\n#用户输入初始值\r\nclass user_input:\r\n num_str_in = input(\"请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n\")\r\n num = [float(n) for n in num_str_in.split()]\r\n init_v = num[0]\r\n time_step = num[1]\r\n#用户输入不同的初始角度值,并输出结果曲线\r\nclass user_output:\r\n start = cannon_shell()\r\n show_results.show_results_1(start)\r\n while(True):\r\n init_theta = float(input(\"--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n\"))\r\n if init_theta != 999:\r\n start = cannon_shell(user_input.init_v, init_theta, user_input.time_step)\r\n start.launch()\r\n show_results.show_results_2(start)\r\n else:\r\n break\r\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\r\n start.find()\r\n#运行程序\r\nuser_input()\r\nuser_output()\r\nend = input(\"\\n\\n\\n按下回车结束程序...\")",
"<docstring token>\nimport pylab as pl\nimport math\npl.ion()\n\n\nclass cannon_shell:\n\n def __init__(self, init_v=0, init_theta=0, time_step=0):\n self.x = [0]\n self.y = [0]\n self.init_theta = init_theta\n self.vx = [init_v * math.cos(self.init_theta / 180 * math.pi) / 1000]\n self.vy = [init_v * math.sin(self.init_theta / 180 * math.pi) / 1000]\n self.dt = time_step\n self.C = 0\n self.g = 0.0098\n\n def launch(self):\n i = 0\n while True:\n self.C = 0.04 * math.pow(1 - 6.5 * self.y[i] / 288.15, 2.5)\n self.x.append(self.x[i] + self.vx[i] * self.dt)\n self.y.append(self.y[i] + self.vy[i] * self.dt)\n self.vx.append(self.vx[i] - self.C * math.hypot(self.vx[i],\n self.vy[i]) * self.vx[i] * self.dt)\n self.vy.append(self.vy[i] - self.g * self.dt - self.C * math.\n hypot(self.vx[i], self.vy[i]) * self.vy[i] * self.dt)\n i += 1\n if self.y[i] < 0:\n break\n self.x[i] = -self.y[i - 1] * (self.x[i] - self.x[i - 1]) / (self.y[\n i] - self.y[i - 1]) + self.x[i - 1]\n self.y[i] = 0\n\n\nclass maximum_range(cannon_shell):\n\n def find(self):\n max_range = 0\n temp_max = 0\n init_theta = 0\n print('\\n--------------')\n print('正在计算此初速度下最大落地距离,预计需几十秒,请耐心等待...\\n')\n while True:\n cannon_shell.__init__(self, user_input.init_v, init_theta,\n user_input.time_step)\n cannon_shell.launch(self)\n temp_max = self.x[-1]\n if max_range <= temp_max:\n max_range = temp_max\n init_theta += 0.1\n else:\n init_theta -= 0.1\n break\n print('初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('在此初速度下最大落地距离为: %.4f km' % max_range)\n print('最大落地距离对应的发射角为: %.1f °' % init_theta)\n\n\nclass show_results:\n\n def show_results_1(self):\n pl.figure(1)\n pl.title('Cannon Shell')\n pl.xlabel('x / $km$')\n pl.ylabel('y / $km$')\n pl.grid()\n pl.show()\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\nuser_input()\nuser_output()\nend = input('\\n\\n\\n按下回车结束程序...')\n",
"<docstring token>\n<import token>\npl.ion()\n\n\nclass cannon_shell:\n\n def __init__(self, init_v=0, init_theta=0, time_step=0):\n self.x = [0]\n self.y = [0]\n self.init_theta = init_theta\n self.vx = [init_v * math.cos(self.init_theta / 180 * math.pi) / 1000]\n self.vy = [init_v * math.sin(self.init_theta / 180 * math.pi) / 1000]\n self.dt = time_step\n self.C = 0\n self.g = 0.0098\n\n def launch(self):\n i = 0\n while True:\n self.C = 0.04 * math.pow(1 - 6.5 * self.y[i] / 288.15, 2.5)\n self.x.append(self.x[i] + self.vx[i] * self.dt)\n self.y.append(self.y[i] + self.vy[i] * self.dt)\n self.vx.append(self.vx[i] - self.C * math.hypot(self.vx[i],\n self.vy[i]) * self.vx[i] * self.dt)\n self.vy.append(self.vy[i] - self.g * self.dt - self.C * math.\n hypot(self.vx[i], self.vy[i]) * self.vy[i] * self.dt)\n i += 1\n if self.y[i] < 0:\n break\n self.x[i] = -self.y[i - 1] * (self.x[i] - self.x[i - 1]) / (self.y[\n i] - self.y[i - 1]) + self.x[i - 1]\n self.y[i] = 0\n\n\nclass maximum_range(cannon_shell):\n\n def find(self):\n max_range = 0\n temp_max = 0\n init_theta = 0\n print('\\n--------------')\n print('正在计算此初速度下最大落地距离,预计需几十秒,请耐心等待...\\n')\n while True:\n cannon_shell.__init__(self, user_input.init_v, init_theta,\n user_input.time_step)\n cannon_shell.launch(self)\n temp_max = self.x[-1]\n if max_range <= temp_max:\n max_range = temp_max\n init_theta += 0.1\n else:\n init_theta -= 0.1\n break\n print('初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('在此初速度下最大落地距离为: %.4f km' % max_range)\n print('最大落地距离对应的发射角为: %.1f °' % init_theta)\n\n\nclass show_results:\n\n def show_results_1(self):\n pl.figure(1)\n pl.title('Cannon Shell')\n pl.xlabel('x / $km$')\n pl.ylabel('y / $km$')\n pl.grid()\n pl.show()\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\nuser_input()\nuser_output()\nend = input('\\n\\n\\n按下回车结束程序...')\n",
"<docstring token>\n<import token>\npl.ion()\n\n\nclass cannon_shell:\n\n def __init__(self, init_v=0, init_theta=0, time_step=0):\n self.x = [0]\n self.y = [0]\n self.init_theta = init_theta\n self.vx = [init_v * math.cos(self.init_theta / 180 * math.pi) / 1000]\n self.vy = [init_v * math.sin(self.init_theta / 180 * math.pi) / 1000]\n self.dt = time_step\n self.C = 0\n self.g = 0.0098\n\n def launch(self):\n i = 0\n while True:\n self.C = 0.04 * math.pow(1 - 6.5 * self.y[i] / 288.15, 2.5)\n self.x.append(self.x[i] + self.vx[i] * self.dt)\n self.y.append(self.y[i] + self.vy[i] * self.dt)\n self.vx.append(self.vx[i] - self.C * math.hypot(self.vx[i],\n self.vy[i]) * self.vx[i] * self.dt)\n self.vy.append(self.vy[i] - self.g * self.dt - self.C * math.\n hypot(self.vx[i], self.vy[i]) * self.vy[i] * self.dt)\n i += 1\n if self.y[i] < 0:\n break\n self.x[i] = -self.y[i - 1] * (self.x[i] - self.x[i - 1]) / (self.y[\n i] - self.y[i - 1]) + self.x[i - 1]\n self.y[i] = 0\n\n\nclass maximum_range(cannon_shell):\n\n def find(self):\n max_range = 0\n temp_max = 0\n init_theta = 0\n print('\\n--------------')\n print('正在计算此初速度下最大落地距离,预计需几十秒,请耐心等待...\\n')\n while True:\n cannon_shell.__init__(self, user_input.init_v, init_theta,\n user_input.time_step)\n cannon_shell.launch(self)\n temp_max = self.x[-1]\n if max_range <= temp_max:\n max_range = temp_max\n init_theta += 0.1\n else:\n init_theta -= 0.1\n break\n print('初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('在此初速度下最大落地距离为: %.4f km' % max_range)\n print('最大落地距离对应的发射角为: %.1f °' % init_theta)\n\n\nclass show_results:\n\n def show_results_1(self):\n pl.figure(1)\n pl.title('Cannon Shell')\n pl.xlabel('x / $km$')\n pl.ylabel('y / $km$')\n pl.grid()\n pl.show()\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\nuser_input()\nuser_output()\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass cannon_shell:\n\n def __init__(self, init_v=0, init_theta=0, time_step=0):\n self.x = [0]\n self.y = [0]\n self.init_theta = init_theta\n self.vx = [init_v * math.cos(self.init_theta / 180 * math.pi) / 1000]\n self.vy = [init_v * math.sin(self.init_theta / 180 * math.pi) / 1000]\n self.dt = time_step\n self.C = 0\n self.g = 0.0098\n\n def launch(self):\n i = 0\n while True:\n self.C = 0.04 * math.pow(1 - 6.5 * self.y[i] / 288.15, 2.5)\n self.x.append(self.x[i] + self.vx[i] * self.dt)\n self.y.append(self.y[i] + self.vy[i] * self.dt)\n self.vx.append(self.vx[i] - self.C * math.hypot(self.vx[i],\n self.vy[i]) * self.vx[i] * self.dt)\n self.vy.append(self.vy[i] - self.g * self.dt - self.C * math.\n hypot(self.vx[i], self.vy[i]) * self.vy[i] * self.dt)\n i += 1\n if self.y[i] < 0:\n break\n self.x[i] = -self.y[i - 1] * (self.x[i] - self.x[i - 1]) / (self.y[\n i] - self.y[i - 1]) + self.x[i - 1]\n self.y[i] = 0\n\n\nclass maximum_range(cannon_shell):\n\n def find(self):\n max_range = 0\n temp_max = 0\n init_theta = 0\n print('\\n--------------')\n print('正在计算此初速度下最大落地距离,预计需几十秒,请耐心等待...\\n')\n while True:\n cannon_shell.__init__(self, user_input.init_v, init_theta,\n user_input.time_step)\n cannon_shell.launch(self)\n temp_max = self.x[-1]\n if max_range <= temp_max:\n max_range = temp_max\n init_theta += 0.1\n else:\n init_theta -= 0.1\n break\n print('初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('在此初速度下最大落地距离为: %.4f km' % max_range)\n print('最大落地距离对应的发射角为: %.1f °' % init_theta)\n\n\nclass show_results:\n\n def show_results_1(self):\n pl.figure(1)\n pl.title('Cannon Shell')\n pl.xlabel('x / $km$')\n pl.ylabel('y / $km$')\n pl.grid()\n pl.show()\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass cannon_shell:\n <function token>\n\n def launch(self):\n i = 0\n while True:\n self.C = 0.04 * math.pow(1 - 6.5 * self.y[i] / 288.15, 2.5)\n self.x.append(self.x[i] + self.vx[i] * self.dt)\n self.y.append(self.y[i] + self.vy[i] * self.dt)\n self.vx.append(self.vx[i] - self.C * math.hypot(self.vx[i],\n self.vy[i]) * self.vx[i] * self.dt)\n self.vy.append(self.vy[i] - self.g * self.dt - self.C * math.\n hypot(self.vx[i], self.vy[i]) * self.vy[i] * self.dt)\n i += 1\n if self.y[i] < 0:\n break\n self.x[i] = -self.y[i - 1] * (self.x[i] - self.x[i - 1]) / (self.y[\n i] - self.y[i - 1]) + self.x[i - 1]\n self.y[i] = 0\n\n\nclass maximum_range(cannon_shell):\n\n def find(self):\n max_range = 0\n temp_max = 0\n init_theta = 0\n print('\\n--------------')\n print('正在计算此初速度下最大落地距离,预计需几十秒,请耐心等待...\\n')\n while True:\n cannon_shell.__init__(self, user_input.init_v, init_theta,\n user_input.time_step)\n cannon_shell.launch(self)\n temp_max = self.x[-1]\n if max_range <= temp_max:\n max_range = temp_max\n init_theta += 0.1\n else:\n init_theta -= 0.1\n break\n print('初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('在此初速度下最大落地距离为: %.4f km' % max_range)\n print('最大落地距离对应的发射角为: %.1f °' % init_theta)\n\n\nclass show_results:\n\n def show_results_1(self):\n pl.figure(1)\n pl.title('Cannon Shell')\n pl.xlabel('x / $km$')\n pl.ylabel('y / $km$')\n pl.grid()\n pl.show()\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n\n\nclass cannon_shell:\n <function token>\n <function token>\n\n\nclass maximum_range(cannon_shell):\n\n def find(self):\n max_range = 0\n temp_max = 0\n init_theta = 0\n print('\\n--------------')\n print('正在计算此初速度下最大落地距离,预计需几十秒,请耐心等待...\\n')\n while True:\n cannon_shell.__init__(self, user_input.init_v, init_theta,\n user_input.time_step)\n cannon_shell.launch(self)\n temp_max = self.x[-1]\n if max_range <= temp_max:\n max_range = temp_max\n init_theta += 0.1\n else:\n init_theta -= 0.1\n break\n print('初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('在此初速度下最大落地距离为: %.4f km' % max_range)\n print('最大落地距离对应的发射角为: %.1f °' % init_theta)\n\n\nclass show_results:\n\n def show_results_1(self):\n pl.figure(1)\n pl.title('Cannon Shell')\n pl.xlabel('x / $km$')\n pl.ylabel('y / $km$')\n pl.grid()\n pl.show()\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n\n\nclass maximum_range(cannon_shell):\n\n def find(self):\n max_range = 0\n temp_max = 0\n init_theta = 0\n print('\\n--------------')\n print('正在计算此初速度下最大落地距离,预计需几十秒,请耐心等待...\\n')\n while True:\n cannon_shell.__init__(self, user_input.init_v, init_theta,\n user_input.time_step)\n cannon_shell.launch(self)\n temp_max = self.x[-1]\n if max_range <= temp_max:\n max_range = temp_max\n init_theta += 0.1\n else:\n init_theta -= 0.1\n break\n print('初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('在此初速度下最大落地距离为: %.4f km' % max_range)\n print('最大落地距离对应的发射角为: %.1f °' % init_theta)\n\n\nclass show_results:\n\n def show_results_1(self):\n pl.figure(1)\n pl.title('Cannon Shell')\n pl.xlabel('x / $km$')\n pl.ylabel('y / $km$')\n pl.grid()\n pl.show()\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n\n\nclass maximum_range(cannon_shell):\n <function token>\n\n\nclass show_results:\n\n def show_results_1(self):\n pl.figure(1)\n pl.title('Cannon Shell')\n pl.xlabel('x / $km$')\n pl.ylabel('y / $km$')\n pl.grid()\n pl.show()\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n<class token>\n\n\nclass show_results:\n\n def show_results_1(self):\n pl.figure(1)\n pl.title('Cannon Shell')\n pl.xlabel('x / $km$')\n pl.ylabel('y / $km$')\n pl.grid()\n pl.show()\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n<class token>\n\n\nclass show_results:\n <function token>\n\n def show_results_2(self):\n pl.figure(1)\n pl.plot(self.x, self.y, label='angle: %.1f °' % self.init_theta)\n pl.draw()\n pl.legend()\n print('\\n初速度:', user_input.init_v, 'm/s')\n print('计算间隔:', user_input.time_step, 's')\n print('发射角度:', self.init_theta, '°')\n print('落地距离:%.4f km' % self.x[-1], '\\n')\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n<class token>\n\n\nclass show_results:\n <function token>\n <function token>\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n<class token>\n<class token>\n\n\nclass user_input:\n num_str_in = input('请输入初速度(m/s),计算间隔dt(s)的值,并用空格隔开:\\n')\n num = [float(n) for n in num_str_in.split()]\n init_v = num[0]\n time_step = num[1]\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n<class token>\n<class token>\n\n\nclass user_input:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass user_output:\n start = cannon_shell()\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n start = maximum_range(user_input.init_v, init_theta, user_input.time_step)\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass user_output:\n <assignment token>\n show_results.show_results_1(start)\n while True:\n init_theta = float(input(\n '--------------\\n输入初始角度(角度制,0~180)(输入999开始计算最大落地距离):\\n'))\n if init_theta != 999:\n start = cannon_shell(user_input.init_v, init_theta, user_input.\n time_step)\n start.launch()\n show_results.show_results_2(start)\n else:\n break\n <assignment token>\n start.find()\n\n\n<code token>\n<assignment token>\n",
"<docstring token>\n<import token>\n<code token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<code token>\n<assignment token>\n"
] | false |
98,842 |
b632e828149981b0bf2a1c0057105121f7d63b09
|
import json
from aiohttp import web, ClientSession
from aiohttp.test_utils import unused_port
from tests.test_data.server_activity import s2s_follow
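# Test double: an in-process aiohttp server exposing a fake ActivityPub-style actor.
# GET /user returns the actor profile (just its inbox URL); POST /user/inbox accepts
# a Follow activity, validates it against s2s_follow, and posts an Accept back to the sender.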
class FakeServer:
def __init__(self, loop):
self.loop = loop
self.app = web.Application()
self.runner = None
self.port = None
self.app.router.add_get('/user', self.user_profile)
self.app.router.add_post('/user/inbox', self.inbox_post)
async def start(self):
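        # Spin up the aiohttp app on a randomly chosen free port and return that port.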
self.port = port = unused_port()
self.runner = web.AppRunner(self.app)
await self.runner.setup()
site = web.TCPSite(self.runner, '127.0.0.1', port)
await site.start()
return port
async def stop(self):
await self.runner.cleanup()
async def inbox_post(self, request):
data = await request.json()
# from pprint import pprint
# pprint(data)
if data["type"] == "Follow":
test_follow = s2s_follow(data["actor"], data["object"], data["id"])
            if data != test_follow:
                raise AssertionError("received Follow did not match the expected s2s_follow activity")
accept = json.dumps({
"type": "Accept",
"id": "id",
"object": data,
"actor": data["object"]})
async with ClientSession() as session:
await session.post(f"{data['actor']}/inbox", data=accept)
return web.json_response()
async def user_profile(self, request):
return web.json_response({
"inbox": f"http://127.0.0.1:{self.port}/user/inbox"
})
|
[
"\nimport json\nfrom aiohttp import web, ClientSession\nfrom aiohttp.test_utils import unused_port\n\n\nfrom tests.test_data.server_activity import s2s_follow\n\nclass FakeServer:\n def __init__(self, loop):\n self.loop = loop\n self.app = web.Application()\n self.runner = None\n self.port = None\n self.app.router.add_get('/user', self.user_profile)\n self.app.router.add_post('/user/inbox', self.inbox_post)\n\n async def start(self):\n self.port = port = unused_port()\n self.runner = web.AppRunner(self.app)\n await self.runner.setup()\n site = web.TCPSite(self.runner, '127.0.0.1', port)\n await site.start()\n return port\n\n async def stop(self):\n await self.runner.cleanup()\n\n async def inbox_post(self, request):\n data = await request.json()\n # from pprint import pprint\n # pprint(data)\n if data[\"type\"] == \"Follow\":\n test_follow = s2s_follow(data[\"actor\"], data[\"object\"], data[\"id\"])\n if not data == test_follow:\n raise\n accept = json.dumps({\n \"type\": \"Accept\",\n \"id\": \"id\",\n \"object\": data,\n \"actor\": data[\"object\"]})\n async with ClientSession() as session:\n await session.post(f\"{data['actor']}/inbox\", data=accept)\n\n return web.json_response()\n\n async def user_profile(self, request):\n return web.json_response({\n \"inbox\": f\"http://127.0.0.1:{self.port}/user/inbox\"\n })\n\n\n",
"import json\nfrom aiohttp import web, ClientSession\nfrom aiohttp.test_utils import unused_port\nfrom tests.test_data.server_activity import s2s_follow\n\n\nclass FakeServer:\n\n def __init__(self, loop):\n self.loop = loop\n self.app = web.Application()\n self.runner = None\n self.port = None\n self.app.router.add_get('/user', self.user_profile)\n self.app.router.add_post('/user/inbox', self.inbox_post)\n\n async def start(self):\n self.port = port = unused_port()\n self.runner = web.AppRunner(self.app)\n await self.runner.setup()\n site = web.TCPSite(self.runner, '127.0.0.1', port)\n await site.start()\n return port\n\n async def stop(self):\n await self.runner.cleanup()\n\n async def inbox_post(self, request):\n data = await request.json()\n if data['type'] == 'Follow':\n test_follow = s2s_follow(data['actor'], data['object'], data['id'])\n if not data == test_follow:\n raise\n accept = json.dumps({'type': 'Accept', 'id': 'id', 'object':\n data, 'actor': data['object']})\n async with ClientSession() as session:\n await session.post(f\"{data['actor']}/inbox\", data=accept)\n return web.json_response()\n\n async def user_profile(self, request):\n return web.json_response({'inbox':\n f'http://127.0.0.1:{self.port}/user/inbox'})\n",
"<import token>\n\n\nclass FakeServer:\n\n def __init__(self, loop):\n self.loop = loop\n self.app = web.Application()\n self.runner = None\n self.port = None\n self.app.router.add_get('/user', self.user_profile)\n self.app.router.add_post('/user/inbox', self.inbox_post)\n\n async def start(self):\n self.port = port = unused_port()\n self.runner = web.AppRunner(self.app)\n await self.runner.setup()\n site = web.TCPSite(self.runner, '127.0.0.1', port)\n await site.start()\n return port\n\n async def stop(self):\n await self.runner.cleanup()\n\n async def inbox_post(self, request):\n data = await request.json()\n if data['type'] == 'Follow':\n test_follow = s2s_follow(data['actor'], data['object'], data['id'])\n if not data == test_follow:\n raise\n accept = json.dumps({'type': 'Accept', 'id': 'id', 'object':\n data, 'actor': data['object']})\n async with ClientSession() as session:\n await session.post(f\"{data['actor']}/inbox\", data=accept)\n return web.json_response()\n\n async def user_profile(self, request):\n return web.json_response({'inbox':\n f'http://127.0.0.1:{self.port}/user/inbox'})\n",
"<import token>\n\n\nclass FakeServer:\n <function token>\n\n async def start(self):\n self.port = port = unused_port()\n self.runner = web.AppRunner(self.app)\n await self.runner.setup()\n site = web.TCPSite(self.runner, '127.0.0.1', port)\n await site.start()\n return port\n\n async def stop(self):\n await self.runner.cleanup()\n\n async def inbox_post(self, request):\n data = await request.json()\n if data['type'] == 'Follow':\n test_follow = s2s_follow(data['actor'], data['object'], data['id'])\n if not data == test_follow:\n raise\n accept = json.dumps({'type': 'Accept', 'id': 'id', 'object':\n data, 'actor': data['object']})\n async with ClientSession() as session:\n await session.post(f\"{data['actor']}/inbox\", data=accept)\n return web.json_response()\n\n async def user_profile(self, request):\n return web.json_response({'inbox':\n f'http://127.0.0.1:{self.port}/user/inbox'})\n",
"<import token>\n<class token>\n"
] | false |
98,843 |
76003407e068d7ba20cc0ae2f36e704a73bd187e
|
#!/usr/bin/env python3
import time
import sys
import schedule
from gpu import check_temperature
from process import check_is_running
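# Watchdog for a GPU mining rig: once the NiceHash miner and Open Hardware Monitor are
# both confirmed running, the GPU temperature is checked immediately and then re-checked
# every 30 seconds via the schedule library.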
def initialise_scheduler():
schedule.every(30).seconds.do(check_temperature)
while True:
schedule.run_pending()
time.sleep(1)
def init():
if check_is_running('app_nhm.exe') and check_is_running('OpenHardwareMonitor.exe'):
print('Nicehash miner and open hardware monitor found, initialising GPU monitor.')
check_temperature()
initialise_scheduler()
else:
print('Nicehash (app_nhm.exe) or Open Hardware Monitor (OpenHardwareMonitor.exe) not found in running processes, exiting.')
sys.exit()
# Miner go brr
init()
|
[
"#!/usr/bin/env python3\n\nimport time\nimport sys\nimport schedule\nfrom gpu import check_temperature\nfrom process import check_is_running\n\ndef initialise_scheduler():\n schedule.every(30).seconds.do(check_temperature)\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n\ndef init():\n if check_is_running('app_nhm.exe') and check_is_running('OpenHardwareMonitor.exe'):\n print('Nicehash miner and open hardware monitor found, initialising GPU monitor.')\n check_temperature()\n initialise_scheduler()\n else:\n print('Nicehash (app_nhm.exe) or Open Hardware Monitor (OpenHardwareMonitor.exe) not found in running processes, exiting.')\n sys.exit()\n\n\n# Miner go brr\ninit()",
"import time\nimport sys\nimport schedule\nfrom gpu import check_temperature\nfrom process import check_is_running\n\n\ndef initialise_scheduler():\n schedule.every(30).seconds.do(check_temperature)\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\ndef init():\n if check_is_running('app_nhm.exe') and check_is_running(\n 'OpenHardwareMonitor.exe'):\n print(\n 'Nicehash miner and open hardware monitor found, initialising GPU monitor.'\n )\n check_temperature()\n initialise_scheduler()\n else:\n print(\n 'Nicehash (app_nhm.exe) or Open Hardware Monitor (OpenHardwareMonitor.exe) not found in running processes, exiting.'\n )\n sys.exit()\n\n\ninit()\n",
"<import token>\n\n\ndef initialise_scheduler():\n schedule.every(30).seconds.do(check_temperature)\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\ndef init():\n if check_is_running('app_nhm.exe') and check_is_running(\n 'OpenHardwareMonitor.exe'):\n print(\n 'Nicehash miner and open hardware monitor found, initialising GPU monitor.'\n )\n check_temperature()\n initialise_scheduler()\n else:\n print(\n 'Nicehash (app_nhm.exe) or Open Hardware Monitor (OpenHardwareMonitor.exe) not found in running processes, exiting.'\n )\n sys.exit()\n\n\ninit()\n",
"<import token>\n\n\ndef initialise_scheduler():\n schedule.every(30).seconds.do(check_temperature)\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\ndef init():\n if check_is_running('app_nhm.exe') and check_is_running(\n 'OpenHardwareMonitor.exe'):\n print(\n 'Nicehash miner and open hardware monitor found, initialising GPU monitor.'\n )\n check_temperature()\n initialise_scheduler()\n else:\n print(\n 'Nicehash (app_nhm.exe) or Open Hardware Monitor (OpenHardwareMonitor.exe) not found in running processes, exiting.'\n )\n sys.exit()\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef init():\n if check_is_running('app_nhm.exe') and check_is_running(\n 'OpenHardwareMonitor.exe'):\n print(\n 'Nicehash miner and open hardware monitor found, initialising GPU monitor.'\n )\n check_temperature()\n initialise_scheduler()\n else:\n print(\n 'Nicehash (app_nhm.exe) or Open Hardware Monitor (OpenHardwareMonitor.exe) not found in running processes, exiting.'\n )\n sys.exit()\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<code token>\n"
] | false |
98,844 |
a88e7dd9836d843d0d383cfbd63492d8c2846236
|
import sys
sys.stdin = open('sosu.txt')
def issosu(a, b):
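    # Collect the primes in [a, b] by trial division up to sqrt(i);
    # the for/else appends i only when no divisor was found.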
result = []
for i in range(a, b+1):
if i == 2:
result.append(i)
elif i > 2:
for j in range(2, int(i**0.5)+1):
if not i%j:
break
else:
result.append(i)
return result
a, b = map(int, input().split())
m, n = min(a, b), max(a, b)
L = sorted(issosu(m,n))
t = len(L)
k = L[0]+L[-1]
print(t)
print(k)
|
[
"\nimport sys\nsys.stdin = open('sosu.txt')\n\ndef issosu(a, b):\n result = []\n for i in range(a, b+1):\n if i == 2:\n result.append(i)\n elif i > 2:\n for j in range(2, int(i**0.5)+1):\n if not i%j:\n break\n else:\n result.append(i)\n return result\n\n\n\na, b = map(int, input().split())\nm, n = min(a, b), max(a, b)\n\nL = sorted(issosu(m,n))\n\nt = len(L)\nk = L[0]+L[-1]\n\nprint(t)\nprint(k)\n\n\n",
"import sys\nsys.stdin = open('sosu.txt')\n\n\ndef issosu(a, b):\n result = []\n for i in range(a, b + 1):\n if i == 2:\n result.append(i)\n elif i > 2:\n for j in range(2, int(i ** 0.5) + 1):\n if not i % j:\n break\n else:\n result.append(i)\n return result\n\n\na, b = map(int, input().split())\nm, n = min(a, b), max(a, b)\nL = sorted(issosu(m, n))\nt = len(L)\nk = L[0] + L[-1]\nprint(t)\nprint(k)\n",
"<import token>\nsys.stdin = open('sosu.txt')\n\n\ndef issosu(a, b):\n result = []\n for i in range(a, b + 1):\n if i == 2:\n result.append(i)\n elif i > 2:\n for j in range(2, int(i ** 0.5) + 1):\n if not i % j:\n break\n else:\n result.append(i)\n return result\n\n\na, b = map(int, input().split())\nm, n = min(a, b), max(a, b)\nL = sorted(issosu(m, n))\nt = len(L)\nk = L[0] + L[-1]\nprint(t)\nprint(k)\n",
"<import token>\n<assignment token>\n\n\ndef issosu(a, b):\n result = []\n for i in range(a, b + 1):\n if i == 2:\n result.append(i)\n elif i > 2:\n for j in range(2, int(i ** 0.5) + 1):\n if not i % j:\n break\n else:\n result.append(i)\n return result\n\n\n<assignment token>\nprint(t)\nprint(k)\n",
"<import token>\n<assignment token>\n\n\ndef issosu(a, b):\n result = []\n for i in range(a, b + 1):\n if i == 2:\n result.append(i)\n elif i > 2:\n for j in range(2, int(i ** 0.5) + 1):\n if not i % j:\n break\n else:\n result.append(i)\n return result\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
98,845 |
6f4278dd7b7e920be6f20e017efc270f0e47ae1d
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from django.urls import reverse
from .models import *
def index(request):
question_list = Question.objects.order_by('pub_date')
context = {'question_list': question_list}
return render(request, 'poll/index.html', context) # Shortcut for the two lines below
# template = loader.get_template('poll/index.html')
# return HttpResponse(template.render(context, request))
# output = ', '.join(q.question_text for q in question_list)
# return HttpResponse(output)
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
context = {'question': question}
return render(request, 'poll/detail.html', context)
# return HttpResponse(f"You are looking at question Number {question_id}")
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'poll/results.html', {'question': question})
def vote(request, question_id):
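    # Assumes the POSTed form always carries a 'choice' key; a missing or invalid
    # choice would surface here as KeyError / Choice.DoesNotExist.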
question = get_object_or_404(Question, pk=question_id)
selected_choice = question.choice_set.get(pk=request.POST['choice'])
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('poll:results', args=(question_id,)))
|
[
"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader\nfrom django.urls import reverse\n\nfrom .models import *\n\n\ndef index(request):\n question_list = Question.objects.order_by('pub_date')\n context = {'question_list': question_list}\n return render(request, 'poll/index.html', context) # Shortcut for the two lines below\n # template = loader.get_template('poll/index.html')\n # return HttpResponse(template.render(context, request))\n # output = ', '.join(q.question_text for q in question_list)\n # return HttpResponse(output)\n\n\ndef detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n context = {'question': question}\n return render(request, 'poll/detail.html', context)\n # return HttpResponse(f\"You are looking at question Number {question_id}\")\n\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'poll/results.html', {'question': question})\n\n\ndef vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n\n selected_choice.votes += 1\n selected_choice.save()\n return HttpResponseRedirect(reverse('poll:results', args=(question_id,)))\n\n",
"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.template import loader\nfrom django.urls import reverse\nfrom .models import *\n\n\ndef index(request):\n question_list = Question.objects.order_by('pub_date')\n context = {'question_list': question_list}\n return render(request, 'poll/index.html', context)\n\n\ndef detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n context = {'question': question}\n return render(request, 'poll/detail.html', context)\n\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'poll/results.html', {'question': question})\n\n\ndef vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n selected_choice.votes += 1\n selected_choice.save()\n return HttpResponseRedirect(reverse('poll:results', args=(question_id,)))\n",
"<import token>\n\n\ndef index(request):\n question_list = Question.objects.order_by('pub_date')\n context = {'question_list': question_list}\n return render(request, 'poll/index.html', context)\n\n\ndef detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n context = {'question': question}\n return render(request, 'poll/detail.html', context)\n\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'poll/results.html', {'question': question})\n\n\ndef vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n selected_choice.votes += 1\n selected_choice.save()\n return HttpResponseRedirect(reverse('poll:results', args=(question_id,)))\n",
"<import token>\n\n\ndef index(request):\n question_list = Question.objects.order_by('pub_date')\n context = {'question_list': question_list}\n return render(request, 'poll/index.html', context)\n\n\ndef detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n context = {'question': question}\n return render(request, 'poll/detail.html', context)\n\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'poll/results.html', {'question': question})\n\n\n<function token>\n",
"<import token>\n\n\ndef index(request):\n question_list = Question.objects.order_by('pub_date')\n context = {'question_list': question_list}\n return render(request, 'poll/index.html', context)\n\n\n<function token>\n\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'poll/results.html', {'question': question})\n\n\n<function token>\n",
"<import token>\n\n\ndef index(request):\n question_list = Question.objects.order_by('pub_date')\n context = {'question_list': question_list}\n return render(request, 'poll/index.html', context)\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,846 |
866ec87f2935cc666598535f421dba372626078d
|
# a small collection of custom error messages
# pertaining to the CSV_Algorithms library
class ColumnCount(Exception):
def __init__(self, expected, found):
self.message = f'''
Mail Maga CSV did not have correct
column count.
Expected: {expected}, Found: {found}
'''
class MatchingColumn(Exception):
def __init__(self, fileName, offendingColumn, correctColumn):
self.message = f'''
            Expected {fileName} to possess column {correctColumn}
but instead found {offendingColumn}. Please make sure
that you have uploaded the correct CSV.
'''
class AcceptableFormat(Exception):
def __init__(self, fileName, acceptedEncodings):
self.message = f'''
The uploaded file {fileName} does not conform to any
of the accepted file encodings {acceptedEncodings}
'''
|
[
"# a small collection of custom error messages \n# pertaining to the CSV_Algorithms library\n\nclass ColumnCount(Exception):\n def __init__(self, expected, found):\n self.message = f'''\n Mail Maga CSV did not have correct\n column count. \n Expected: {expected}, Found: {found}\n '''\n\nclass MatchingColumn(Exception):\n def __init__(self, fileName, offendingColumn, correctColumn):\n self.message = f'''\n Expected {fileName} to posses column {correctColumn}\n but instead found {offendingColumn}. Please make sure\n that you have uploaded the correct CSV.\n '''\n\nclass AcceptableFormat(Exception):\n def __init__(self, fileName, acceptedEncodings):\n self.message = f'''\n The uploaded file {fileName} does not conform to any \n of the accepted file encodings {acceptedEncodings}\n '''",
"class ColumnCount(Exception):\n\n def __init__(self, expected, found):\n self.message = f\"\"\"\n Mail Maga CSV did not have correct\n column count. \n Expected: {expected}, Found: {found}\n \"\"\"\n\n\nclass MatchingColumn(Exception):\n\n def __init__(self, fileName, offendingColumn, correctColumn):\n self.message = f\"\"\"\n Expected {fileName} to posses column {correctColumn}\n but instead found {offendingColumn}. Please make sure\n that you have uploaded the correct CSV.\n \"\"\"\n\n\nclass AcceptableFormat(Exception):\n\n def __init__(self, fileName, acceptedEncodings):\n self.message = f\"\"\"\n The uploaded file {fileName} does not conform to any \n of the accepted file encodings {acceptedEncodings}\n \"\"\"\n",
"class ColumnCount(Exception):\n <function token>\n\n\nclass MatchingColumn(Exception):\n\n def __init__(self, fileName, offendingColumn, correctColumn):\n self.message = f\"\"\"\n Expected {fileName} to posses column {correctColumn}\n but instead found {offendingColumn}. Please make sure\n that you have uploaded the correct CSV.\n \"\"\"\n\n\nclass AcceptableFormat(Exception):\n\n def __init__(self, fileName, acceptedEncodings):\n self.message = f\"\"\"\n The uploaded file {fileName} does not conform to any \n of the accepted file encodings {acceptedEncodings}\n \"\"\"\n",
"<class token>\n\n\nclass MatchingColumn(Exception):\n\n def __init__(self, fileName, offendingColumn, correctColumn):\n self.message = f\"\"\"\n Expected {fileName} to posses column {correctColumn}\n but instead found {offendingColumn}. Please make sure\n that you have uploaded the correct CSV.\n \"\"\"\n\n\nclass AcceptableFormat(Exception):\n\n def __init__(self, fileName, acceptedEncodings):\n self.message = f\"\"\"\n The uploaded file {fileName} does not conform to any \n of the accepted file encodings {acceptedEncodings}\n \"\"\"\n",
"<class token>\n\n\nclass MatchingColumn(Exception):\n <function token>\n\n\nclass AcceptableFormat(Exception):\n\n def __init__(self, fileName, acceptedEncodings):\n self.message = f\"\"\"\n The uploaded file {fileName} does not conform to any \n of the accepted file encodings {acceptedEncodings}\n \"\"\"\n",
"<class token>\n<class token>\n\n\nclass AcceptableFormat(Exception):\n\n def __init__(self, fileName, acceptedEncodings):\n self.message = f\"\"\"\n The uploaded file {fileName} does not conform to any \n of the accepted file encodings {acceptedEncodings}\n \"\"\"\n",
"<class token>\n<class token>\n\n\nclass AcceptableFormat(Exception):\n <function token>\n",
"<class token>\n<class token>\n<class token>\n"
] | false |
98,847 |
6b695c859c288046d26e6bd7966dbcca68ba27e9
|
from my_detector import MyDetector
import sys
sys.path.append("C:/Users/hliu/Desktop/DL/toolbox")
import tool
import lung_seg
import glob
import pandas as pd
sys.path.append("C:/Users/hliu/Desktop/DL/models/classification")
if __name__ == "__main__":
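    # Machine-specific paths: a trained Mask R-CNN pneumonia checkpoint and a single
    # DICOM image from the Kaggle stage 1 test set.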
weight_fp = "C:/Users/hliu/Desktop/tmp/final/mask_rcnn_pneumonia_0019_1.1470_lei.h5"
image_fp = "C:/Users/hliu/Desktop/DL/dataset/Kaggle/stage_1_test_images/000db696-cf54-4385-b10b-6b16fbb3f985.dcm"
my_detector = MyDetector()
my_detector.load_model(weight_fp)
my_detector.visualize(image_fp, show=True, min_conf=0.95)
|
[
"from my_detector import MyDetector\r\n\r\n\r\nimport sys\r\nsys.path.append(\"C:/Users/hliu/Desktop/DL/toolbox\")\r\nimport tool\r\nimport lung_seg\r\nimport glob\r\nimport pandas as pd\r\nsys.path.append(\"C:/Users/hliu/Desktop/DL/models/classification\")\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n weight_fp = \"C:/Users/hliu/Desktop/tmp/final/mask_rcnn_pneumonia_0019_1.1470_lei.h5\"\r\n image_fp = \"C:/Users/hliu/Desktop/DL/dataset/Kaggle/stage_1_test_images/000db696-cf54-4385-b10b-6b16fbb3f985.dcm\"\r\n\r\n\r\n my_detector = MyDetector()\r\n my_detector.load_model(weight_fp)\r\n my_detector.visualize(image_fp, show=True, min_conf=0.95)",
"from my_detector import MyDetector\nimport sys\nsys.path.append('C:/Users/hliu/Desktop/DL/toolbox')\nimport tool\nimport lung_seg\nimport glob\nimport pandas as pd\nsys.path.append('C:/Users/hliu/Desktop/DL/models/classification')\nif __name__ == '__main__':\n weight_fp = (\n 'C:/Users/hliu/Desktop/tmp/final/mask_rcnn_pneumonia_0019_1.1470_lei.h5'\n )\n image_fp = (\n 'C:/Users/hliu/Desktop/DL/dataset/Kaggle/stage_1_test_images/000db696-cf54-4385-b10b-6b16fbb3f985.dcm'\n )\n my_detector = MyDetector()\n my_detector.load_model(weight_fp)\n my_detector.visualize(image_fp, show=True, min_conf=0.95)\n",
"<import token>\nsys.path.append('C:/Users/hliu/Desktop/DL/toolbox')\n<import token>\nsys.path.append('C:/Users/hliu/Desktop/DL/models/classification')\nif __name__ == '__main__':\n weight_fp = (\n 'C:/Users/hliu/Desktop/tmp/final/mask_rcnn_pneumonia_0019_1.1470_lei.h5'\n )\n image_fp = (\n 'C:/Users/hliu/Desktop/DL/dataset/Kaggle/stage_1_test_images/000db696-cf54-4385-b10b-6b16fbb3f985.dcm'\n )\n my_detector = MyDetector()\n my_detector.load_model(weight_fp)\n my_detector.visualize(image_fp, show=True, min_conf=0.95)\n",
"<import token>\n<code token>\n<import token>\n<code token>\n"
] | false |
98,848 |
926178f100912f31d368e6399d6f7bba7d28ff31
|
# Generated by Django 2.2.17 on 2021-03-22 04:30
import uuid
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
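    # Initial schema for the course_flow app: Workflow (specialised into Activity,
    # Course and Program) is assembled from Columns, Weeks and Nodes, with Outcomes and
    # Projects attached through explicit link models that carry a rank for ordering.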
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Column",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(blank=True, max_length=50, null=True),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("last_modified", models.DateTimeField(auto_now=True)),
("published", models.BooleanField(default=False)),
("visible", models.BooleanField(default=True)),
("colour", models.PositiveIntegerField(null=True)),
(
"column_type",
models.PositiveIntegerField(
choices=[
(0, "Custom Activity Column"),
(1, "Out of Class (Instructor)"),
(2, "Out of Class (Students)"),
(3, "In Class (Instructor)"),
(4, "In Class (Students)"),
(10, "Custom Course Column"),
(11, "Preparation"),
(12, "Lesson"),
(13, "Artifact"),
(14, "Assessment"),
(20, "Custom Program Category"),
],
default=0,
),
),
("is_original", models.BooleanField(default=False)),
(
"hash",
models.UUIDField(
default=uuid.uuid4, editable=False, unique=True
),
),
(
"author",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"parent_column",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.Column",
),
),
],
options={
"verbose_name": "Column",
"verbose_name_plural": "Columns",
},
),
migrations.CreateModel(
name="ColumnWorkflow",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("added_on", models.DateTimeField(auto_now_add=True)),
("rank", models.PositiveIntegerField(default=0)),
(
"column",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Column",
),
),
],
options={
"verbose_name": "Column-Workflow Link",
"verbose_name_plural": "Column-Workflow Links",
},
),
migrations.CreateModel(
name="Discipline",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(
help_text="Enter the name of a new discipline.",
max_length=100,
unique=True,
verbose_name="Discipline name",
),
),
],
options={
"verbose_name": "discipline",
"verbose_name_plural": "disciplines",
},
),
migrations.CreateModel(
name="Node",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(blank=True, max_length=50, null=True),
),
(
"description",
models.TextField(blank=True, max_length=500, null=True),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("last_modified", models.DateTimeField(auto_now=True)),
("published", models.BooleanField(default=False)),
("is_original", models.BooleanField(default=True)),
("has_autolink", models.BooleanField(default=False)),
("is_dropped", models.BooleanField(default=False)),
(
"context_classification",
models.PositiveIntegerField(
choices=[
(0, "None"),
(1, "Individual Work"),
(2, "Work in Groups"),
(3, "Whole Class"),
(101, "Formative"),
(102, "Summative"),
(103, "Comprehensive"),
],
default=0,
),
),
(
"task_classification",
models.PositiveIntegerField(
choices=[
(0, "None"),
(1, "Gather Information"),
(2, "Discuss"),
(3, "Problem Solve"),
(4, "Analyze"),
(5, "Assess/Review Peers"),
(6, "Debate"),
(7, "Game/Roleplay"),
(8, "Create/Design"),
(9, "Revise/Improve"),
(10, "Read"),
(11, "Write"),
(12, "Present"),
(13, "Experiment/Inquiry"),
(14, "Quiz/Test"),
(15, "Instructor Resource Curation"),
(16, "Instructor Orchestration"),
(17, "Instructor Evaluation"),
(18, "Other"),
(101, "Jigsaw"),
(102, "Peer Instruction"),
(103, "Case Studies"),
(104, "Gallery Walk"),
(105, "Reflective Writing"),
(106, "Two-Stage Exam"),
(107, "Toolkit"),
(108, "One Minute Paper"),
(109, "Distributed Problem Solving"),
(110, "Peer Assessment"),
],
default=0,
),
),
(
"node_type",
models.PositiveIntegerField(
choices=[
(0, "Activity Node"),
(1, "Course Node"),
(2, "Program Node"),
],
default=0,
),
),
(
"time_required",
models.CharField(blank=True, max_length=30, null=True),
),
(
"time_units",
models.PositiveIntegerField(
choices=[
(0, ""),
(1, "seconds"),
(2, "minutes"),
(3, "hours"),
(4, "days"),
(5, "weeks"),
(6, "months"),
(7, "yrs"),
(8, "credits"),
],
default=0,
),
),
("represents_workflow", models.BooleanField(default=False)),
(
"hash",
models.UUIDField(
default=uuid.uuid4, editable=False, unique=True
),
),
(
"author",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="authored_nodes",
to=settings.AUTH_USER_MODEL,
),
),
(
"column",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
to="course_flow.Column",
),
),
],
),
migrations.CreateModel(
name="NodeWeek",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("added_on", models.DateTimeField(auto_now_add=True)),
("rank", models.PositiveIntegerField(default=0)),
(
"node",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Node",
),
),
],
options={
"verbose_name": "Node-Week Link",
"verbose_name_plural": "Node-Week Links",
},
),
migrations.CreateModel(
name="Outcome",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=500)),
("description", models.TextField(max_length=500)),
("created_on", models.DateTimeField(auto_now_add=True)),
("last_modified", models.DateTimeField(auto_now=True)),
("published", models.BooleanField(default=False)),
("is_original", models.BooleanField(default=True)),
("is_dropped", models.BooleanField(default=True)),
("depth", models.PositiveIntegerField(default=0)),
(
"hash",
models.UUIDField(
default=uuid.uuid4, editable=False, unique=True
),
),
(
"author",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "Outcome",
"verbose_name_plural": "Outcomes",
},
),
migrations.CreateModel(
name="OutcomeProject",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("added_on", models.DateTimeField(auto_now_add=True)),
("rank", models.PositiveIntegerField(default=0)),
(
"outcome",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Outcome",
),
),
],
options={
"verbose_name": "Outcome-Project Link",
"verbose_name_plural": "Outcome-Project Links",
},
),
migrations.CreateModel(
name="Project",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(blank=True, max_length=50, null=True),
),
(
"description",
models.CharField(blank=True, max_length=500, null=True),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("last_modified", models.DateTimeField(auto_now=True)),
("published", models.BooleanField(default=False)),
("is_original", models.BooleanField(default=False)),
(
"author",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"outcomes",
models.ManyToManyField(
blank=True,
through="course_flow.OutcomeProject",
to="course_flow.Outcome",
),
),
(
"parent_project",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.Project",
),
),
],
options={
"verbose_name": "Project",
"verbose_name_plural": "Projects",
},
),
migrations.CreateModel(
name="Week",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(blank=True, max_length=50, null=True),
),
(
"description",
models.TextField(blank=True, max_length=500, null=True),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("last_modified", models.DateTimeField(auto_now=True)),
("default", models.BooleanField(default=False)),
("is_original", models.BooleanField(default=True)),
("published", models.BooleanField(default=False)),
("is_strategy", models.BooleanField(default=False)),
(
"hash",
models.UUIDField(
default=uuid.uuid4, editable=False, unique=True
),
),
(
"strategy_classification",
models.PositiveIntegerField(
choices=[
(0, "None"),
(1, "Jigsaw"),
(2, "Peer Instruction"),
(3, "Case Studies"),
(4, "Gallery Walk"),
(5, "Reflective Writing"),
(6, "Two-Stage Exam"),
(7, "Toolkit"),
(8, "One Minute Paper"),
(9, "Distributed Problem Solving"),
(10, "Peer Assessment"),
(11, "Other"),
],
default=0,
),
),
(
"week_type",
models.PositiveIntegerField(
choices=[(0, "Part"), (1, "Week"), (2, "Term")],
default=0,
),
),
(
"author",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"nodes",
models.ManyToManyField(
blank=True,
through="course_flow.NodeWeek",
to="course_flow.Node",
),
),
],
options={
"verbose_name": "Week",
"verbose_name_plural": "Weeks",
},
),
migrations.CreateModel(
name="WeekWorkflow",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("added_on", models.DateTimeField(auto_now_add=True)),
("rank", models.PositiveIntegerField(default=0)),
(
"week",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Week",
),
),
],
options={
"verbose_name": "Week-Workflow Link",
"verbose_name_plural": "Week-Workflow Links",
},
),
migrations.CreateModel(
name="Workflow",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(blank=True, max_length=50, null=True),
),
(
"description",
models.TextField(blank=True, max_length=500, null=True),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("last_modified", models.DateTimeField(auto_now=True)),
("static", models.BooleanField(default=False)),
("published", models.BooleanField(default=False)),
("is_strategy", models.BooleanField(default=False)),
("from_saltise", models.BooleanField(default=False)),
("is_original", models.BooleanField(default=True)),
(
"hash",
models.UUIDField(
default=uuid.uuid4, editable=False, unique=True
),
),
(
"outcomes_type",
models.PositiveIntegerField(
choices=[(0, "Normal"), (1, "Advanced")], default=0
),
),
(
"outcomes_sort",
models.PositiveIntegerField(
choices=[
(0, "Time"),
(1, "Category"),
(2, "Task"),
(3, "Context"),
],
default=0,
),
),
(
"columns",
models.ManyToManyField(
blank=True,
through="course_flow.ColumnWorkflow",
to="course_flow.Column",
),
),
(
"parent_workflow",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.Workflow",
),
),
(
"weeks",
models.ManyToManyField(
blank=True,
through="course_flow.WeekWorkflow",
to="course_flow.Week",
),
),
],
),
migrations.CreateModel(
name="WorkflowProject",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("added_on", models.DateTimeField(auto_now_add=True)),
("rank", models.PositiveIntegerField(default=0)),
(
"project",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Project",
),
),
(
"workflow",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Workflow",
),
),
],
options={
"verbose_name": "Workflow-Project Link",
"verbose_name_plural": "Workflow-Project Links",
},
),
migrations.AddField(
model_name="weekworkflow",
name="workflow",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Workflow",
),
),
migrations.AddField(
model_name="week",
name="original_strategy",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.Workflow",
),
),
migrations.AddField(
model_name="week",
name="parent_week",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.Week",
),
),
migrations.AddField(
model_name="project",
name="workflows",
field=models.ManyToManyField(
blank=True,
through="course_flow.WorkflowProject",
to="course_flow.Workflow",
),
),
migrations.CreateModel(
name="OutcomeWorkflow",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("added_on", models.DateTimeField(auto_now_add=True)),
("rank", models.PositiveIntegerField(default=0)),
(
"outcome",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Outcome",
),
),
(
"workflow",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Workflow",
),
),
],
options={
"verbose_name": "Outcome-Workflow Link",
"verbose_name_plural": "Outcome-Workflow Links",
},
),
migrations.AddField(
model_name="outcomeproject",
name="project",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Project",
),
),
migrations.CreateModel(
name="OutcomeOutcome",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("added_on", models.DateTimeField(auto_now_add=True)),
("rank", models.PositiveIntegerField(default=0)),
(
"child",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="parent_outcome_links",
to="course_flow.Outcome",
),
),
(
"parent",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="child_outcome_links",
to="course_flow.Outcome",
),
),
],
options={
"verbose_name": "Outcome-Outcome Link",
"verbose_name_plural": "Outcome-Outcome Links",
},
),
migrations.CreateModel(
name="OutcomeNode",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("added_on", models.DateTimeField(auto_now_add=True)),
("rank", models.PositiveIntegerField(default=0)),
("degree", models.PositiveIntegerField(default=1)),
(
"node",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Node",
),
),
(
"outcome",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Outcome",
),
),
],
options={
"verbose_name": "Outcome-Node Link",
"verbose_name_plural": "Outcome-Node Links",
},
),
migrations.AddField(
model_name="outcome",
name="children",
field=models.ManyToManyField(
blank=True,
related_name="parent_outcomes",
through="course_flow.OutcomeOutcome",
to="course_flow.Outcome",
),
),
migrations.AddField(
model_name="outcome",
name="parent_outcome",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.Outcome",
),
),
migrations.AddField(
model_name="nodeweek",
name="week",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Week",
),
),
migrations.CreateModel(
name="NodeLink",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"title",
models.CharField(blank=True, max_length=100, null=True),
),
("published", models.BooleanField(default=False)),
(
"source_port",
models.PositiveIntegerField(
choices=[(1, "e"), (2, "s"), (3, "w")], default=2
),
),
(
"target_port",
models.PositiveIntegerField(
choices=[(0, "n"), (1, "e"), (3, "w")], default=0
),
),
("dashed", models.BooleanField(default=False)),
("created_on", models.DateTimeField(auto_now_add=True)),
("last_modified", models.DateTimeField(auto_now=True)),
("is_original", models.BooleanField(default=True)),
(
"hash",
models.UUIDField(
default=uuid.uuid4, editable=False, unique=True
),
),
(
"author",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
(
"parent_nodelink",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.NodeLink",
),
),
(
"source_node",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="outgoing_links",
to="course_flow.Node",
),
),
(
"target_node",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="incoming_links",
to="course_flow.Node",
),
),
],
options={
"verbose_name": "Node Link",
"verbose_name_plural": "Node Links",
},
),
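        # NodeCompletionStatus is the through model behind Node.students,
        # recording a per-student `is_completed` flag for each node.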
migrations.CreateModel(
name="NodeCompletionStatus",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("is_completed", models.BooleanField(default=False)),
(
"node",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Node",
),
),
(
"student",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "Node Completion Status",
"verbose_name_plural": "Node Completion Statuses",
},
),
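        # The AddField operations that follow attach Node's remaining relations
        # (linked_workflow, outcomes, parent_node, students); they are split
        # out of the Node CreateModel so the models they reference (Workflow,
        # Outcome/OutcomeNode, NodeCompletionStatus) are already defined.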
migrations.AddField(
model_name="node",
name="linked_workflow",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.Workflow",
),
),
migrations.AddField(
model_name="node",
name="outcomes",
field=models.ManyToManyField(
blank=True,
through="course_flow.OutcomeNode",
to="course_flow.Outcome",
),
),
migrations.AddField(
model_name="node",
name="parent_node",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.Node",
),
),
migrations.AddField(
model_name="node",
name="students",
field=models.ManyToManyField(
blank=True,
related_name="assigned_nodes",
through="course_flow.NodeCompletionStatus",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AddField(
model_name="columnworkflow",
name="workflow",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Workflow",
),
),
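        # Program, Course, and Activity are multi-table-inheritance children of
        # Workflow: the `workflow_ptr` OneToOneField with parent_link=True acts
        # as both the primary key and the link to the parent Workflow row.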
migrations.CreateModel(
name="Program",
fields=[
(
"workflow_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="course_flow.Workflow",
),
),
(
"author",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to=settings.AUTH_USER_MODEL,
),
),
],
bases=("course_flow.workflow",),
),
migrations.CreateModel(
name="Course",
fields=[
(
"workflow_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="course_flow.Workflow",
),
),
(
"author",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="authored_courses",
to=settings.AUTH_USER_MODEL,
),
),
(
"discipline",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="course_flow.Discipline",
),
),
(
"students",
models.ManyToManyField(
blank=True,
related_name="assigned_courses",
to=settings.AUTH_USER_MODEL,
),
),
],
bases=("course_flow.workflow",),
),
migrations.CreateModel(
name="Activity",
fields=[
(
"workflow_ptr",
models.OneToOneField(
auto_created=True,
on_delete=django.db.models.deletion.CASCADE,
parent_link=True,
primary_key=True,
serialize=False,
to="course_flow.Workflow",
),
),
(
"author",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="authored_activities",
to=settings.AUTH_USER_MODEL,
),
),
(
"students",
models.ManyToManyField(
blank=True,
related_name="assigned_activities",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "Activity",
"verbose_name_plural": "Activities",
},
bases=("course_flow.workflow",),
),
]
|
[
"# Generated by Django 2.2.17 on 2021-03-22 04:30\n\nimport uuid\n\nimport django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Column\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"title\",\n models.CharField(blank=True, max_length=50, null=True),\n ),\n (\"created_on\", models.DateTimeField(auto_now_add=True)),\n (\"last_modified\", models.DateTimeField(auto_now=True)),\n (\"published\", models.BooleanField(default=False)),\n (\"visible\", models.BooleanField(default=True)),\n (\"colour\", models.PositiveIntegerField(null=True)),\n (\n \"column_type\",\n models.PositiveIntegerField(\n choices=[\n (0, \"Custom Activity Column\"),\n (1, \"Out of Class (Instructor)\"),\n (2, \"Out of Class (Students)\"),\n (3, \"In Class (Instructor)\"),\n (4, \"In Class (Students)\"),\n (10, \"Custom Course Column\"),\n (11, \"Preparation\"),\n (12, \"Lesson\"),\n (13, \"Artifact\"),\n (14, \"Assessment\"),\n (20, \"Custom Program Category\"),\n ],\n default=0,\n ),\n ),\n (\"is_original\", models.BooleanField(default=False)),\n (\n \"hash\",\n models.UUIDField(\n default=uuid.uuid4, editable=False, unique=True\n ),\n ),\n (\n \"author\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n (\n \"parent_column\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.Column\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Column\",\n \"verbose_name_plural\": \"Columns\",\n },\n ),\n migrations.CreateModel(\n name=\"ColumnWorkflow\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"added_on\", models.DateTimeField(auto_now_add=True)),\n (\"rank\", models.PositiveIntegerField(default=0)),\n (\n \"column\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Column\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Column-Workflow Link\",\n \"verbose_name_plural\": \"Column-Workflow Links\",\n },\n ),\n migrations.CreateModel(\n name=\"Discipline\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"title\",\n models.CharField(\n help_text=\"Enter the name of a new discipline.\",\n max_length=100,\n unique=True,\n verbose_name=\"Discipline name\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"discipline\",\n \"verbose_name_plural\": \"disciplines\",\n },\n ),\n migrations.CreateModel(\n name=\"Node\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"title\",\n models.CharField(blank=True, max_length=50, null=True),\n ),\n (\n \"description\",\n models.TextField(blank=True, max_length=500, null=True),\n ),\n (\"created_on\", models.DateTimeField(auto_now_add=True)),\n (\"last_modified\", models.DateTimeField(auto_now=True)),\n (\"published\", models.BooleanField(default=False)),\n (\"is_original\", models.BooleanField(default=True)),\n (\"has_autolink\", models.BooleanField(default=False)),\n (\"is_dropped\", 
models.BooleanField(default=False)),\n (\n \"context_classification\",\n models.PositiveIntegerField(\n choices=[\n (0, \"None\"),\n (1, \"Individual Work\"),\n (2, \"Work in Groups\"),\n (3, \"Whole Class\"),\n (101, \"Formative\"),\n (102, \"Summative\"),\n (103, \"Comprehensive\"),\n ],\n default=0,\n ),\n ),\n (\n \"task_classification\",\n models.PositiveIntegerField(\n choices=[\n (0, \"None\"),\n (1, \"Gather Information\"),\n (2, \"Discuss\"),\n (3, \"Problem Solve\"),\n (4, \"Analyze\"),\n (5, \"Assess/Review Peers\"),\n (6, \"Debate\"),\n (7, \"Game/Roleplay\"),\n (8, \"Create/Design\"),\n (9, \"Revise/Improve\"),\n (10, \"Read\"),\n (11, \"Write\"),\n (12, \"Present\"),\n (13, \"Experiment/Inquiry\"),\n (14, \"Quiz/Test\"),\n (15, \"Instructor Resource Curation\"),\n (16, \"Instructor Orchestration\"),\n (17, \"Instructor Evaluation\"),\n (18, \"Other\"),\n (101, \"Jigsaw\"),\n (102, \"Peer Instruction\"),\n (103, \"Case Studies\"),\n (104, \"Gallery Walk\"),\n (105, \"Reflective Writing\"),\n (106, \"Two-Stage Exam\"),\n (107, \"Toolkit\"),\n (108, \"One Minute Paper\"),\n (109, \"Distributed Problem Solving\"),\n (110, \"Peer Assessment\"),\n ],\n default=0,\n ),\n ),\n (\n \"node_type\",\n models.PositiveIntegerField(\n choices=[\n (0, \"Activity Node\"),\n (1, \"Course Node\"),\n (2, \"Program Node\"),\n ],\n default=0,\n ),\n ),\n (\n \"time_required\",\n models.CharField(blank=True, max_length=30, null=True),\n ),\n (\n \"time_units\",\n models.PositiveIntegerField(\n choices=[\n (0, \"\"),\n (1, \"seconds\"),\n (2, \"minutes\"),\n (3, \"hours\"),\n (4, \"days\"),\n (5, \"weeks\"),\n (6, \"months\"),\n (7, \"yrs\"),\n (8, \"credits\"),\n ],\n default=0,\n ),\n ),\n (\"represents_workflow\", models.BooleanField(default=False)),\n (\n \"hash\",\n models.UUIDField(\n default=uuid.uuid4, editable=False, unique=True\n ),\n ),\n (\n \"author\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n related_name=\"authored_nodes\",\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n (\n \"column\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.DO_NOTHING,\n to=\"course_flow.Column\",\n ),\n ),\n ],\n ),\n migrations.CreateModel(\n name=\"NodeWeek\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"added_on\", models.DateTimeField(auto_now_add=True)),\n (\"rank\", models.PositiveIntegerField(default=0)),\n (\n \"node\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Node\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Node-Week Link\",\n \"verbose_name_plural\": \"Node-Week Links\",\n },\n ),\n migrations.CreateModel(\n name=\"Outcome\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"title\", models.CharField(max_length=500)),\n (\"description\", models.TextField(max_length=500)),\n (\"created_on\", models.DateTimeField(auto_now_add=True)),\n (\"last_modified\", models.DateTimeField(auto_now=True)),\n (\"published\", models.BooleanField(default=False)),\n (\"is_original\", models.BooleanField(default=True)),\n (\"is_dropped\", models.BooleanField(default=True)),\n (\"depth\", models.PositiveIntegerField(default=0)),\n (\n \"hash\",\n models.UUIDField(\n default=uuid.uuid4, editable=False, unique=True\n ),\n ),\n (\n \"author\",\n models.ForeignKey(\n null=True,\n 
on_delete=django.db.models.deletion.SET_NULL,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Outcome\",\n \"verbose_name_plural\": \"Outcomes\",\n },\n ),\n migrations.CreateModel(\n name=\"OutcomeProject\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"added_on\", models.DateTimeField(auto_now_add=True)),\n (\"rank\", models.PositiveIntegerField(default=0)),\n (\n \"outcome\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Outcome\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Outcome-Project Link\",\n \"verbose_name_plural\": \"Outcome-Project Links\",\n },\n ),\n migrations.CreateModel(\n name=\"Project\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"title\",\n models.CharField(blank=True, max_length=50, null=True),\n ),\n (\n \"description\",\n models.CharField(blank=True, max_length=500, null=True),\n ),\n (\"created_on\", models.DateTimeField(auto_now_add=True)),\n (\"last_modified\", models.DateTimeField(auto_now=True)),\n (\"published\", models.BooleanField(default=False)),\n (\"is_original\", models.BooleanField(default=False)),\n (\n \"author\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n (\n \"outcomes\",\n models.ManyToManyField(\n blank=True,\n through=\"course_flow.OutcomeProject\",\n to=\"course_flow.Outcome\",\n ),\n ),\n (\n \"parent_project\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.Project\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Project\",\n \"verbose_name_plural\": \"Projects\",\n },\n ),\n migrations.CreateModel(\n name=\"Week\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"title\",\n models.CharField(blank=True, max_length=50, null=True),\n ),\n (\n \"description\",\n models.TextField(blank=True, max_length=500, null=True),\n ),\n (\"created_on\", models.DateTimeField(auto_now_add=True)),\n (\"last_modified\", models.DateTimeField(auto_now=True)),\n (\"default\", models.BooleanField(default=False)),\n (\"is_original\", models.BooleanField(default=True)),\n (\"published\", models.BooleanField(default=False)),\n (\"is_strategy\", models.BooleanField(default=False)),\n (\n \"hash\",\n models.UUIDField(\n default=uuid.uuid4, editable=False, unique=True\n ),\n ),\n (\n \"strategy_classification\",\n models.PositiveIntegerField(\n choices=[\n (0, \"None\"),\n (1, \"Jigsaw\"),\n (2, \"Peer Instruction\"),\n (3, \"Case Studies\"),\n (4, \"Gallery Walk\"),\n (5, \"Reflective Writing\"),\n (6, \"Two-Stage Exam\"),\n (7, \"Toolkit\"),\n (8, \"One Minute Paper\"),\n (9, \"Distributed Problem Solving\"),\n (10, \"Peer Assessment\"),\n (11, \"Other\"),\n ],\n default=0,\n ),\n ),\n (\n \"week_type\",\n models.PositiveIntegerField(\n choices=[(0, \"Part\"), (1, \"Week\"), (2, \"Term\")],\n default=0,\n ),\n ),\n (\n \"author\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n (\n \"nodes\",\n models.ManyToManyField(\n blank=True,\n through=\"course_flow.NodeWeek\",\n to=\"course_flow.Node\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Week\",\n \"verbose_name_plural\": \"Weeks\",\n 
},\n ),\n migrations.CreateModel(\n name=\"WeekWorkflow\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"added_on\", models.DateTimeField(auto_now_add=True)),\n (\"rank\", models.PositiveIntegerField(default=0)),\n (\n \"week\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Week\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Week-Workflow Link\",\n \"verbose_name_plural\": \"Week-Workflow Links\",\n },\n ),\n migrations.CreateModel(\n name=\"Workflow\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"title\",\n models.CharField(blank=True, max_length=50, null=True),\n ),\n (\n \"description\",\n models.TextField(blank=True, max_length=500, null=True),\n ),\n (\"created_on\", models.DateTimeField(auto_now_add=True)),\n (\"last_modified\", models.DateTimeField(auto_now=True)),\n (\"static\", models.BooleanField(default=False)),\n (\"published\", models.BooleanField(default=False)),\n (\"is_strategy\", models.BooleanField(default=False)),\n (\"from_saltise\", models.BooleanField(default=False)),\n (\"is_original\", models.BooleanField(default=True)),\n (\n \"hash\",\n models.UUIDField(\n default=uuid.uuid4, editable=False, unique=True\n ),\n ),\n (\n \"outcomes_type\",\n models.PositiveIntegerField(\n choices=[(0, \"Normal\"), (1, \"Advanced\")], default=0\n ),\n ),\n (\n \"outcomes_sort\",\n models.PositiveIntegerField(\n choices=[\n (0, \"Time\"),\n (1, \"Category\"),\n (2, \"Task\"),\n (3, \"Context\"),\n ],\n default=0,\n ),\n ),\n (\n \"columns\",\n models.ManyToManyField(\n blank=True,\n through=\"course_flow.ColumnWorkflow\",\n to=\"course_flow.Column\",\n ),\n ),\n (\n \"parent_workflow\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.Workflow\",\n ),\n ),\n (\n \"weeks\",\n models.ManyToManyField(\n blank=True,\n through=\"course_flow.WeekWorkflow\",\n to=\"course_flow.Week\",\n ),\n ),\n ],\n ),\n migrations.CreateModel(\n name=\"WorkflowProject\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"added_on\", models.DateTimeField(auto_now_add=True)),\n (\"rank\", models.PositiveIntegerField(default=0)),\n (\n \"project\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Project\",\n ),\n ),\n (\n \"workflow\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Workflow\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Workflow-Project Link\",\n \"verbose_name_plural\": \"Workflow-Project Links\",\n },\n ),\n migrations.AddField(\n model_name=\"weekworkflow\",\n name=\"workflow\",\n field=models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Workflow\",\n ),\n ),\n migrations.AddField(\n model_name=\"week\",\n name=\"original_strategy\",\n field=models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.Workflow\",\n ),\n ),\n migrations.AddField(\n model_name=\"week\",\n name=\"parent_week\",\n field=models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.Week\",\n ),\n ),\n migrations.AddField(\n model_name=\"project\",\n name=\"workflows\",\n field=models.ManyToManyField(\n blank=True,\n 
through=\"course_flow.WorkflowProject\",\n to=\"course_flow.Workflow\",\n ),\n ),\n migrations.CreateModel(\n name=\"OutcomeWorkflow\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"added_on\", models.DateTimeField(auto_now_add=True)),\n (\"rank\", models.PositiveIntegerField(default=0)),\n (\n \"outcome\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Outcome\",\n ),\n ),\n (\n \"workflow\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Workflow\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Outcome-Workflow Link\",\n \"verbose_name_plural\": \"Outcome-Workflow Links\",\n },\n ),\n migrations.AddField(\n model_name=\"outcomeproject\",\n name=\"project\",\n field=models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Project\",\n ),\n ),\n migrations.CreateModel(\n name=\"OutcomeOutcome\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"added_on\", models.DateTimeField(auto_now_add=True)),\n (\"rank\", models.PositiveIntegerField(default=0)),\n (\n \"child\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"parent_outcome_links\",\n to=\"course_flow.Outcome\",\n ),\n ),\n (\n \"parent\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"child_outcome_links\",\n to=\"course_flow.Outcome\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Outcome-Outcome Link\",\n \"verbose_name_plural\": \"Outcome-Outcome Links\",\n },\n ),\n migrations.CreateModel(\n name=\"OutcomeNode\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"added_on\", models.DateTimeField(auto_now_add=True)),\n (\"rank\", models.PositiveIntegerField(default=0)),\n (\"degree\", models.PositiveIntegerField(default=1)),\n (\n \"node\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Node\",\n ),\n ),\n (\n \"outcome\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Outcome\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Outcome-Node Link\",\n \"verbose_name_plural\": \"Outcome-Node Links\",\n },\n ),\n migrations.AddField(\n model_name=\"outcome\",\n name=\"children\",\n field=models.ManyToManyField(\n blank=True,\n related_name=\"parent_outcomes\",\n through=\"course_flow.OutcomeOutcome\",\n to=\"course_flow.Outcome\",\n ),\n ),\n migrations.AddField(\n model_name=\"outcome\",\n name=\"parent_outcome\",\n field=models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.Outcome\",\n ),\n ),\n migrations.AddField(\n model_name=\"nodeweek\",\n name=\"week\",\n field=models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Week\",\n ),\n ),\n migrations.CreateModel(\n name=\"NodeLink\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"title\",\n models.CharField(blank=True, max_length=100, null=True),\n ),\n (\"published\", models.BooleanField(default=False)),\n (\n \"source_port\",\n models.PositiveIntegerField(\n choices=[(1, \"e\"), (2, \"s\"), (3, \"w\")], default=2\n ),\n ),\n (\n \"target_port\",\n 
models.PositiveIntegerField(\n choices=[(0, \"n\"), (1, \"e\"), (3, \"w\")], default=0\n ),\n ),\n (\"dashed\", models.BooleanField(default=False)),\n (\"created_on\", models.DateTimeField(auto_now_add=True)),\n (\"last_modified\", models.DateTimeField(auto_now=True)),\n (\"is_original\", models.BooleanField(default=True)),\n (\n \"hash\",\n models.UUIDField(\n default=uuid.uuid4, editable=False, unique=True\n ),\n ),\n (\n \"author\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n (\n \"parent_nodelink\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.NodeLink\",\n ),\n ),\n (\n \"source_node\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"outgoing_links\",\n to=\"course_flow.Node\",\n ),\n ),\n (\n \"target_node\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n related_name=\"incoming_links\",\n to=\"course_flow.Node\",\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Node Link\",\n \"verbose_name_plural\": \"Node Links\",\n },\n ),\n migrations.CreateModel(\n name=\"NodeCompletionStatus\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\"is_completed\", models.BooleanField(default=False)),\n (\n \"node\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Node\",\n ),\n ),\n (\n \"student\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Node Completion Status\",\n \"verbose_name_plural\": \"Node Completion Statuses\",\n },\n ),\n migrations.AddField(\n model_name=\"node\",\n name=\"linked_workflow\",\n field=models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.Workflow\",\n ),\n ),\n migrations.AddField(\n model_name=\"node\",\n name=\"outcomes\",\n field=models.ManyToManyField(\n blank=True,\n through=\"course_flow.OutcomeNode\",\n to=\"course_flow.Outcome\",\n ),\n ),\n migrations.AddField(\n model_name=\"node\",\n name=\"parent_node\",\n field=models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.Node\",\n ),\n ),\n migrations.AddField(\n model_name=\"node\",\n name=\"students\",\n field=models.ManyToManyField(\n blank=True,\n related_name=\"assigned_nodes\",\n through=\"course_flow.NodeCompletionStatus\",\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n migrations.AddField(\n model_name=\"columnworkflow\",\n name=\"workflow\",\n field=models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"course_flow.Workflow\",\n ),\n ),\n migrations.CreateModel(\n name=\"Program\",\n fields=[\n (\n \"workflow_ptr\",\n models.OneToOneField(\n auto_created=True,\n on_delete=django.db.models.deletion.CASCADE,\n parent_link=True,\n primary_key=True,\n serialize=False,\n to=\"course_flow.Workflow\",\n ),\n ),\n (\n \"author\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n ],\n bases=(\"course_flow.workflow\",),\n ),\n migrations.CreateModel(\n name=\"Course\",\n fields=[\n (\n \"workflow_ptr\",\n models.OneToOneField(\n auto_created=True,\n on_delete=django.db.models.deletion.CASCADE,\n parent_link=True,\n primary_key=True,\n serialize=False,\n to=\"course_flow.Workflow\",\n ),\n ),\n (\n \"author\",\n 
models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n related_name=\"authored_courses\",\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n (\n \"discipline\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n to=\"course_flow.Discipline\",\n ),\n ),\n (\n \"students\",\n models.ManyToManyField(\n blank=True,\n related_name=\"assigned_courses\",\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n ],\n bases=(\"course_flow.workflow\",),\n ),\n migrations.CreateModel(\n name=\"Activity\",\n fields=[\n (\n \"workflow_ptr\",\n models.OneToOneField(\n auto_created=True,\n on_delete=django.db.models.deletion.CASCADE,\n parent_link=True,\n primary_key=True,\n serialize=False,\n to=\"course_flow.Workflow\",\n ),\n ),\n (\n \"author\",\n models.ForeignKey(\n null=True,\n on_delete=django.db.models.deletion.SET_NULL,\n related_name=\"authored_activities\",\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n (\n \"students\",\n models.ManyToManyField(\n blank=True,\n related_name=\"assigned_activities\",\n to=settings.AUTH_USER_MODEL,\n ),\n ),\n ],\n options={\n \"verbose_name\": \"Activity\",\n \"verbose_name_plural\": \"Activities\",\n },\n bases=(\"course_flow.workflow\",),\n ),\n ]\n",
"import uuid\nimport django.db.models.deletion\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Column', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('title', models.CharField(blank=True,\n max_length=50, null=True)), ('created_on', models.DateTimeField(\n auto_now_add=True)), ('last_modified', models.DateTimeField(\n auto_now=True)), ('published', models.BooleanField(default=False)),\n ('visible', models.BooleanField(default=True)), ('colour', models.\n PositiveIntegerField(null=True)), ('column_type', models.\n PositiveIntegerField(choices=[(0, 'Custom Activity Column'), (1,\n 'Out of Class (Instructor)'), (2, 'Out of Class (Students)'), (3,\n 'In Class (Instructor)'), (4, 'In Class (Students)'), (10,\n 'Custom Course Column'), (11, 'Preparation'), (12, 'Lesson'), (13,\n 'Artifact'), (14, 'Assessment'), (20, 'Custom Program Category')],\n default=0)), ('is_original', models.BooleanField(default=False)), (\n 'hash', models.UUIDField(default=uuid.uuid4, editable=False, unique\n =True)), ('author', models.ForeignKey(null=True, on_delete=django.\n db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), (\n 'parent_column', models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to='course_flow.Column'))], options={\n 'verbose_name': 'Column', 'verbose_name_plural': 'Columns'}),\n migrations.CreateModel(name='ColumnWorkflow', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('added_on', models.DateTimeField(auto_now_add\n =True)), ('rank', models.PositiveIntegerField(default=0)), (\n 'column', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to='course_flow.Column'))], options={'verbose_name':\n 'Column-Workflow Link', 'verbose_name_plural':\n 'Column-Workflow Links'}), migrations.CreateModel(name='Discipline',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('title', models.CharField(\n help_text='Enter the name of a new discipline.', max_length=100,\n unique=True, verbose_name='Discipline name'))], options={\n 'verbose_name': 'discipline', 'verbose_name_plural': 'disciplines'}\n ), migrations.CreateModel(name='Node', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(blank=True,\n max_length=50, null=True)), ('description', models.TextField(blank=\n True, max_length=500, null=True)), ('created_on', models.\n DateTimeField(auto_now_add=True)), ('last_modified', models.\n DateTimeField(auto_now=True)), ('published', models.BooleanField(\n default=False)), ('is_original', models.BooleanField(default=True)),\n ('has_autolink', models.BooleanField(default=False)), ('is_dropped',\n models.BooleanField(default=False)), ('context_classification',\n models.PositiveIntegerField(choices=[(0, 'None'), (1,\n 'Individual Work'), (2, 'Work in Groups'), (3, 'Whole Class'), (101,\n 'Formative'), (102, 'Summative'), (103, 'Comprehensive')], default=\n 0)), ('task_classification', models.PositiveIntegerField(choices=[(\n 0, 'None'), (1, 'Gather Information'), (2, 'Discuss'), (3,\n 'Problem Solve'), (4, 'Analyze'), (5, 'Assess/Review Peers'), (6,\n 'Debate'), (7, 'Game/Roleplay'), (8, 
'Create/Design'), (9,\n 'Revise/Improve'), (10, 'Read'), (11, 'Write'), (12, 'Present'), (\n 13, 'Experiment/Inquiry'), (14, 'Quiz/Test'), (15,\n 'Instructor Resource Curation'), (16, 'Instructor Orchestration'),\n (17, 'Instructor Evaluation'), (18, 'Other'), (101, 'Jigsaw'), (102,\n 'Peer Instruction'), (103, 'Case Studies'), (104, 'Gallery Walk'),\n (105, 'Reflective Writing'), (106, 'Two-Stage Exam'), (107,\n 'Toolkit'), (108, 'One Minute Paper'), (109,\n 'Distributed Problem Solving'), (110, 'Peer Assessment')], default=\n 0)), ('node_type', models.PositiveIntegerField(choices=[(0,\n 'Activity Node'), (1, 'Course Node'), (2, 'Program Node')], default\n =0)), ('time_required', models.CharField(blank=True, max_length=30,\n null=True)), ('time_units', models.PositiveIntegerField(choices=[(0,\n ''), (1, 'seconds'), (2, 'minutes'), (3, 'hours'), (4, 'days'), (5,\n 'weeks'), (6, 'months'), (7, 'yrs'), (8, 'credits')], default=0)),\n ('represents_workflow', models.BooleanField(default=False)), (\n 'hash', models.UUIDField(default=uuid.uuid4, editable=False, unique\n =True)), ('author', models.ForeignKey(null=True, on_delete=django.\n db.models.deletion.SET_NULL, related_name='authored_nodes', to=\n settings.AUTH_USER_MODEL)), ('column', models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.DO_NOTHING, to=\n 'course_flow.Column'))]), migrations.CreateModel(name='NodeWeek',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('added_on', models.\n DateTimeField(auto_now_add=True)), ('rank', models.\n PositiveIntegerField(default=0)), ('node', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='course_flow.Node')\n )], options={'verbose_name': 'Node-Week Link',\n 'verbose_name_plural': 'Node-Week Links'}), migrations.CreateModel(\n name='Outcome', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('title',\n models.CharField(max_length=500)), ('description', models.TextField\n (max_length=500)), ('created_on', models.DateTimeField(auto_now_add\n =True)), ('last_modified', models.DateTimeField(auto_now=True)), (\n 'published', models.BooleanField(default=False)), ('is_original',\n models.BooleanField(default=True)), ('is_dropped', models.\n BooleanField(default=True)), ('depth', models.PositiveIntegerField(\n default=0)), ('hash', models.UUIDField(default=uuid.uuid4, editable\n =False, unique=True)), ('author', models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=settings.\n AUTH_USER_MODEL))], options={'verbose_name': 'Outcome',\n 'verbose_name_plural': 'Outcomes'}), migrations.CreateModel(name=\n 'OutcomeProject', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('added_on',\n models.DateTimeField(auto_now_add=True)), ('rank', models.\n PositiveIntegerField(default=0)), ('outcome', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Outcome'))], options={'verbose_name':\n 'Outcome-Project Link', 'verbose_name_plural':\n 'Outcome-Project Links'}), migrations.CreateModel(name='Project',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('title', models.CharField(\n blank=True, max_length=50, null=True)), ('description', models.\n CharField(blank=True, max_length=500, null=True)), ('created_on',\n models.DateTimeField(auto_now_add=True)), ('last_modified', models.\n 
DateTimeField(auto_now=True)), ('published', models.BooleanField(\n default=False)), ('is_original', models.BooleanField(default=False)\n ), ('author', models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), (\n 'outcomes', models.ManyToManyField(blank=True, through=\n 'course_flow.OutcomeProject', to='course_flow.Outcome')), (\n 'parent_project', models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to='course_flow.Project'))], options={\n 'verbose_name': 'Project', 'verbose_name_plural': 'Projects'}),\n migrations.CreateModel(name='Week', fields=[('id', models.AutoField\n (auto_created=True, primary_key=True, serialize=False, verbose_name\n ='ID')), ('title', models.CharField(blank=True, max_length=50, null\n =True)), ('description', models.TextField(blank=True, max_length=\n 500, null=True)), ('created_on', models.DateTimeField(auto_now_add=\n True)), ('last_modified', models.DateTimeField(auto_now=True)), (\n 'default', models.BooleanField(default=False)), ('is_original',\n models.BooleanField(default=True)), ('published', models.\n BooleanField(default=False)), ('is_strategy', models.BooleanField(\n default=False)), ('hash', models.UUIDField(default=uuid.uuid4,\n editable=False, unique=True)), ('strategy_classification', models.\n PositiveIntegerField(choices=[(0, 'None'), (1, 'Jigsaw'), (2,\n 'Peer Instruction'), (3, 'Case Studies'), (4, 'Gallery Walk'), (5,\n 'Reflective Writing'), (6, 'Two-Stage Exam'), (7, 'Toolkit'), (8,\n 'One Minute Paper'), (9, 'Distributed Problem Solving'), (10,\n 'Peer Assessment'), (11, 'Other')], default=0)), ('week_type',\n models.PositiveIntegerField(choices=[(0, 'Part'), (1, 'Week'), (2,\n 'Term')], default=0)), ('author', models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=settings.\n AUTH_USER_MODEL)), ('nodes', models.ManyToManyField(blank=True,\n through='course_flow.NodeWeek', to='course_flow.Node'))], options={\n 'verbose_name': 'Week', 'verbose_name_plural': 'Weeks'}),\n migrations.CreateModel(name='WeekWorkflow', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('added_on', models.DateTimeField(auto_now_add\n =True)), ('rank', models.PositiveIntegerField(default=0)), ('week',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Week'))], options={'verbose_name':\n 'Week-Workflow Link', 'verbose_name_plural': 'Week-Workflow Links'}\n ), migrations.CreateModel(name='Workflow', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(blank=True,\n max_length=50, null=True)), ('description', models.TextField(blank=\n True, max_length=500, null=True)), ('created_on', models.\n DateTimeField(auto_now_add=True)), ('last_modified', models.\n DateTimeField(auto_now=True)), ('static', models.BooleanField(\n default=False)), ('published', models.BooleanField(default=False)),\n ('is_strategy', models.BooleanField(default=False)), (\n 'from_saltise', models.BooleanField(default=False)), ('is_original',\n models.BooleanField(default=True)), ('hash', models.UUIDField(\n default=uuid.uuid4, editable=False, unique=True)), ('outcomes_type',\n models.PositiveIntegerField(choices=[(0, 'Normal'), (1, 'Advanced')\n ], default=0)), ('outcomes_sort', models.PositiveIntegerField(\n choices=[(0, 'Time'), (1, 'Category'), (2, 'Task'), (3, 'Context')],\n default=0)), ('columns', 
models.ManyToManyField(blank=True, through\n ='course_flow.ColumnWorkflow', to='course_flow.Column')), (\n 'parent_workflow', models.ForeignKey(null=True, on_delete=django.db\n .models.deletion.SET_NULL, to='course_flow.Workflow')), ('weeks',\n models.ManyToManyField(blank=True, through=\n 'course_flow.WeekWorkflow', to='course_flow.Week'))]), migrations.\n CreateModel(name='WorkflowProject', fields=[('id', models.AutoField\n (auto_created=True, primary_key=True, serialize=False, verbose_name\n ='ID')), ('added_on', models.DateTimeField(auto_now_add=True)), (\n 'rank', models.PositiveIntegerField(default=0)), ('project', models\n .ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Project')), ('workflow', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='course_flow.Workflow'))],\n options={'verbose_name': 'Workflow-Project Link',\n 'verbose_name_plural': 'Workflow-Project Links'}), migrations.\n AddField(model_name='weekworkflow', name='workflow', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Workflow')), migrations.AddField(model_name='week',\n name='original_strategy', field=models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=\n 'course_flow.Workflow')), migrations.AddField(model_name='week',\n name='parent_week', field=models.ForeignKey(null=True, on_delete=\n django.db.models.deletion.SET_NULL, to='course_flow.Week')),\n migrations.AddField(model_name='project', name='workflows', field=\n models.ManyToManyField(blank=True, through=\n 'course_flow.WorkflowProject', to='course_flow.Workflow')),\n migrations.CreateModel(name='OutcomeWorkflow', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('added_on', models.DateTimeField(\n auto_now_add=True)), ('rank', models.PositiveIntegerField(default=0\n )), ('outcome', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='course_flow.Outcome')), ('workflow', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Workflow'))], options={'verbose_name':\n 'Outcome-Workflow Link', 'verbose_name_plural':\n 'Outcome-Workflow Links'}), migrations.AddField(model_name=\n 'outcomeproject', name='project', field=models.ForeignKey(on_delete\n =django.db.models.deletion.CASCADE, to='course_flow.Project')),\n migrations.CreateModel(name='OutcomeOutcome', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('added_on', models.DateTimeField(auto_now_add\n =True)), ('rank', models.PositiveIntegerField(default=0)), ('child',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='parent_outcome_links', to='course_flow.Outcome')), (\n 'parent', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='child_outcome_links', to=\n 'course_flow.Outcome'))], options={'verbose_name':\n 'Outcome-Outcome Link', 'verbose_name_plural':\n 'Outcome-Outcome Links'}), migrations.CreateModel(name=\n 'OutcomeNode', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('added_on',\n models.DateTimeField(auto_now_add=True)), ('rank', models.\n PositiveIntegerField(default=0)), ('degree', models.\n PositiveIntegerField(default=1)), ('node', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='course_flow.Node')\n ), ('outcome', models.ForeignKey(on_delete=django.db.models.\n 
deletion.CASCADE, to='course_flow.Outcome'))], options={\n 'verbose_name': 'Outcome-Node Link', 'verbose_name_plural':\n 'Outcome-Node Links'}), migrations.AddField(model_name='outcome',\n name='children', field=models.ManyToManyField(blank=True,\n related_name='parent_outcomes', through=\n 'course_flow.OutcomeOutcome', to='course_flow.Outcome')),\n migrations.AddField(model_name='outcome', name='parent_outcome',\n field=models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='course_flow.Outcome')), migrations.AddField(\n model_name='nodeweek', name='week', field=models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='course_flow.Week')\n ), migrations.CreateModel(name='NodeLink', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(blank=True,\n max_length=100, null=True)), ('published', models.BooleanField(\n default=False)), ('source_port', models.PositiveIntegerField(\n choices=[(1, 'e'), (2, 's'), (3, 'w')], default=2)), ('target_port',\n models.PositiveIntegerField(choices=[(0, 'n'), (1, 'e'), (3, 'w')],\n default=0)), ('dashed', models.BooleanField(default=False)), (\n 'created_on', models.DateTimeField(auto_now_add=True)), (\n 'last_modified', models.DateTimeField(auto_now=True)), (\n 'is_original', models.BooleanField(default=True)), ('hash', models.\n UUIDField(default=uuid.uuid4, editable=False, unique=True)), (\n 'author', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), (\n 'parent_nodelink', models.ForeignKey(null=True, on_delete=django.db\n .models.deletion.SET_NULL, to='course_flow.NodeLink')), (\n 'source_node', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='outgoing_links', to=\n 'course_flow.Node')), ('target_node', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='incoming_links',\n to='course_flow.Node'))], options={'verbose_name': 'Node Link',\n 'verbose_name_plural': 'Node Links'}), migrations.CreateModel(name=\n 'NodeCompletionStatus', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('is_completed', models.BooleanField(default=False)), (\n 'node', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to='course_flow.Node')), ('student', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL))], options={'verbose_name':\n 'Node Completion Status', 'verbose_name_plural':\n 'Node Completion Statuses'}), migrations.AddField(model_name='node',\n name='linked_workflow', field=models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=\n 'course_flow.Workflow')), migrations.AddField(model_name='node',\n name='outcomes', field=models.ManyToManyField(blank=True, through=\n 'course_flow.OutcomeNode', to='course_flow.Outcome')), migrations.\n AddField(model_name='node', name='parent_node', field=models.\n ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='course_flow.Node')), migrations.AddField(model_name='node',\n name='students', field=models.ManyToManyField(blank=True,\n related_name='assigned_nodes', through=\n 'course_flow.NodeCompletionStatus', to=settings.AUTH_USER_MODEL)),\n migrations.AddField(model_name='columnworkflow', name='workflow',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n to='course_flow.Workflow')), 
migrations.CreateModel(name='Program',\n fields=[('workflow_ptr', models.OneToOneField(auto_created=True,\n on_delete=django.db.models.deletion.CASCADE, parent_link=True,\n primary_key=True, serialize=False, to='course_flow.Workflow')), (\n 'author', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.SET_NULL, to=settings.AUTH_USER_MODEL))], bases=(\n 'course_flow.workflow',)), migrations.CreateModel(name='Course',\n fields=[('workflow_ptr', models.OneToOneField(auto_created=True,\n on_delete=django.db.models.deletion.CASCADE, parent_link=True,\n primary_key=True, serialize=False, to='course_flow.Workflow')), (\n 'author', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='authored_courses', to=settings.\n AUTH_USER_MODEL)), ('discipline', models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=\n 'course_flow.Discipline')), ('students', models.ManyToManyField(\n blank=True, related_name='assigned_courses', to=settings.\n AUTH_USER_MODEL))], bases=('course_flow.workflow',)), migrations.\n CreateModel(name='Activity', fields=[('workflow_ptr', models.\n OneToOneField(auto_created=True, on_delete=django.db.models.\n deletion.CASCADE, parent_link=True, primary_key=True, serialize=\n False, to='course_flow.Workflow')), ('author', models.ForeignKey(\n null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='authored_activities', to=settings.AUTH_USER_MODEL)),\n ('students', models.ManyToManyField(blank=True, related_name=\n 'assigned_activities', to=settings.AUTH_USER_MODEL))], options={\n 'verbose_name': 'Activity', 'verbose_name_plural': 'Activities'},\n bases=('course_flow.workflow',))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Column', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('title', models.CharField(blank=True,\n max_length=50, null=True)), ('created_on', models.DateTimeField(\n auto_now_add=True)), ('last_modified', models.DateTimeField(\n auto_now=True)), ('published', models.BooleanField(default=False)),\n ('visible', models.BooleanField(default=True)), ('colour', models.\n PositiveIntegerField(null=True)), ('column_type', models.\n PositiveIntegerField(choices=[(0, 'Custom Activity Column'), (1,\n 'Out of Class (Instructor)'), (2, 'Out of Class (Students)'), (3,\n 'In Class (Instructor)'), (4, 'In Class (Students)'), (10,\n 'Custom Course Column'), (11, 'Preparation'), (12, 'Lesson'), (13,\n 'Artifact'), (14, 'Assessment'), (20, 'Custom Program Category')],\n default=0)), ('is_original', models.BooleanField(default=False)), (\n 'hash', models.UUIDField(default=uuid.uuid4, editable=False, unique\n =True)), ('author', models.ForeignKey(null=True, on_delete=django.\n db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), (\n 'parent_column', models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to='course_flow.Column'))], options={\n 'verbose_name': 'Column', 'verbose_name_plural': 'Columns'}),\n migrations.CreateModel(name='ColumnWorkflow', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('added_on', models.DateTimeField(auto_now_add\n =True)), ('rank', models.PositiveIntegerField(default=0)), (\n 'column', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to='course_flow.Column'))], options={'verbose_name':\n 'Column-Workflow Link', 'verbose_name_plural':\n 'Column-Workflow Links'}), migrations.CreateModel(name='Discipline',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('title', models.CharField(\n help_text='Enter the name of a new discipline.', max_length=100,\n unique=True, verbose_name='Discipline name'))], options={\n 'verbose_name': 'discipline', 'verbose_name_plural': 'disciplines'}\n ), migrations.CreateModel(name='Node', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(blank=True,\n max_length=50, null=True)), ('description', models.TextField(blank=\n True, max_length=500, null=True)), ('created_on', models.\n DateTimeField(auto_now_add=True)), ('last_modified', models.\n DateTimeField(auto_now=True)), ('published', models.BooleanField(\n default=False)), ('is_original', models.BooleanField(default=True)),\n ('has_autolink', models.BooleanField(default=False)), ('is_dropped',\n models.BooleanField(default=False)), ('context_classification',\n models.PositiveIntegerField(choices=[(0, 'None'), (1,\n 'Individual Work'), (2, 'Work in Groups'), (3, 'Whole Class'), (101,\n 'Formative'), (102, 'Summative'), (103, 'Comprehensive')], default=\n 0)), ('task_classification', models.PositiveIntegerField(choices=[(\n 0, 'None'), (1, 'Gather Information'), (2, 'Discuss'), (3,\n 'Problem Solve'), (4, 'Analyze'), (5, 'Assess/Review Peers'), (6,\n 'Debate'), (7, 'Game/Roleplay'), (8, 'Create/Design'), (9,\n 'Revise/Improve'), (10, 'Read'), (11, 'Write'), (12, 'Present'), (\n 13, 
'Experiment/Inquiry'), (14, 'Quiz/Test'), (15,\n 'Instructor Resource Curation'), (16, 'Instructor Orchestration'),\n (17, 'Instructor Evaluation'), (18, 'Other'), (101, 'Jigsaw'), (102,\n 'Peer Instruction'), (103, 'Case Studies'), (104, 'Gallery Walk'),\n (105, 'Reflective Writing'), (106, 'Two-Stage Exam'), (107,\n 'Toolkit'), (108, 'One Minute Paper'), (109,\n 'Distributed Problem Solving'), (110, 'Peer Assessment')], default=\n 0)), ('node_type', models.PositiveIntegerField(choices=[(0,\n 'Activity Node'), (1, 'Course Node'), (2, 'Program Node')], default\n =0)), ('time_required', models.CharField(blank=True, max_length=30,\n null=True)), ('time_units', models.PositiveIntegerField(choices=[(0,\n ''), (1, 'seconds'), (2, 'minutes'), (3, 'hours'), (4, 'days'), (5,\n 'weeks'), (6, 'months'), (7, 'yrs'), (8, 'credits')], default=0)),\n ('represents_workflow', models.BooleanField(default=False)), (\n 'hash', models.UUIDField(default=uuid.uuid4, editable=False, unique\n =True)), ('author', models.ForeignKey(null=True, on_delete=django.\n db.models.deletion.SET_NULL, related_name='authored_nodes', to=\n settings.AUTH_USER_MODEL)), ('column', models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.DO_NOTHING, to=\n 'course_flow.Column'))]), migrations.CreateModel(name='NodeWeek',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('added_on', models.\n DateTimeField(auto_now_add=True)), ('rank', models.\n PositiveIntegerField(default=0)), ('node', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='course_flow.Node')\n )], options={'verbose_name': 'Node-Week Link',\n 'verbose_name_plural': 'Node-Week Links'}), migrations.CreateModel(\n name='Outcome', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('title',\n models.CharField(max_length=500)), ('description', models.TextField\n (max_length=500)), ('created_on', models.DateTimeField(auto_now_add\n =True)), ('last_modified', models.DateTimeField(auto_now=True)), (\n 'published', models.BooleanField(default=False)), ('is_original',\n models.BooleanField(default=True)), ('is_dropped', models.\n BooleanField(default=True)), ('depth', models.PositiveIntegerField(\n default=0)), ('hash', models.UUIDField(default=uuid.uuid4, editable\n =False, unique=True)), ('author', models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=settings.\n AUTH_USER_MODEL))], options={'verbose_name': 'Outcome',\n 'verbose_name_plural': 'Outcomes'}), migrations.CreateModel(name=\n 'OutcomeProject', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('added_on',\n models.DateTimeField(auto_now_add=True)), ('rank', models.\n PositiveIntegerField(default=0)), ('outcome', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Outcome'))], options={'verbose_name':\n 'Outcome-Project Link', 'verbose_name_plural':\n 'Outcome-Project Links'}), migrations.CreateModel(name='Project',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('title', models.CharField(\n blank=True, max_length=50, null=True)), ('description', models.\n CharField(blank=True, max_length=500, null=True)), ('created_on',\n models.DateTimeField(auto_now_add=True)), ('last_modified', models.\n DateTimeField(auto_now=True)), ('published', models.BooleanField(\n default=False)), ('is_original', 
models.BooleanField(default=False)\n ), ('author', models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), (\n 'outcomes', models.ManyToManyField(blank=True, through=\n 'course_flow.OutcomeProject', to='course_flow.Outcome')), (\n 'parent_project', models.ForeignKey(null=True, on_delete=django.db.\n models.deletion.SET_NULL, to='course_flow.Project'))], options={\n 'verbose_name': 'Project', 'verbose_name_plural': 'Projects'}),\n migrations.CreateModel(name='Week', fields=[('id', models.AutoField\n (auto_created=True, primary_key=True, serialize=False, verbose_name\n ='ID')), ('title', models.CharField(blank=True, max_length=50, null\n =True)), ('description', models.TextField(blank=True, max_length=\n 500, null=True)), ('created_on', models.DateTimeField(auto_now_add=\n True)), ('last_modified', models.DateTimeField(auto_now=True)), (\n 'default', models.BooleanField(default=False)), ('is_original',\n models.BooleanField(default=True)), ('published', models.\n BooleanField(default=False)), ('is_strategy', models.BooleanField(\n default=False)), ('hash', models.UUIDField(default=uuid.uuid4,\n editable=False, unique=True)), ('strategy_classification', models.\n PositiveIntegerField(choices=[(0, 'None'), (1, 'Jigsaw'), (2,\n 'Peer Instruction'), (3, 'Case Studies'), (4, 'Gallery Walk'), (5,\n 'Reflective Writing'), (6, 'Two-Stage Exam'), (7, 'Toolkit'), (8,\n 'One Minute Paper'), (9, 'Distributed Problem Solving'), (10,\n 'Peer Assessment'), (11, 'Other')], default=0)), ('week_type',\n models.PositiveIntegerField(choices=[(0, 'Part'), (1, 'Week'), (2,\n 'Term')], default=0)), ('author', models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=settings.\n AUTH_USER_MODEL)), ('nodes', models.ManyToManyField(blank=True,\n through='course_flow.NodeWeek', to='course_flow.Node'))], options={\n 'verbose_name': 'Week', 'verbose_name_plural': 'Weeks'}),\n migrations.CreateModel(name='WeekWorkflow', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('added_on', models.DateTimeField(auto_now_add\n =True)), ('rank', models.PositiveIntegerField(default=0)), ('week',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Week'))], options={'verbose_name':\n 'Week-Workflow Link', 'verbose_name_plural': 'Week-Workflow Links'}\n ), migrations.CreateModel(name='Workflow', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(blank=True,\n max_length=50, null=True)), ('description', models.TextField(blank=\n True, max_length=500, null=True)), ('created_on', models.\n DateTimeField(auto_now_add=True)), ('last_modified', models.\n DateTimeField(auto_now=True)), ('static', models.BooleanField(\n default=False)), ('published', models.BooleanField(default=False)),\n ('is_strategy', models.BooleanField(default=False)), (\n 'from_saltise', models.BooleanField(default=False)), ('is_original',\n models.BooleanField(default=True)), ('hash', models.UUIDField(\n default=uuid.uuid4, editable=False, unique=True)), ('outcomes_type',\n models.PositiveIntegerField(choices=[(0, 'Normal'), (1, 'Advanced')\n ], default=0)), ('outcomes_sort', models.PositiveIntegerField(\n choices=[(0, 'Time'), (1, 'Category'), (2, 'Task'), (3, 'Context')],\n default=0)), ('columns', models.ManyToManyField(blank=True, through\n ='course_flow.ColumnWorkflow', to='course_flow.Column')), (\n 
'parent_workflow', models.ForeignKey(null=True, on_delete=django.db\n .models.deletion.SET_NULL, to='course_flow.Workflow')), ('weeks',\n models.ManyToManyField(blank=True, through=\n 'course_flow.WeekWorkflow', to='course_flow.Week'))]), migrations.\n CreateModel(name='WorkflowProject', fields=[('id', models.AutoField\n (auto_created=True, primary_key=True, serialize=False, verbose_name\n ='ID')), ('added_on', models.DateTimeField(auto_now_add=True)), (\n 'rank', models.PositiveIntegerField(default=0)), ('project', models\n .ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Project')), ('workflow', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='course_flow.Workflow'))],\n options={'verbose_name': 'Workflow-Project Link',\n 'verbose_name_plural': 'Workflow-Project Links'}), migrations.\n AddField(model_name='weekworkflow', name='workflow', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Workflow')), migrations.AddField(model_name='week',\n name='original_strategy', field=models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=\n 'course_flow.Workflow')), migrations.AddField(model_name='week',\n name='parent_week', field=models.ForeignKey(null=True, on_delete=\n django.db.models.deletion.SET_NULL, to='course_flow.Week')),\n migrations.AddField(model_name='project', name='workflows', field=\n models.ManyToManyField(blank=True, through=\n 'course_flow.WorkflowProject', to='course_flow.Workflow')),\n migrations.CreateModel(name='OutcomeWorkflow', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('added_on', models.DateTimeField(\n auto_now_add=True)), ('rank', models.PositiveIntegerField(default=0\n )), ('outcome', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='course_flow.Outcome')), ('workflow', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'course_flow.Workflow'))], options={'verbose_name':\n 'Outcome-Workflow Link', 'verbose_name_plural':\n 'Outcome-Workflow Links'}), migrations.AddField(model_name=\n 'outcomeproject', name='project', field=models.ForeignKey(on_delete\n =django.db.models.deletion.CASCADE, to='course_flow.Project')),\n migrations.CreateModel(name='OutcomeOutcome', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('added_on', models.DateTimeField(auto_now_add\n =True)), ('rank', models.PositiveIntegerField(default=0)), ('child',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='parent_outcome_links', to='course_flow.Outcome')), (\n 'parent', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='child_outcome_links', to=\n 'course_flow.Outcome'))], options={'verbose_name':\n 'Outcome-Outcome Link', 'verbose_name_plural':\n 'Outcome-Outcome Links'}), migrations.CreateModel(name=\n 'OutcomeNode', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('added_on',\n models.DateTimeField(auto_now_add=True)), ('rank', models.\n PositiveIntegerField(default=0)), ('degree', models.\n PositiveIntegerField(default=1)), ('node', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='course_flow.Node')\n ), ('outcome', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='course_flow.Outcome'))], options={\n 'verbose_name': 'Outcome-Node Link', 
'verbose_name_plural':\n 'Outcome-Node Links'}), migrations.AddField(model_name='outcome',\n name='children', field=models.ManyToManyField(blank=True,\n related_name='parent_outcomes', through=\n 'course_flow.OutcomeOutcome', to='course_flow.Outcome')),\n migrations.AddField(model_name='outcome', name='parent_outcome',\n field=models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='course_flow.Outcome')), migrations.AddField(\n model_name='nodeweek', name='week', field=models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='course_flow.Week')\n ), migrations.CreateModel(name='NodeLink', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(blank=True,\n max_length=100, null=True)), ('published', models.BooleanField(\n default=False)), ('source_port', models.PositiveIntegerField(\n choices=[(1, 'e'), (2, 's'), (3, 'w')], default=2)), ('target_port',\n models.PositiveIntegerField(choices=[(0, 'n'), (1, 'e'), (3, 'w')],\n default=0)), ('dashed', models.BooleanField(default=False)), (\n 'created_on', models.DateTimeField(auto_now_add=True)), (\n 'last_modified', models.DateTimeField(auto_now=True)), (\n 'is_original', models.BooleanField(default=True)), ('hash', models.\n UUIDField(default=uuid.uuid4, editable=False, unique=True)), (\n 'author', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), (\n 'parent_nodelink', models.ForeignKey(null=True, on_delete=django.db\n .models.deletion.SET_NULL, to='course_flow.NodeLink')), (\n 'source_node', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, related_name='outgoing_links', to=\n 'course_flow.Node')), ('target_node', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='incoming_links',\n to='course_flow.Node'))], options={'verbose_name': 'Node Link',\n 'verbose_name_plural': 'Node Links'}), migrations.CreateModel(name=\n 'NodeCompletionStatus', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('is_completed', models.BooleanField(default=False)), (\n 'node', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to='course_flow.Node')), ('student', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL))], options={'verbose_name':\n 'Node Completion Status', 'verbose_name_plural':\n 'Node Completion Statuses'}), migrations.AddField(model_name='node',\n name='linked_workflow', field=models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=\n 'course_flow.Workflow')), migrations.AddField(model_name='node',\n name='outcomes', field=models.ManyToManyField(blank=True, through=\n 'course_flow.OutcomeNode', to='course_flow.Outcome')), migrations.\n AddField(model_name='node', name='parent_node', field=models.\n ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to='course_flow.Node')), migrations.AddField(model_name='node',\n name='students', field=models.ManyToManyField(blank=True,\n related_name='assigned_nodes', through=\n 'course_flow.NodeCompletionStatus', to=settings.AUTH_USER_MODEL)),\n migrations.AddField(model_name='columnworkflow', name='workflow',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n to='course_flow.Workflow')), migrations.CreateModel(name='Program',\n fields=[('workflow_ptr', models.OneToOneField(auto_created=True,\n 
on_delete=django.db.models.deletion.CASCADE, parent_link=True,\n primary_key=True, serialize=False, to='course_flow.Workflow')), (\n 'author', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.SET_NULL, to=settings.AUTH_USER_MODEL))], bases=(\n 'course_flow.workflow',)), migrations.CreateModel(name='Course',\n fields=[('workflow_ptr', models.OneToOneField(auto_created=True,\n on_delete=django.db.models.deletion.CASCADE, parent_link=True,\n primary_key=True, serialize=False, to='course_flow.Workflow')), (\n 'author', models.ForeignKey(null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='authored_courses', to=settings.\n AUTH_USER_MODEL)), ('discipline', models.ForeignKey(null=True,\n on_delete=django.db.models.deletion.SET_NULL, to=\n 'course_flow.Discipline')), ('students', models.ManyToManyField(\n blank=True, related_name='assigned_courses', to=settings.\n AUTH_USER_MODEL))], bases=('course_flow.workflow',)), migrations.\n CreateModel(name='Activity', fields=[('workflow_ptr', models.\n OneToOneField(auto_created=True, on_delete=django.db.models.\n deletion.CASCADE, parent_link=True, primary_key=True, serialize=\n False, to='course_flow.Workflow')), ('author', models.ForeignKey(\n null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='authored_activities', to=settings.AUTH_USER_MODEL)),\n ('students', models.ManyToManyField(blank=True, related_name=\n 'assigned_activities', to=settings.AUTH_USER_MODEL))], options={\n 'verbose_name': 'Activity', 'verbose_name_plural': 'Activities'},\n bases=('course_flow.workflow',))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,849 |
db9e2f017318cc1110dc15282b7ef86331c00028
|
from __future__ import absolute_import
from dancedeets.servlets import api
from . import db
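# JSON API for a user's favorite events: GET lists them, POST adds one, DELETE removes one.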
@api.apiroute(r'/favorites')
class RsvpAjaxHandler(api.ApiHandler):
def get(self):
favorite_event_ids = db.get_favorite_event_ids_for_user(user_id=self.fbl.fb_uid)
favorites_json = {'favorites': favorite_event_ids}
self.write_json_success(favorites_json)
def post(self):
if self.json_body:
event_id = self.json_body.get('event_id')
else:
event_id = self.request.get('event_id')
db.add_favorite(self.fbl.fb_uid, event_id)
self.write_json_success()
def delete(self):
if self.json_body:
event_id = self.json_body.get('event_id')
else:
event_id = self.request.get('event_id')
db.delete_favorite(self.fbl.fb_uid, event_id)
self.write_json_success()
|
[
"from __future__ import absolute_import\n\nfrom dancedeets.servlets import api\nfrom . import db\n\n\[email protected](r'/favorites')\nclass RsvpAjaxHandler(api.ApiHandler):\n def get(self):\n favorite_event_ids = db.get_favorite_event_ids_for_user(user_id=self.fbl.fb_uid)\n favorites_json = {'favorites': favorite_event_ids}\n self.write_json_success(favorites_json)\n\n def post(self):\n if self.json_body:\n event_id = self.json_body.get('event_id')\n else:\n event_id = self.request.get('event_id')\n db.add_favorite(self.fbl.fb_uid, event_id)\n self.write_json_success()\n\n def delete(self):\n if self.json_body:\n event_id = self.json_body.get('event_id')\n else:\n event_id = self.request.get('event_id')\n db.delete_favorite(self.fbl.fb_uid, event_id)\n self.write_json_success()\n",
"from __future__ import absolute_import\nfrom dancedeets.servlets import api\nfrom . import db\n\n\[email protected]('/favorites')\nclass RsvpAjaxHandler(api.ApiHandler):\n\n def get(self):\n favorite_event_ids = db.get_favorite_event_ids_for_user(user_id=\n self.fbl.fb_uid)\n favorites_json = {'favorites': favorite_event_ids}\n self.write_json_success(favorites_json)\n\n def post(self):\n if self.json_body:\n event_id = self.json_body.get('event_id')\n else:\n event_id = self.request.get('event_id')\n db.add_favorite(self.fbl.fb_uid, event_id)\n self.write_json_success()\n\n def delete(self):\n if self.json_body:\n event_id = self.json_body.get('event_id')\n else:\n event_id = self.request.get('event_id')\n db.delete_favorite(self.fbl.fb_uid, event_id)\n self.write_json_success()\n",
"<import token>\n\n\[email protected]('/favorites')\nclass RsvpAjaxHandler(api.ApiHandler):\n\n def get(self):\n favorite_event_ids = db.get_favorite_event_ids_for_user(user_id=\n self.fbl.fb_uid)\n favorites_json = {'favorites': favorite_event_ids}\n self.write_json_success(favorites_json)\n\n def post(self):\n if self.json_body:\n event_id = self.json_body.get('event_id')\n else:\n event_id = self.request.get('event_id')\n db.add_favorite(self.fbl.fb_uid, event_id)\n self.write_json_success()\n\n def delete(self):\n if self.json_body:\n event_id = self.json_body.get('event_id')\n else:\n event_id = self.request.get('event_id')\n db.delete_favorite(self.fbl.fb_uid, event_id)\n self.write_json_success()\n",
"<import token>\n\n\[email protected]('/favorites')\nclass RsvpAjaxHandler(api.ApiHandler):\n\n def get(self):\n favorite_event_ids = db.get_favorite_event_ids_for_user(user_id=\n self.fbl.fb_uid)\n favorites_json = {'favorites': favorite_event_ids}\n self.write_json_success(favorites_json)\n\n def post(self):\n if self.json_body:\n event_id = self.json_body.get('event_id')\n else:\n event_id = self.request.get('event_id')\n db.add_favorite(self.fbl.fb_uid, event_id)\n self.write_json_success()\n <function token>\n",
"<import token>\n\n\[email protected]('/favorites')\nclass RsvpAjaxHandler(api.ApiHandler):\n <function token>\n\n def post(self):\n if self.json_body:\n event_id = self.json_body.get('event_id')\n else:\n event_id = self.request.get('event_id')\n db.add_favorite(self.fbl.fb_uid, event_id)\n self.write_json_success()\n <function token>\n",
"<import token>\n\n\[email protected]('/favorites')\nclass RsvpAjaxHandler(api.ApiHandler):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,850 |
6a70bfb3d7d46f7bf5463eb5f852bb72c8a5c8fa
|
# Generated by Django 3.0 on 2020-04-07 09:46
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='JDcommodityInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('productname', models.CharField(max_length=256)),
('productimg', models.CharField(max_length=128)),
('productprice', models.DecimalField(decimal_places=2, max_digits=5)),
],
),
migrations.CreateModel(
name='ToutiaoIInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
('auther', models.CharField(max_length=16)),
('date', models.DateField()),
('img', models.CharField(max_length=128)),
('linkaddress', models.CharField(max_length=128)),
],
),
]
|
[
"# Generated by Django 3.0 on 2020-04-07 09:46\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='JDcommodityInfo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('productname', models.CharField(max_length=256)),\n ('productimg', models.CharField(max_length=128)),\n ('productprice', models.DecimalField(decimal_places=2, max_digits=5)),\n ],\n ),\n migrations.CreateModel(\n name='ToutiaoIInfo',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=64)),\n ('auther', models.CharField(max_length=16)),\n ('date', models.DateField()),\n ('img', models.CharField(max_length=128)),\n ('linkaddress', models.CharField(max_length=128)),\n ],\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='JDcommodityInfo', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('productname', models.\n CharField(max_length=256)), ('productimg', models.CharField(\n max_length=128)), ('productprice', models.DecimalField(\n decimal_places=2, max_digits=5))]), migrations.CreateModel(name=\n 'ToutiaoIInfo', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('title',\n models.CharField(max_length=64)), ('auther', models.CharField(\n max_length=16)), ('date', models.DateField()), ('img', models.\n CharField(max_length=128)), ('linkaddress', models.CharField(\n max_length=128))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='JDcommodityInfo', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('productname', models.\n CharField(max_length=256)), ('productimg', models.CharField(\n max_length=128)), ('productprice', models.DecimalField(\n decimal_places=2, max_digits=5))]), migrations.CreateModel(name=\n 'ToutiaoIInfo', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('title',\n models.CharField(max_length=64)), ('auther', models.CharField(\n max_length=16)), ('date', models.DateField()), ('img', models.\n CharField(max_length=128)), ('linkaddress', models.CharField(\n max_length=128))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,851 |
7acf6d7e9aabdfc2911972467690a237aefd78ee
|
from django.urls import include, path
from rest_framework.routers import SimpleRouter
from adverts.api import views
app_name = "adverts"
#########
# Routes
#########
router = SimpleRouter()
router.register(r"currentadvert", views.CurrentAdvertViewSet, "currentadvert")
router.register(r"advert", views.AdvertViewSet)
router.register(r"adverttype", views.AdvertTypeViewSet)
router.register(r"advertiser", views.AdvertiserViewSet)
urlpatterns = [
path("", include(router.urls)),
]
|
[
"from django.urls import include, path\nfrom rest_framework.routers import SimpleRouter\n\nfrom adverts.api import views\n\napp_name = \"adverts\"\n\n#########\n# Routes\n#########\nrouter = SimpleRouter()\nrouter.register(r\"currentadvert\", views.CurrentAdvertViewSet, \"currentadvert\")\nrouter.register(r\"advert\", views.AdvertViewSet)\nrouter.register(r\"adverttype\", views.AdvertTypeViewSet)\nrouter.register(r\"advertiser\", views.AdvertiserViewSet)\n\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n]\n",
"from django.urls import include, path\nfrom rest_framework.routers import SimpleRouter\nfrom adverts.api import views\napp_name = 'adverts'\nrouter = SimpleRouter()\nrouter.register('currentadvert', views.CurrentAdvertViewSet, 'currentadvert')\nrouter.register('advert', views.AdvertViewSet)\nrouter.register('adverttype', views.AdvertTypeViewSet)\nrouter.register('advertiser', views.AdvertiserViewSet)\nurlpatterns = [path('', include(router.urls))]\n",
"<import token>\napp_name = 'adverts'\nrouter = SimpleRouter()\nrouter.register('currentadvert', views.CurrentAdvertViewSet, 'currentadvert')\nrouter.register('advert', views.AdvertViewSet)\nrouter.register('adverttype', views.AdvertTypeViewSet)\nrouter.register('advertiser', views.AdvertiserViewSet)\nurlpatterns = [path('', include(router.urls))]\n",
"<import token>\n<assignment token>\nrouter.register('currentadvert', views.CurrentAdvertViewSet, 'currentadvert')\nrouter.register('advert', views.AdvertViewSet)\nrouter.register('adverttype', views.AdvertTypeViewSet)\nrouter.register('advertiser', views.AdvertiserViewSet)\n<assignment token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n"
] | false |
98,852 |
11736594c666051ed0bb2fe603f22e83ca531324
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Joel Palmius
import bpy
import json
import os
from bpy.props import BoolProperty, StringProperty, EnumProperty, IntProperty, CollectionProperty, FloatProperty
_licenses = []
_licenses.append(("CC0", "CC0", "Creative Commons Zero", 1))
_licenses.append(("CC-BY", "CC-BY", "Creative Commons Attribution", 2))
_licenses.append(("AGPL", "AGPL", "Affero Gnu Public License (don't use unless absolutely necessary)", 3))
_licenseDescription = "Set an output license for the material. This will have no practical effect apart from being included in the written MHMAT file."
_textures = []
_textures.append(("NORMALIZE", "Normalize", "Copy to a name based on MHMAT filename", 1))
_textures.append(("COPY", "Copy", "Copy without rename", 2))
_textures.append(("LINK", "Link", "Link to original location, with absolute pathname", 3))
_texturesDescription = "How do we handle texture file names and paths? Unless you know what you are doing, you will want to use normalize. This will copy all images to an appropriate location with an appropriate filename, valid for uploading to the asset repository."
_litspheres = []
_litspheres.append(("leather", "leather", "Leather litsphere. This is appropriate for all clothes, not only leather.", 1))
_litspheres.append(("standard_skin", "standard skin", "Standard skin litsphere. This is appropriate for all skins.", 2))
_litspheres.append(("african", "african skin", "African skin litsphere", 3))
_litspheres.append(("asian", "asian skin", "Asian skin litsphere", 4))
_litspheres.append(("caucasian", "caucasian skin", "Caucasian skin litsphere", 5))
_litspheres.append(("toon01", "toon", "Toon skin litsphere", 6))
_litspheres.append(("eye", "eye", "Eye litsphere", 7))
_litspheres.append(("hair", "hair", "Hair litsphere", 8))
_litsphereDescription = "A litsphere texture is used for emulate lighting and reflections inside MakeHuman. It thus has no effect outside MakeHuman. For any clothing (not just leather), you will want to use the \"leather\" litsphere."
def extraProperties():
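    # Register the add-on's custom MakeHuman-related properties on Blender's Object and Scene types.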
# Object properties, normally set by MPFB
if not hasattr(bpy.types.Object, "MhObjectType"):
bpy.types.Object.MhObjectType = StringProperty(name="Object type", description="This is what type of MakeHuman object is (such as Clothes, Eyes...)", default="")
if not hasattr(bpy.types.Object, "MhHuman"):
bpy.types.Object.MhHuman = BoolProperty(name="Is MH Human", description="Old makeclothes property for deciding object type", default=False)
bpy.types.Scene.MhMsCreateDiffuse = BoolProperty(name="Create diffuse placeholder", description="Create a placeholder for a diffuse texture", default=True)
bpy.types.Scene.MhMsCreateNormal = BoolProperty(name="Create normal map placeholder", description="Create a placeholder for a normal map", default=False)
bpy.types.Scene.MhMsCreateBump = BoolProperty(name="Create bump map placeholder", description="Create a placeholder for a bump map", default=False)
bpy.types.Scene.MhMsOverwrite1 = BoolProperty(name="Overwrite existing (create)", description="Overwrite existing material(s) on object", default=False)
bpy.types.Scene.MhMsOverwrite2 = BoolProperty(name="Overwrite existing (import)", description="Overwrite existing material(s) on object", default=False)
# Metadata keys
bpy.types.Object.MhMsName = StringProperty(name="Name", description="The name of this material. This will have little practical effect apart from being written to the mhmat file.", default="material")
bpy.types.Object.MhMsTag = StringProperty(name="Tag", description="A category the material fits into, for example \"blond\" or \"female\". This will influence sorting and filtering in MH.", default="")
bpy.types.Object.MhMsDescription = StringProperty(name="Description", description="A description of the material. It will have little practical effect apart from being written to the mhmat file.", default="")
bpy.types.Object.MhMsMatLicense = bpy.props.EnumProperty(items=_licenses, name="License", description=_licenseDescription, default="CC0")
bpy.types.Object.MhMsAuthor = StringProperty(name="Author", description="The author of this material. This will have little practical effect apart from being written to the mhmat file.", default="")
bpy.types.Object.MhMsHomepage = StringProperty(name="Home page", description="The home page of the material, if any. This will have little practical effect apart from being written to the mhmat file.", default="")
# Boolean keys
bpy.types.Object.MhMsBackfaceCull = BoolProperty(name="Backface culling", description="If the back side of faces with the material should be invisible. This has no effect in exports, but may be important in MH", default=True)
bpy.types.Object.MhMsCastShadows = BoolProperty(name="Cast shadows", description="If the material casts shadows. This has no effect in exports.", default=True)
bpy.types.Object.MhMsReceiveShadows = BoolProperty(name="Receive shadows", description="If the material receives shadows. This has no effect in exports.", default=True)
bpy.types.Object.MhMsAlphaToCoverage = BoolProperty(name="AlphaToCoverage", description="I have no idea what this does, but it might be important", default=True)
bpy.types.Object.MhMsShadeless = BoolProperty(name="Shadeless", description="If the material is shadeless. It is unlikely you want this.", default=False)
bpy.types.Object.MhMsWireframe = BoolProperty(name="Wireframe", description="If the material is to be rendered as a wireframe. It is unlikely you want this.", default=False)
bpy.types.Object.MhMsTransparent = BoolProperty(name="Transparent", description="If the material is to be rendered as a transparent. It is unlikely you want this, as the normal approach is using the alpha channel in the diffuse texture.", default=False)
bpy.types.Object.MhMsDepthless = BoolProperty(name="Depthless", description="If the material is to be rendered as having no depth. It is unlikely you want this.", default=False)
bpy.types.Object.MhMsSSSEnable = BoolProperty(name="SSS Enable", description="If the material is to be rendered with sub surface scattering.", default=False)
bpy.types.Object.MhMsUseLit = BoolProperty(name="Use Litsphere", description="Use the litsphere shader when rendering material in MakeHuman. This does not have any effect on materials outside MakeHuman", default=True)
bpy.types.Object.MhMsWriteBlendMaterial = BoolProperty(name="Write Blend material", description="Stores the second material on the active object in a blend file", default=False)
# Options
bpy.types.Object.MhMsLitsphere = bpy.props.EnumProperty(items=_litspheres, name="Litsphere", description=_litsphereDescription, default="leather")
bpy.types.Object.MhMsTextures = bpy.props.EnumProperty(items=_textures, name="Textures", description=_texturesDescription, default="NORMALIZE")
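
# A minimal usage sketch (an assumption, not taken from this file): in a Blender add-on,
# extraProperties() would typically be called once from the add-on's register() hook, e.g.
#
#     def register():
#         extraProperties()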
|
[
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Author: Joel Palmius\n\nimport bpy\nimport json\nimport os\nfrom bpy.props import BoolProperty, StringProperty, EnumProperty, IntProperty, CollectionProperty, FloatProperty\n\n_licenses = []\n_licenses.append((\"CC0\", \"CC0\", \"Creative Commons Zero\", 1))\n_licenses.append((\"CC-BY\", \"CC-BY\", \"Creative Commons Attribution\", 2))\n_licenses.append((\"AGPL\", \"AGPL\", \"Affero Gnu Public License (don't use unless absolutely necessary)\", 3))\n_licenseDescription = \"Set an output license for the material. This will have no practical effect apart from being included in the written MHMAT file.\"\n\n_textures = []\n_textures.append((\"NORMALIZE\", \"Normalize\", \"Copy to a name based on MHMAT filename\", 1))\n_textures.append((\"COPY\", \"Copy\", \"Copy without rename\", 2))\n_textures.append((\"LINK\", \"Link\", \"Link to original location, with absolute pathname\", 3))\n_texturesDescription = \"How do we handle texture file names and paths? Unless you know what you are doing, you will want to use normalize. This will copy all images to an appropriate location with an appropriate filename, valid for uploading to the asset repository.\"\n\n_litspheres = []\n_litspheres.append((\"leather\", \"leather\", \"Leather litsphere. This is appropriate for all clothes, not only leather.\", 1))\n_litspheres.append((\"standard_skin\", \"standard skin\", \"Standard skin litsphere. This is appropriate for all skins.\", 2))\n_litspheres.append((\"african\", \"african skin\", \"African skin litsphere\", 3))\n_litspheres.append((\"asian\", \"asian skin\", \"Asian skin litsphere\", 4))\n_litspheres.append((\"caucasian\", \"caucasian skin\", \"Caucasian skin litsphere\", 5))\n_litspheres.append((\"toon01\", \"toon\", \"Toon skin litsphere\", 6))\n_litspheres.append((\"eye\", \"eye\", \"Eye litsphere\", 7))\n_litspheres.append((\"hair\", \"hair\", \"Hair litsphere\", 8))\n_litsphereDescription = \"A litsphere texture is used for emulate lighting and reflections inside MakeHuman. It thus has no effect outside MakeHuman. 
For any clothing (not just leather), you will want to use the \\\"leather\\\" litsphere.\"\n\ndef extraProperties():\n\n # Object properties, normally set by MPFB\n if not hasattr(bpy.types.Object, \"MhObjectType\"):\n bpy.types.Object.MhObjectType = StringProperty(name=\"Object type\", description=\"This is what type of MakeHuman object is (such as Clothes, Eyes...)\", default=\"\")\n if not hasattr(bpy.types.Object, \"MhHuman\"):\n bpy.types.Object.MhHuman = BoolProperty(name=\"Is MH Human\", description=\"Old makeclothes property for deciding object type\", default=False)\n\n bpy.types.Scene.MhMsCreateDiffuse = BoolProperty(name=\"Create diffuse placeholder\", description=\"Create a placeholder for a diffuse texture\", default=True)\n bpy.types.Scene.MhMsCreateNormal = BoolProperty(name=\"Create normal map placeholder\", description=\"Create a placeholder for a normal map\", default=False)\n bpy.types.Scene.MhMsCreateBump = BoolProperty(name=\"Create bump map placeholder\", description=\"Create a placeholder for a bump map\", default=False)\n\n bpy.types.Scene.MhMsOverwrite1 = BoolProperty(name=\"Overwrite existing (create)\", description=\"Overwrite existing material(s) on object\", default=False)\n bpy.types.Scene.MhMsOverwrite2 = BoolProperty(name=\"Overwrite existing (import)\", description=\"Overwrite existing material(s) on object\", default=False)\n\n # Metadata keys\n bpy.types.Object.MhMsName = StringProperty(name=\"Name\", description=\"The name of this material. This will have little practical effect apart from being written to the mhmat file.\", default=\"material\")\n bpy.types.Object.MhMsTag = StringProperty(name=\"Tag\", description=\"A category the material fits into, for example \\\"blond\\\" or \\\"female\\\". This will influence sorting and filtering in MH.\", default=\"\")\n bpy.types.Object.MhMsDescription = StringProperty(name=\"Description\", description=\"A description of the material. It will have little practical effect apart from being written to the mhmat file.\", default=\"\")\n bpy.types.Object.MhMsMatLicense = bpy.props.EnumProperty(items=_licenses, name=\"License\", description=_licenseDescription, default=\"CC0\")\n bpy.types.Object.MhMsAuthor = StringProperty(name=\"Author\", description=\"The author of this material. This will have little practical effect apart from being written to the mhmat file.\", default=\"\")\n bpy.types.Object.MhMsHomepage = StringProperty(name=\"Home page\", description=\"The home page of the material, if any. This will have little practical effect apart from being written to the mhmat file.\", default=\"\")\n\n # Boolean keys\n bpy.types.Object.MhMsBackfaceCull = BoolProperty(name=\"Backface culling\", description=\"If the back side of faces with the material should be invisible. This has no effect in exports, but may be important in MH\", default=True)\n bpy.types.Object.MhMsCastShadows = BoolProperty(name=\"Cast shadows\", description=\"If the material casts shadows. This has no effect in exports.\", default=True)\n bpy.types.Object.MhMsReceiveShadows = BoolProperty(name=\"Receive shadows\", description=\"If the material receives shadows. This has no effect in exports.\", default=True)\n bpy.types.Object.MhMsAlphaToCoverage = BoolProperty(name=\"AlphaToCoverage\", description=\"I have no idea what this does, but it might be important\", default=True)\n bpy.types.Object.MhMsShadeless = BoolProperty(name=\"Shadeless\", description=\"If the material is shadeless. 
It is unlikely you want this.\", default=False)\n bpy.types.Object.MhMsWireframe = BoolProperty(name=\"Wireframe\", description=\"If the material is to be rendered as a wireframe. It is unlikely you want this.\", default=False)\n bpy.types.Object.MhMsTransparent = BoolProperty(name=\"Transparent\", description=\"If the material is to be rendered as a transparent. It is unlikely you want this, as the normal approach is using the alpha channel in the diffuse texture.\", default=False)\n bpy.types.Object.MhMsDepthless = BoolProperty(name=\"Depthless\", description=\"If the material is to be rendered as having no depth. It is unlikely you want this.\", default=False)\n bpy.types.Object.MhMsSSSEnable = BoolProperty(name=\"SSS Enable\", description=\"If the material is to be rendered with sub surface scattering.\", default=False)\n bpy.types.Object.MhMsUseLit = BoolProperty(name=\"Use Litsphere\", description=\"Use the litsphere shader when rendering material in MakeHuman. This does not have any effect on materials outside MakeHuman\", default=True)\n bpy.types.Object.MhMsWriteBlendMaterial = BoolProperty(name=\"Write Blend material\", description=\"Stores the second material on the active object in a blend file\", default=False)\n\n # Options\n bpy.types.Object.MhMsLitsphere = bpy.props.EnumProperty(items=_litspheres, name=\"Litsphere\", description=_litsphereDescription, default=\"leather\")\n bpy.types.Object.MhMsTextures = bpy.props.EnumProperty(items=_textures, name=\"Textures\", description=_texturesDescription, default=\"NORMALIZE\")\n",
"import bpy\nimport json\nimport os\nfrom bpy.props import BoolProperty, StringProperty, EnumProperty, IntProperty, CollectionProperty, FloatProperty\n_licenses = []\n_licenses.append(('CC0', 'CC0', 'Creative Commons Zero', 1))\n_licenses.append(('CC-BY', 'CC-BY', 'Creative Commons Attribution', 2))\n_licenses.append(('AGPL', 'AGPL',\n \"Affero Gnu Public License (don't use unless absolutely necessary)\", 3))\n_licenseDescription = (\n 'Set an output license for the material. This will have no practical effect apart from being included in the written MHMAT file.'\n )\n_textures = []\n_textures.append(('NORMALIZE', 'Normalize',\n 'Copy to a name based on MHMAT filename', 1))\n_textures.append(('COPY', 'Copy', 'Copy without rename', 2))\n_textures.append(('LINK', 'Link',\n 'Link to original location, with absolute pathname', 3))\n_texturesDescription = (\n 'How do we handle texture file names and paths? Unless you know what you are doing, you will want to use normalize. This will copy all images to an appropriate location with an appropriate filename, valid for uploading to the asset repository.'\n )\n_litspheres = []\n_litspheres.append(('leather', 'leather',\n 'Leather litsphere. This is appropriate for all clothes, not only leather.'\n , 1))\n_litspheres.append(('standard_skin', 'standard skin',\n 'Standard skin litsphere. This is appropriate for all skins.', 2))\n_litspheres.append(('african', 'african skin', 'African skin litsphere', 3))\n_litspheres.append(('asian', 'asian skin', 'Asian skin litsphere', 4))\n_litspheres.append(('caucasian', 'caucasian skin',\n 'Caucasian skin litsphere', 5))\n_litspheres.append(('toon01', 'toon', 'Toon skin litsphere', 6))\n_litspheres.append(('eye', 'eye', 'Eye litsphere', 7))\n_litspheres.append(('hair', 'hair', 'Hair litsphere', 8))\n_litsphereDescription = (\n 'A litsphere texture is used for emulate lighting and reflections inside MakeHuman. It thus has no effect outside MakeHuman. For any clothing (not just leather), you will want to use the \"leather\" litsphere.'\n )\n\n\ndef extraProperties():\n if not hasattr(bpy.types.Object, 'MhObjectType'):\n bpy.types.Object.MhObjectType = StringProperty(name='Object type',\n description=\n 'This is what type of MakeHuman object is (such as Clothes, Eyes...)'\n , default='')\n if not hasattr(bpy.types.Object, 'MhHuman'):\n bpy.types.Object.MhHuman = BoolProperty(name='Is MH Human',\n description='Old makeclothes property for deciding object type',\n default=False)\n bpy.types.Scene.MhMsCreateDiffuse = BoolProperty(name=\n 'Create diffuse placeholder', description=\n 'Create a placeholder for a diffuse texture', default=True)\n bpy.types.Scene.MhMsCreateNormal = BoolProperty(name=\n 'Create normal map placeholder', description=\n 'Create a placeholder for a normal map', default=False)\n bpy.types.Scene.MhMsCreateBump = BoolProperty(name=\n 'Create bump map placeholder', description=\n 'Create a placeholder for a bump map', default=False)\n bpy.types.Scene.MhMsOverwrite1 = BoolProperty(name=\n 'Overwrite existing (create)', description=\n 'Overwrite existing material(s) on object', default=False)\n bpy.types.Scene.MhMsOverwrite2 = BoolProperty(name=\n 'Overwrite existing (import)', description=\n 'Overwrite existing material(s) on object', default=False)\n bpy.types.Object.MhMsName = StringProperty(name='Name', description=\n 'The name of this material. 
This will have little practical effect apart from being written to the mhmat file.'\n , default='material')\n bpy.types.Object.MhMsTag = StringProperty(name='Tag', description=\n 'A category the material fits into, for example \"blond\" or \"female\". This will influence sorting and filtering in MH.'\n , default='')\n bpy.types.Object.MhMsDescription = StringProperty(name='Description',\n description=\n 'A description of the material. It will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsMatLicense = bpy.props.EnumProperty(items=\n _licenses, name='License', description=_licenseDescription, default\n ='CC0')\n bpy.types.Object.MhMsAuthor = StringProperty(name='Author', description\n =\n 'The author of this material. This will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsHomepage = StringProperty(name='Home page',\n description=\n 'The home page of the material, if any. This will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsBackfaceCull = BoolProperty(name=\n 'Backface culling', description=\n 'If the back side of faces with the material should be invisible. This has no effect in exports, but may be important in MH'\n , default=True)\n bpy.types.Object.MhMsCastShadows = BoolProperty(name='Cast shadows',\n description=\n 'If the material casts shadows. This has no effect in exports.',\n default=True)\n bpy.types.Object.MhMsReceiveShadows = BoolProperty(name=\n 'Receive shadows', description=\n 'If the material receives shadows. This has no effect in exports.',\n default=True)\n bpy.types.Object.MhMsAlphaToCoverage = BoolProperty(name=\n 'AlphaToCoverage', description=\n 'I have no idea what this does, but it might be important', default\n =True)\n bpy.types.Object.MhMsShadeless = BoolProperty(name='Shadeless',\n description=\n 'If the material is shadeless. It is unlikely you want this.',\n default=False)\n bpy.types.Object.MhMsWireframe = BoolProperty(name='Wireframe',\n description=\n 'If the material is to be rendered as a wireframe. It is unlikely you want this.'\n , default=False)\n bpy.types.Object.MhMsTransparent = BoolProperty(name='Transparent',\n description=\n 'If the material is to be rendered as a transparent. It is unlikely you want this, as the normal approach is using the alpha channel in the diffuse texture.'\n , default=False)\n bpy.types.Object.MhMsDepthless = BoolProperty(name='Depthless',\n description=\n 'If the material is to be rendered as having no depth. It is unlikely you want this.'\n , default=False)\n bpy.types.Object.MhMsSSSEnable = BoolProperty(name='SSS Enable',\n description=\n 'If the material is to be rendered with sub surface scattering.',\n default=False)\n bpy.types.Object.MhMsUseLit = BoolProperty(name='Use Litsphere',\n description=\n 'Use the litsphere shader when rendering material in MakeHuman. 
This does not have any effect on materials outside MakeHuman'\n , default=True)\n bpy.types.Object.MhMsWriteBlendMaterial = BoolProperty(name=\n 'Write Blend material', description=\n 'Stores the second material on the active object in a blend file',\n default=False)\n bpy.types.Object.MhMsLitsphere = bpy.props.EnumProperty(items=\n _litspheres, name='Litsphere', description=_litsphereDescription,\n default='leather')\n bpy.types.Object.MhMsTextures = bpy.props.EnumProperty(items=_textures,\n name='Textures', description=_texturesDescription, default='NORMALIZE')\n",
"<import token>\n_licenses = []\n_licenses.append(('CC0', 'CC0', 'Creative Commons Zero', 1))\n_licenses.append(('CC-BY', 'CC-BY', 'Creative Commons Attribution', 2))\n_licenses.append(('AGPL', 'AGPL',\n \"Affero Gnu Public License (don't use unless absolutely necessary)\", 3))\n_licenseDescription = (\n 'Set an output license for the material. This will have no practical effect apart from being included in the written MHMAT file.'\n )\n_textures = []\n_textures.append(('NORMALIZE', 'Normalize',\n 'Copy to a name based on MHMAT filename', 1))\n_textures.append(('COPY', 'Copy', 'Copy without rename', 2))\n_textures.append(('LINK', 'Link',\n 'Link to original location, with absolute pathname', 3))\n_texturesDescription = (\n 'How do we handle texture file names and paths? Unless you know what you are doing, you will want to use normalize. This will copy all images to an appropriate location with an appropriate filename, valid for uploading to the asset repository.'\n )\n_litspheres = []\n_litspheres.append(('leather', 'leather',\n 'Leather litsphere. This is appropriate for all clothes, not only leather.'\n , 1))\n_litspheres.append(('standard_skin', 'standard skin',\n 'Standard skin litsphere. This is appropriate for all skins.', 2))\n_litspheres.append(('african', 'african skin', 'African skin litsphere', 3))\n_litspheres.append(('asian', 'asian skin', 'Asian skin litsphere', 4))\n_litspheres.append(('caucasian', 'caucasian skin',\n 'Caucasian skin litsphere', 5))\n_litspheres.append(('toon01', 'toon', 'Toon skin litsphere', 6))\n_litspheres.append(('eye', 'eye', 'Eye litsphere', 7))\n_litspheres.append(('hair', 'hair', 'Hair litsphere', 8))\n_litsphereDescription = (\n 'A litsphere texture is used for emulate lighting and reflections inside MakeHuman. It thus has no effect outside MakeHuman. For any clothing (not just leather), you will want to use the \"leather\" litsphere.'\n )\n\n\ndef extraProperties():\n if not hasattr(bpy.types.Object, 'MhObjectType'):\n bpy.types.Object.MhObjectType = StringProperty(name='Object type',\n description=\n 'This is what type of MakeHuman object is (such as Clothes, Eyes...)'\n , default='')\n if not hasattr(bpy.types.Object, 'MhHuman'):\n bpy.types.Object.MhHuman = BoolProperty(name='Is MH Human',\n description='Old makeclothes property for deciding object type',\n default=False)\n bpy.types.Scene.MhMsCreateDiffuse = BoolProperty(name=\n 'Create diffuse placeholder', description=\n 'Create a placeholder for a diffuse texture', default=True)\n bpy.types.Scene.MhMsCreateNormal = BoolProperty(name=\n 'Create normal map placeholder', description=\n 'Create a placeholder for a normal map', default=False)\n bpy.types.Scene.MhMsCreateBump = BoolProperty(name=\n 'Create bump map placeholder', description=\n 'Create a placeholder for a bump map', default=False)\n bpy.types.Scene.MhMsOverwrite1 = BoolProperty(name=\n 'Overwrite existing (create)', description=\n 'Overwrite existing material(s) on object', default=False)\n bpy.types.Scene.MhMsOverwrite2 = BoolProperty(name=\n 'Overwrite existing (import)', description=\n 'Overwrite existing material(s) on object', default=False)\n bpy.types.Object.MhMsName = StringProperty(name='Name', description=\n 'The name of this material. This will have little practical effect apart from being written to the mhmat file.'\n , default='material')\n bpy.types.Object.MhMsTag = StringProperty(name='Tag', description=\n 'A category the material fits into, for example \"blond\" or \"female\". 
This will influence sorting and filtering in MH.'\n , default='')\n bpy.types.Object.MhMsDescription = StringProperty(name='Description',\n description=\n 'A description of the material. It will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsMatLicense = bpy.props.EnumProperty(items=\n _licenses, name='License', description=_licenseDescription, default\n ='CC0')\n bpy.types.Object.MhMsAuthor = StringProperty(name='Author', description\n =\n 'The author of this material. This will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsHomepage = StringProperty(name='Home page',\n description=\n 'The home page of the material, if any. This will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsBackfaceCull = BoolProperty(name=\n 'Backface culling', description=\n 'If the back side of faces with the material should be invisible. This has no effect in exports, but may be important in MH'\n , default=True)\n bpy.types.Object.MhMsCastShadows = BoolProperty(name='Cast shadows',\n description=\n 'If the material casts shadows. This has no effect in exports.',\n default=True)\n bpy.types.Object.MhMsReceiveShadows = BoolProperty(name=\n 'Receive shadows', description=\n 'If the material receives shadows. This has no effect in exports.',\n default=True)\n bpy.types.Object.MhMsAlphaToCoverage = BoolProperty(name=\n 'AlphaToCoverage', description=\n 'I have no idea what this does, but it might be important', default\n =True)\n bpy.types.Object.MhMsShadeless = BoolProperty(name='Shadeless',\n description=\n 'If the material is shadeless. It is unlikely you want this.',\n default=False)\n bpy.types.Object.MhMsWireframe = BoolProperty(name='Wireframe',\n description=\n 'If the material is to be rendered as a wireframe. It is unlikely you want this.'\n , default=False)\n bpy.types.Object.MhMsTransparent = BoolProperty(name='Transparent',\n description=\n 'If the material is to be rendered as a transparent. It is unlikely you want this, as the normal approach is using the alpha channel in the diffuse texture.'\n , default=False)\n bpy.types.Object.MhMsDepthless = BoolProperty(name='Depthless',\n description=\n 'If the material is to be rendered as having no depth. It is unlikely you want this.'\n , default=False)\n bpy.types.Object.MhMsSSSEnable = BoolProperty(name='SSS Enable',\n description=\n 'If the material is to be rendered with sub surface scattering.',\n default=False)\n bpy.types.Object.MhMsUseLit = BoolProperty(name='Use Litsphere',\n description=\n 'Use the litsphere shader when rendering material in MakeHuman. This does not have any effect on materials outside MakeHuman'\n , default=True)\n bpy.types.Object.MhMsWriteBlendMaterial = BoolProperty(name=\n 'Write Blend material', description=\n 'Stores the second material on the active object in a blend file',\n default=False)\n bpy.types.Object.MhMsLitsphere = bpy.props.EnumProperty(items=\n _litspheres, name='Litsphere', description=_litsphereDescription,\n default='leather')\n bpy.types.Object.MhMsTextures = bpy.props.EnumProperty(items=_textures,\n name='Textures', description=_texturesDescription, default='NORMALIZE')\n",
"<import token>\n<assignment token>\n_licenses.append(('CC0', 'CC0', 'Creative Commons Zero', 1))\n_licenses.append(('CC-BY', 'CC-BY', 'Creative Commons Attribution', 2))\n_licenses.append(('AGPL', 'AGPL',\n \"Affero Gnu Public License (don't use unless absolutely necessary)\", 3))\n<assignment token>\n_textures.append(('NORMALIZE', 'Normalize',\n 'Copy to a name based on MHMAT filename', 1))\n_textures.append(('COPY', 'Copy', 'Copy without rename', 2))\n_textures.append(('LINK', 'Link',\n 'Link to original location, with absolute pathname', 3))\n<assignment token>\n_litspheres.append(('leather', 'leather',\n 'Leather litsphere. This is appropriate for all clothes, not only leather.'\n , 1))\n_litspheres.append(('standard_skin', 'standard skin',\n 'Standard skin litsphere. This is appropriate for all skins.', 2))\n_litspheres.append(('african', 'african skin', 'African skin litsphere', 3))\n_litspheres.append(('asian', 'asian skin', 'Asian skin litsphere', 4))\n_litspheres.append(('caucasian', 'caucasian skin',\n 'Caucasian skin litsphere', 5))\n_litspheres.append(('toon01', 'toon', 'Toon skin litsphere', 6))\n_litspheres.append(('eye', 'eye', 'Eye litsphere', 7))\n_litspheres.append(('hair', 'hair', 'Hair litsphere', 8))\n<assignment token>\n\n\ndef extraProperties():\n if not hasattr(bpy.types.Object, 'MhObjectType'):\n bpy.types.Object.MhObjectType = StringProperty(name='Object type',\n description=\n 'This is what type of MakeHuman object is (such as Clothes, Eyes...)'\n , default='')\n if not hasattr(bpy.types.Object, 'MhHuman'):\n bpy.types.Object.MhHuman = BoolProperty(name='Is MH Human',\n description='Old makeclothes property for deciding object type',\n default=False)\n bpy.types.Scene.MhMsCreateDiffuse = BoolProperty(name=\n 'Create diffuse placeholder', description=\n 'Create a placeholder for a diffuse texture', default=True)\n bpy.types.Scene.MhMsCreateNormal = BoolProperty(name=\n 'Create normal map placeholder', description=\n 'Create a placeholder for a normal map', default=False)\n bpy.types.Scene.MhMsCreateBump = BoolProperty(name=\n 'Create bump map placeholder', description=\n 'Create a placeholder for a bump map', default=False)\n bpy.types.Scene.MhMsOverwrite1 = BoolProperty(name=\n 'Overwrite existing (create)', description=\n 'Overwrite existing material(s) on object', default=False)\n bpy.types.Scene.MhMsOverwrite2 = BoolProperty(name=\n 'Overwrite existing (import)', description=\n 'Overwrite existing material(s) on object', default=False)\n bpy.types.Object.MhMsName = StringProperty(name='Name', description=\n 'The name of this material. This will have little practical effect apart from being written to the mhmat file.'\n , default='material')\n bpy.types.Object.MhMsTag = StringProperty(name='Tag', description=\n 'A category the material fits into, for example \"blond\" or \"female\". This will influence sorting and filtering in MH.'\n , default='')\n bpy.types.Object.MhMsDescription = StringProperty(name='Description',\n description=\n 'A description of the material. It will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsMatLicense = bpy.props.EnumProperty(items=\n _licenses, name='License', description=_licenseDescription, default\n ='CC0')\n bpy.types.Object.MhMsAuthor = StringProperty(name='Author', description\n =\n 'The author of this material. 
This will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsHomepage = StringProperty(name='Home page',\n description=\n 'The home page of the material, if any. This will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsBackfaceCull = BoolProperty(name=\n 'Backface culling', description=\n 'If the back side of faces with the material should be invisible. This has no effect in exports, but may be important in MH'\n , default=True)\n bpy.types.Object.MhMsCastShadows = BoolProperty(name='Cast shadows',\n description=\n 'If the material casts shadows. This has no effect in exports.',\n default=True)\n bpy.types.Object.MhMsReceiveShadows = BoolProperty(name=\n 'Receive shadows', description=\n 'If the material receives shadows. This has no effect in exports.',\n default=True)\n bpy.types.Object.MhMsAlphaToCoverage = BoolProperty(name=\n 'AlphaToCoverage', description=\n 'I have no idea what this does, but it might be important', default\n =True)\n bpy.types.Object.MhMsShadeless = BoolProperty(name='Shadeless',\n description=\n 'If the material is shadeless. It is unlikely you want this.',\n default=False)\n bpy.types.Object.MhMsWireframe = BoolProperty(name='Wireframe',\n description=\n 'If the material is to be rendered as a wireframe. It is unlikely you want this.'\n , default=False)\n bpy.types.Object.MhMsTransparent = BoolProperty(name='Transparent',\n description=\n 'If the material is to be rendered as a transparent. It is unlikely you want this, as the normal approach is using the alpha channel in the diffuse texture.'\n , default=False)\n bpy.types.Object.MhMsDepthless = BoolProperty(name='Depthless',\n description=\n 'If the material is to be rendered as having no depth. It is unlikely you want this.'\n , default=False)\n bpy.types.Object.MhMsSSSEnable = BoolProperty(name='SSS Enable',\n description=\n 'If the material is to be rendered with sub surface scattering.',\n default=False)\n bpy.types.Object.MhMsUseLit = BoolProperty(name='Use Litsphere',\n description=\n 'Use the litsphere shader when rendering material in MakeHuman. This does not have any effect on materials outside MakeHuman'\n , default=True)\n bpy.types.Object.MhMsWriteBlendMaterial = BoolProperty(name=\n 'Write Blend material', description=\n 'Stores the second material on the active object in a blend file',\n default=False)\n bpy.types.Object.MhMsLitsphere = bpy.props.EnumProperty(items=\n _litspheres, name='Litsphere', description=_litsphereDescription,\n default='leather')\n bpy.types.Object.MhMsTextures = bpy.props.EnumProperty(items=_textures,\n name='Textures', description=_texturesDescription, default='NORMALIZE')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef extraProperties():\n if not hasattr(bpy.types.Object, 'MhObjectType'):\n bpy.types.Object.MhObjectType = StringProperty(name='Object type',\n description=\n 'This is what type of MakeHuman object is (such as Clothes, Eyes...)'\n , default='')\n if not hasattr(bpy.types.Object, 'MhHuman'):\n bpy.types.Object.MhHuman = BoolProperty(name='Is MH Human',\n description='Old makeclothes property for deciding object type',\n default=False)\n bpy.types.Scene.MhMsCreateDiffuse = BoolProperty(name=\n 'Create diffuse placeholder', description=\n 'Create a placeholder for a diffuse texture', default=True)\n bpy.types.Scene.MhMsCreateNormal = BoolProperty(name=\n 'Create normal map placeholder', description=\n 'Create a placeholder for a normal map', default=False)\n bpy.types.Scene.MhMsCreateBump = BoolProperty(name=\n 'Create bump map placeholder', description=\n 'Create a placeholder for a bump map', default=False)\n bpy.types.Scene.MhMsOverwrite1 = BoolProperty(name=\n 'Overwrite existing (create)', description=\n 'Overwrite existing material(s) on object', default=False)\n bpy.types.Scene.MhMsOverwrite2 = BoolProperty(name=\n 'Overwrite existing (import)', description=\n 'Overwrite existing material(s) on object', default=False)\n bpy.types.Object.MhMsName = StringProperty(name='Name', description=\n 'The name of this material. This will have little practical effect apart from being written to the mhmat file.'\n , default='material')\n bpy.types.Object.MhMsTag = StringProperty(name='Tag', description=\n 'A category the material fits into, for example \"blond\" or \"female\". This will influence sorting and filtering in MH.'\n , default='')\n bpy.types.Object.MhMsDescription = StringProperty(name='Description',\n description=\n 'A description of the material. It will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsMatLicense = bpy.props.EnumProperty(items=\n _licenses, name='License', description=_licenseDescription, default\n ='CC0')\n bpy.types.Object.MhMsAuthor = StringProperty(name='Author', description\n =\n 'The author of this material. This will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsHomepage = StringProperty(name='Home page',\n description=\n 'The home page of the material, if any. This will have little practical effect apart from being written to the mhmat file.'\n , default='')\n bpy.types.Object.MhMsBackfaceCull = BoolProperty(name=\n 'Backface culling', description=\n 'If the back side of faces with the material should be invisible. This has no effect in exports, but may be important in MH'\n , default=True)\n bpy.types.Object.MhMsCastShadows = BoolProperty(name='Cast shadows',\n description=\n 'If the material casts shadows. This has no effect in exports.',\n default=True)\n bpy.types.Object.MhMsReceiveShadows = BoolProperty(name=\n 'Receive shadows', description=\n 'If the material receives shadows. This has no effect in exports.',\n default=True)\n bpy.types.Object.MhMsAlphaToCoverage = BoolProperty(name=\n 'AlphaToCoverage', description=\n 'I have no idea what this does, but it might be important', default\n =True)\n bpy.types.Object.MhMsShadeless = BoolProperty(name='Shadeless',\n description=\n 'If the material is shadeless. 
It is unlikely you want this.',\n default=False)\n bpy.types.Object.MhMsWireframe = BoolProperty(name='Wireframe',\n description=\n 'If the material is to be rendered as a wireframe. It is unlikely you want this.'\n , default=False)\n bpy.types.Object.MhMsTransparent = BoolProperty(name='Transparent',\n description=\n 'If the material is to be rendered as a transparent. It is unlikely you want this, as the normal approach is using the alpha channel in the diffuse texture.'\n , default=False)\n bpy.types.Object.MhMsDepthless = BoolProperty(name='Depthless',\n description=\n 'If the material is to be rendered as having no depth. It is unlikely you want this.'\n , default=False)\n bpy.types.Object.MhMsSSSEnable = BoolProperty(name='SSS Enable',\n description=\n 'If the material is to be rendered with sub surface scattering.',\n default=False)\n bpy.types.Object.MhMsUseLit = BoolProperty(name='Use Litsphere',\n description=\n 'Use the litsphere shader when rendering material in MakeHuman. This does not have any effect on materials outside MakeHuman'\n , default=True)\n bpy.types.Object.MhMsWriteBlendMaterial = BoolProperty(name=\n 'Write Blend material', description=\n 'Stores the second material on the active object in a blend file',\n default=False)\n bpy.types.Object.MhMsLitsphere = bpy.props.EnumProperty(items=\n _litspheres, name='Litsphere', description=_litsphereDescription,\n default='leather')\n bpy.types.Object.MhMsTextures = bpy.props.EnumProperty(items=_textures,\n name='Textures', description=_texturesDescription, default='NORMALIZE')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n"
] | false |
98,853 |
7854931af9b60f22faf23ecd630bb083c8a20c43
|
from django.urls import path
from .views import DecrementProductInCart, PlaceOrder, SignUpUser,DeleteUser,UpdateInfoUser,GetCartInfo,AddIntoCart,DeleteFromCart,PlaceOrder,CancelOrder,GetOrderInfo,LoginUser,LogoutUSer,IncrementProductInCart,DecrementProductInCart
urlpatterns = [
path('api/create-user/', SignUpUser.as_view()),
path('api/delete-user/', DeleteUser.as_view()),
path('api/update-user/', UpdateInfoUser.as_view()),
path('api/get-cart/', GetCartInfo.as_view()),
path('api/add-cart/', AddIntoCart.as_view()),
path('api/delete-cart/', DeleteFromCart.as_view()),
path('api/place-order/', PlaceOrder.as_view()),
path('api/cancel-order/', CancelOrder.as_view()),
path('api/get-order/', GetOrderInfo.as_view()),
path('api/login-user/', LoginUser.as_view()),
path('api/logout-user/', LogoutUSer.as_view()),
path('api/increment-cart-product/', IncrementProductInCart.as_view()),
path('api/decrement-cart-product/', DecrementProductInCart.as_view()),
]
|
[
"from django.urls import path\nfrom .views import DecrementProductInCart, PlaceOrder, SignUpUser,DeleteUser,UpdateInfoUser,GetCartInfo,AddIntoCart,DeleteFromCart,PlaceOrder,CancelOrder,GetOrderInfo,LoginUser,LogoutUSer,IncrementProductInCart,DecrementProductInCart\n\n\nurlpatterns = [\n path('api/create-user/', SignUpUser.as_view()),\n path('api/delete-user/', DeleteUser.as_view()),\n path('api/update-user/', UpdateInfoUser.as_view()),\n path('api/get-cart/', GetCartInfo.as_view()),\n path('api/add-cart/', AddIntoCart.as_view()),\n path('api/delete-cart/', DeleteFromCart.as_view()),\n path('api/place-order/', PlaceOrder.as_view()),\n path('api/cancel-order/', CancelOrder.as_view()),\n path('api/get-order/', GetOrderInfo.as_view()),\n path('api/login-user/', LoginUser.as_view()),\n path('api/logout-user/', LogoutUSer.as_view()),\n path('api/increment-cart-product/', IncrementProductInCart.as_view()),\n path('api/decrement-cart-product/', DecrementProductInCart.as_view()),\n]\n",
"from django.urls import path\nfrom .views import DecrementProductInCart, PlaceOrder, SignUpUser, DeleteUser, UpdateInfoUser, GetCartInfo, AddIntoCart, DeleteFromCart, PlaceOrder, CancelOrder, GetOrderInfo, LoginUser, LogoutUSer, IncrementProductInCart, DecrementProductInCart\nurlpatterns = [path('api/create-user/', SignUpUser.as_view()), path(\n 'api/delete-user/', DeleteUser.as_view()), path('api/update-user/',\n UpdateInfoUser.as_view()), path('api/get-cart/', GetCartInfo.as_view()),\n path('api/add-cart/', AddIntoCart.as_view()), path('api/delete-cart/',\n DeleteFromCart.as_view()), path('api/place-order/', PlaceOrder.as_view(\n )), path('api/cancel-order/', CancelOrder.as_view()), path(\n 'api/get-order/', GetOrderInfo.as_view()), path('api/login-user/',\n LoginUser.as_view()), path('api/logout-user/', LogoutUSer.as_view()),\n path('api/increment-cart-product/', IncrementProductInCart.as_view()),\n path('api/decrement-cart-product/', DecrementProductInCart.as_view())]\n",
"<import token>\nurlpatterns = [path('api/create-user/', SignUpUser.as_view()), path(\n 'api/delete-user/', DeleteUser.as_view()), path('api/update-user/',\n UpdateInfoUser.as_view()), path('api/get-cart/', GetCartInfo.as_view()),\n path('api/add-cart/', AddIntoCart.as_view()), path('api/delete-cart/',\n DeleteFromCart.as_view()), path('api/place-order/', PlaceOrder.as_view(\n )), path('api/cancel-order/', CancelOrder.as_view()), path(\n 'api/get-order/', GetOrderInfo.as_view()), path('api/login-user/',\n LoginUser.as_view()), path('api/logout-user/', LogoutUSer.as_view()),\n path('api/increment-cart-product/', IncrementProductInCart.as_view()),\n path('api/decrement-cart-product/', DecrementProductInCart.as_view())]\n",
"<import token>\n<assignment token>\n"
] | false |
98,854 |
defc292986aed1d932d7143b7a421464875ebfcd
|
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D,TimeDistributed, LSTM, Conv3D
from keras.applications import VGG16
from keras import backend as K
from keras import regularizers
from sklearn.preprocessing import OneHotEncoder
import os
import numpy as np
import nibabel as nib # to read nii files
import shutil # for file operations
import glob # used to make a file list from a directory
import matplotlib.pyplot as plt
import cv2 # opencv library
import matplotlib.image as mpimg
import random
from sklearn.model_selection import train_test_split
import traceback
import sys
import keras
from gen import DataGenerator
classes = ['Alzheimer',"MCI", 'Normal']
# Declaring the home variables that will be used throughout the script.
home_files_dir = '/home/ubuntu/Select_original_fmri/'
#output_dir='/home/ubuntu/final_src/DeepNeuralnets--Alzheimer/videoclassification/'
train_list=[]
test_list=[]
file_list=[]
for class_ in classes:
print ('working on ' + class_ + '...')
    for root, dirs, files in os.walk(os.path.join('output_array', class_)):
length=len(files)
print("root: ",root)
if class_ == 'Alzheimer':
for file_ in files:
npy = np.load(root+'/'+file_)
if npy.shape == (64, 64, 6720):
file_list.append((npy,0))
#image data division
test_list=file_list[:int(length*0.2)]
train_list=file_list[len(test_list):]
elif class_ == 'MCI':
len2=len(file_list)
for file_ in files[:25]:
npy = np.load(root+'/'+file_)
if npy.shape == (64, 64, 6720):
file_list.append((npy,1))
            #image data division
test_list +=file_list[len2:int(len2+length*0.2)]
train_list += file_list[int(len2+length*0.2):]
# for Normal Class
else:
len3=len(file_list)
for file_ in files:
npy = np.load(root+'/'+file_)
if npy.shape == (64, 64, 6720):
file_list.append((npy,2))
            #image data division
test_list += file_list[len3:int(len3+length*0.2)]
train_list += file_list[int(len3+length*0.2):]
#print ("length of train list: ",len(train_list))
#print("length of train labels:",len(train_labels))
#print("length of test list:",len(test_list))
#print("length of test labels:",len(test_labels))
np.random.shuffle(train_list)
np.random.shuffle(test_list)
X_train,Y_train=zip(*train_list)
X_test,Y_test=zip(*test_list)
#X_train=np.array(X_train,dtype=np.uint8)
#Y_train=np.array(Y_train,dtype=np.uint8)
#X_test=np.array(X_test,dtype=np.uint8)
#print X_test.shape
#Y_test=np.array(Y_test,dtype=np.uint8)
X_test = np.transpose(X_test, [0, 3, 2, 1])
#print X_train.shape
#X_train = np.transpose(X_train, [0, 3, 2, 1])
#for i in X_train:
# print("X train shape: ",i.shape)
#print("Y label shape: ",Y_train.shape)
#print("X test shape: ",X_test.shape)
#print("Y test label: ",Y_test.shape)
#print('done...')
# Parameters
params = {'dim_x': 6720,
'dim_y': 64,
'dim_z': 64,
'batch_size': 1,
'shuffle': True}
training_generator = DataGenerator(**params).generate(Y_train, X_train)
validation_generator = DataGenerator(**params).generate(Y_test, X_test)
batch_size = 1
input_shape = [6720, 64,64]
#Y_train= np_utils.to_categorical(Y_train, num_classes=3)
#Y_test= np_utils.to_categorical(Y_test, num_classes=3)
#np.random.shuffle(data)
#print(X_train.shape)
#print(Y_train.shape)
#print(X_test.shape)
#print(Y_test.shape)
#del(train_list)
#del(test_list)
model = Sequential()
model.add(Conv2D(
32, (3,3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3,3), activation='relu'))
model.add(Conv2D(128, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (2,2), activation='relu'))
model.add(Conv2D(256, (2,2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(32))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
model.summary()
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
print Y_train
print Y_test
# model.fit(X_train, Y_train,batch_size=batch_size, epochs=100,verbose=1,validation_data=(X_test, Y_test))
# Train model on dataset
model.fit_generator(generator = training_generator,
steps_per_epoch = len(X_train)//batch_size,
epochs = 2,
validation_data = validation_generator,
validation_steps = len(X_test)//batch_size)
Y_test= np_utils.to_categorical(Y_test, num_classes=3)
print X_test[0].shape
for i in range(len(X_train)-2):
    print i,i+1
    # note: X_train volumes are still (64, 64, 6720) here, while the model's
    # input_shape is (6720, 64, 64), matching the transposed X_test layout
    print model.predict_classes(X_train[i:i+1])
#score = model.evaluate(X_test, Y_test, verbose=0)
json_string = model.to_json()
with open("arch.json","w") as f:
f.write(json_string)
model.save_weights("weights.h5")
#print('Test loss:', score[0])
#print('Test accuracy:', score[1])
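The script depends on gen.DataGenerator, which is not included here. A minimal sketch of the interface the script appears to assume (dim_x/dim_y/dim_z, batch_size and shuffle mirror the params dict above; generate(labels, data) must yield (X, one-hot y) batches indefinitely, as fit_generator requires):

import numpy as np
from keras.utils import np_utils


class DataGenerator(object):
    def __init__(self, dim_x, dim_y, dim_z, batch_size=1, shuffle=True):
        self.dim_x, self.dim_y, self.dim_z = dim_x, dim_y, dim_z
        self.batch_size = batch_size
        self.shuffle = shuffle

    def generate(self, labels, data):
        # labels: class indices (0/1/2); data: (64, 64, 6720) volumes, as built above
        indexes = np.arange(len(data))
        while True:  # fit_generator expects an endless generator
            if self.shuffle:
                np.random.shuffle(indexes)
            for start in range(0, len(indexes), self.batch_size):
                batch = indexes[start:start + self.batch_size]
                # reorder each volume to (dim_x, dim_y, dim_z) = (6720, 64, 64)
                X = np.array([np.transpose(data[i], (2, 1, 0)) for i in batch])
                y = np_utils.to_categorical([labels[i] for i in batch], num_classes=3)
                yield X, y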
|
[
"from keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D,TimeDistributed, LSTM, Conv3D\nfrom keras.applications import VGG16\nfrom keras import backend as K\nfrom keras import regularizers\nfrom sklearn.preprocessing import OneHotEncoder\nimport os\nimport numpy as np\nimport nibabel as nib # to read nii files\nimport shutil # for file operations\nimport glob # use to make file list from diectory\nimport matplotlib.pyplot as plt\nimport cv2 # opencv library\nimport matplotlib.image as mpimg\nimport random\nfrom sklearn.model_selection import train_test_split \nimport traceback\nimport sys\nimport keras\nfrom gen import DataGenerator\n\nclasses = ['Alzheimer',\"MCI\", 'Normal']\n# Declaring the home variables that will be used throughout the script.\n\nhome_files_dir = '/home/ubuntu/Select_original_fmri/'\n#output_dir='/home/ubuntu/final_src/DeepNeuralnets--Alzheimer/videoclassification/'\n\n\ntrain_list=[]\ntest_list=[]\nfile_list=[]\n\nfor class_ in classes:\n print ('working on ' + class_ + '...')\n \n for root, dir ,files in os.walk(os.path.join('output_array', class_)): \n length=len(files)\n print(\"root: \",root)\n if class_ == 'Alzheimer': \n for file_ in files: \n npy = np.load(root+'/'+file_)\n if npy.shape == (64, 64, 6720): \n file_list.append((npy,0))\n #image data division\n test_list=file_list[:int(length*0.2)]\n train_list=file_list[len(test_list):]\n \n \n elif class_ == 'MCI':\n len2=len(file_list)\n for file_ in files[:25]: \n npy = np.load(root+'/'+file_)\n if npy.shape == (64, 64, 6720): \n file_list.append((npy,1))\n #image data diision\n test_list +=file_list[len2:int(len2+length*0.2)]\n train_list += file_list[int(len2+length*0.2):]\n \n \n # for Normal Class\n \n else:\n len3=len(file_list)\n for file_ in files: \n npy = np.load(root+'/'+file_)\n if npy.shape == (64, 64, 6720): \n file_list.append((npy,2))\n #image data diision\n test_list += file_list[len3:int(len3+length*0.2)]\n train_list += file_list[int(len3+length*0.2):]\n \n#print (\"length of train list: \",len(train_list))\n#print(\"length of train labels:\",len(train_labels))\n#print(\"length of test list:\",len(test_list))\n#print(\"length of test labels:\",len(test_labels))\nnp.random.shuffle(train_list)\nnp.random.shuffle(test_list)\nX_train,Y_train=zip(*train_list)\nX_test,Y_test=zip(*test_list)\n\n#X_train=np.array(X_train,dtype=np.uint8)\n#Y_train=np.array(Y_train,dtype=np.uint8)\n#X_test=np.array(X_test,dtype=np.uint8)\n#print X_test.shape\n#Y_test=np.array(Y_test,dtype=np.uint8)\nX_test = np.transpose(X_test, [0, 3, 2, 1])\n#print X_train.shape\n#X_train = np.transpose(X_train, [0, 3, 2, 1])\n\n#for i in X_train:\n \n# print(\"X train shape: \",i.shape)\n#print(\"Y label shape: \",Y_train.shape)\n#print(\"X test shape: \",X_test.shape)\n#print(\"Y test label: \",Y_test.shape)\n#print('done...')\n \n# Parameters\nparams = {'dim_x': 6720,\n 'dim_y': 64,\n 'dim_z': 64,\n 'batch_size': 1,\n 'shuffle': True}\n\ntraining_generator = DataGenerator(**params).generate(Y_train, X_train)\nvalidation_generator = DataGenerator(**params).generate(Y_test, X_test)\n \n\nbatch_size = 1\ninput_shape = [6720, 64,64]\n#Y_train= np_utils.to_categorical(Y_train, num_classes=3)\n#Y_test= np_utils.to_categorical(Y_test, num_classes=3)\n#np.random.shuffle(data)\n#print(X_train.shape)\n#print(Y_train.shape)\n#print(X_test.shape)\n#print(Y_test.shape)\n#del(train_list)\n#del(test_list)\nmodel 
= Sequential()\nmodel.add(Conv2D(\n 32, (3,3), activation='relu', input_shape=input_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(64, (3,3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(128, (3,3), activation='relu'))\nmodel.add(Conv2D(128, (3,3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Conv2D(256, (2,2), activation='relu'))\nmodel.add(Conv2D(256, (2,2), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(32))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(3, activation='softmax'))\nmodel.summary()\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\nprint Y_train\nprint Y_test\n# model.fit(X_train, Y_train,batch_size=batch_size, epochs=100,verbose=1,validation_data=(X_test, Y_test))\n# Train model on dataset\nmodel.fit_generator(generator = training_generator,\n steps_per_epoch = len(X_train)//batch_size,\n epochs = 2,\n validation_data = validation_generator,\n validation_steps = len(X_test)//batch_size)\nY_test= np_utils.to_categorical(Y_test, num_classes=3)\nprint X_test[0].shape\nfor i in range(len(X_train)-2):\n print i,i+1\n print model.predict_classes(nX_train[i:i+1])\n#score = model.evaluate(X_test, Y_test, verbose=0)\njson_string = model.to_json()\nwith open(\"arch.json\",\"w\") as f:\n f.write(json_string)\nmodel.save_weights(\"weights.h5\")\n#print('Test loss:', score[0])\n#print('Test accuracy:', score[1])"
] | true |
98,855 |
12311601c53eb67b532cd375cf5eac8ca83e0da8
|
def gcd(a, b):
while b:
a, b = b, a % b
return a
def lcm(a, b):
return a * b // gcd(a, b)
while True:
try:
a, b = list(map(int, input().split()))
print(lcm(a, b))
except:
break
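For reference, the same loop using only the standard library: math.gcd has been available since Python 3.5 and math.lcm since Python 3.9, so the helper functions above can be dropped on those versions.

import math
import sys

for line in sys.stdin:
    parts = line.split()
    if len(parts) != 2:
        continue
    a, b = map(int, parts)
    print(a * b // math.gcd(a, b))  # equivalent to math.lcm(a, b) on 3.9+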
|
[
"def gcd(a, b):\n while b:\n a, b = b, a % b\n return a\n\n\ndef lcm(a, b):\n return a * b // gcd(a, b)\n\n\nwhile True:\n try:\n a, b = list(map(int, input().split()))\n print(lcm(a, b))\n except:\n break\n",
"def gcd(a, b):\n while b:\n a, b = b, a % b\n return a\n\n\ndef lcm(a, b):\n return a * b // gcd(a, b)\n\n\n<code token>\n",
"<function token>\n\n\ndef lcm(a, b):\n return a * b // gcd(a, b)\n\n\n<code token>\n",
"<function token>\n<function token>\n<code token>\n"
] | false |
98,856 |
f1983f3791c6038f5a697cf2b9856a62d017b016
|
#!/usr/bin/env python
import sys, os ; sys.path.append(os.getcwd())
import unicodedata
import collections
import nacltaia
import base91a
import codecs
import select
import socket
import time
import pwd
import re
taias = dict()
RE = 'a-zA-Z0-9^(\)\-_{\}[\]|'
re_SPLIT_SPACE = re.compile(' +',re.IGNORECASE).split
re_SPLIT_SPACE_COLON = re.compile(' +:?',re.IGNORECASE).split
re_SPLIT_BRACKETS = re.compile('\[|]',re.IGNORECASE).split
re_CRYPTOSERV = re.compile('^:['+RE+']+!nacltaia-otr@service',re.IGNORECASE).search
re_NICK_PRIVMSG_NOTICE_TOPIC = re.compile('^:['+RE+']+![~'+RE+'.]+@['+RE+'.]+ +((PRIVMSG)|(NOTICE)|(TOPIC)) +['+RE+']+ +:?.*$',re.IGNORECASE).search
re_CHANNEL_PRIVMSG_NOTICE_TOPIC = re.compile('^:['+RE+']+![~'+RE+'.]+@['+RE+'.]+ +((PRIVMSG)|(NOTICE)|(TOPIC)) +[#&!+]['+RE+']+ +:?.*$',re.IGNORECASE).search
re_322_332 = re.compile('^:['+RE+'.]+ +((322)|(332)) +['+RE+']+ +[#&!+]['+RE+']+ ?([0-9]+)? +:?.*$',re.IGNORECASE).search
re_BUFFER_CTCP_DCC = re.compile('\x01(?!ACTION )',re.IGNORECASE).sub
re_BUFFER_COLOUR = re.compile('(\x03[0-9][0-9]?((?<=[0-9]),[0-9]?[0-9]?)?)|[\x02\x03\x0f\x1d\x1f]',re.IGNORECASE).sub
def oksrctaia(n,taia,taia_now):
if nacltaia.taia_okseconds(n,taia)<1: return 0
if nacltaia.taia_new(taia,taias[src])<1:
return 1 if taia_now == taias[src] else 0
return 1
def cached(h):
if h in hashcache: return 1
hashcache.append(h)
return 0
def ret_322_332_msg(cmd,buffer):
try:
return re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer,5)[5],2)[2][1:] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer,4)[4]
except:
return re_SPLIT_SPACE_COLON(buffer,5)[5] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer,4)[4]
uid, gid = pwd.getpwnam('nacltaia-otr')[2:4]
os.chdir('crypto/')
os.chroot(os.getcwd())
os.setgid(gid)
os.setuid(uid)
del uid, gid
ipc=socket.socket(socket.AF_UNIX,socket.SOCK_STREAM) # contains potential race condition
for n in range(0,9):
if n == 8: sys.exit(128+111)
try:
ipc.connect('socket')
del n
break
except: time.sleep(0.1)
ipc_poll=select.poll()
ipc_poll.register(ipc.fileno(),select.POLLIN|select.POLLPRI)
ipc_poll=ipc_poll.poll
poll=select.poll()
poll.register(ipc.fileno(),select.POLLIN|select.POLLPRI)
poll.register(0,select.POLLIN|select.POLLPRI)
poll=poll.poll
DEBUG = int(open('DEBUG','rb').read().split('\n')[0]) if os.path.exists('DEBUG') else 0
COLOUR = int(open('COLOUR','rb').read().split('\n')[0]) if os.path.exists('COLOUR') else 0
UNICODE = int(open('UNICODE','rb').read().split('\n')[0]) if os.path.exists('UNICODE') else 0
HASH_LOG = int(open('HASH_LOG','rb').read().split('\n')[0]) if os.path.exists('HASH_LOG') else 256
OK_SECONDS = int(open('OK_SECONDS','rb').read().split('\n')[0]) if os.path.exists('OK_SECONDS') else 128
NAMELESS = '\|' if os.path.exists('NAMELESS') and int(open('NAMELESS','rb').read().split('\n')[0]) else str()
re_SPLIT_NAMELESS = re.compile(NAMELESS,re.IGNORECASE).split
hashcache = collections.deque([],HASH_LOG)
while 1:
if len(poll(-1)) < 2 and ipc_poll(0):
h = ipc.recv(32)
if len(h) < 32: sys.exit(128+32)
cached(h)
continue
buffer = str()
while 1:
byte = os.read(0,1)
if byte == '': sys.exit(0)
if byte == '\n': break
if byte != '\r' and len(buffer)<1024: buffer += byte
while ipc_poll(0):
h = ipc.recv(32)
if len(h) < 32: sys.exit(128+32)
cached(h)
if re_CRYPTOSERV(buffer):
if DEBUG: os.write(2,'nacltaia-otr: error: re_CRYPTOSERV(buffer)\n')
continue
taia_now = nacltaia.taia_now_pack()
if re_NICK_PRIVMSG_NOTICE_TOPIC(buffer):
src = re_SPLIT_NAMELESS( buffer[1:].split('!',1)[0].lower() )[0]
if src in os.listdir('dstkey/'):
c = base91a.decode(re_SPLIT_SPACE_COLON(buffer,3)[3])
if not c:
if DEBUG: os.write(2,'nacltaia-otr: error: base91a.decode(re_SPLIT_SPACE_COLON(buffer,3)[3])\n')
continue
n = c[:24]
c = c[24:]
pk = base91a.hex2bin(open('dstkey/'+src,'rb').read(64))
sk = base91a.hex2bin(open('seckey','rb').read(64))
c = nacltaia.crypto_box_open(c,n,pk,sk)
if c == 0:
if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_box_open(c,n,pk,sk)\n')
continue
m = 0
taia = n[:16]
if len(c) >= 32:
pk = c[:32]
sk = open('tmpkey/'+src+'/sk','rb').read(32)
m = nacltaia.crypto_box_open(c[32:],n,pk,sk)
if open('tmpkey/'+src+'/tk','rb').read(32) != pk: open('tmpkey/'+src+'/tk','wb').write(pk)
else:
if DEBUG: os.write(2,'nacltaia-otr: error: len(c) < 32\n')
continue
if not src in taias.keys(): taias[src] = taia_now
if not oksrctaia(OK_SECONDS,taia,taia_now):
if DEBUG: os.write(2,'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\n')
continue
taias[src] = taia
if m == 0:
os.write(1,':' + buffer[1:].split('!',1)[0] + '!nacltaia-otr@service NOTICE ' + re_SPLIT_SPACE(buffer,3)[2] + ' :unable to decrypt message\a\n')
continue
else: buffer = ' '.join(re_SPLIT_SPACE(buffer,3)[:3]) + ' :' + m.split('\n',1)[0]
elif re_CHANNEL_PRIVMSG_NOTICE_TOPIC(buffer):
src = re_SPLIT_NAMELESS( buffer[1:].split('!',1)[0].lower() )[0]
dst = re_SPLIT_SPACE(buffer,3)[2].lower()[1:]
m = re_SPLIT_SPACE_COLON(buffer,3)[3]
h = nacltaia.crypto_hash_sha256(m)
if dst in os.listdir('chnkey/'):
c = base91a.decode(m)
if not c:
if DEBUG: os.write(2,'nacltaia-otr: error: base91a.decode(m)\n')
continue
n = c[:24]
c = c[24:]
k = base91a.hex2bin(open('chnkey/'+dst,'rb').read(64))
m = nacltaia.crypto_secretbox_open(c,n,k)
if m == 0:
if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_secretbox_open(c,n,k)\n')
continue
taia = n[:16]
if taia == '\x00'*16 and len(c) >= 32 + 64 + 24:
pk = m[:32]
m = nacltaia.crypto_sign_open(m[32:],pk)
if m == 0:
if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_sign_open(m[32:],pk)\n')
continue
if n != m[:24]:
if DEBUG: os.write(2,'nacltaia-otr: error: n != m[:24]\n')
continue
m = m[24:]
taia = n[16:] + '\x00'*8
if dst in os.listdir('unsign/') and src in os.listdir('unsign/'+dst+'/'):
if pk != base91a.hex2bin(open('unsign/'+dst+'/'+src,'rb').read(64)):
if DEBUG: os.write(2,'nacltaia-otr: error: pk != base91a.hex2bin(open(\'unsign/\'+dst+\'/\'+src,\'rb\').read(64))\n')
continue
if not src in taias.keys(): taias[src] = taia_now
if not oksrctaia(OK_SECONDS,taia,taia_now):
if DEBUG: os.write(2,'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\n')
continue
taias[src] = taia
elif nacltaia.taia_okseconds(OK_SECONDS,taia)<1:
if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\n')
continue
elif cached(h):
if DEBUG: os.write(2,'nacltaia-otr: error: cached(h)\n')
continue
elif dst in os.listdir('unsign/') and src in os.listdir('unsign/'+dst+'/'):
if DEBUG: os.write(2,'nacltaia-otr: error: dst in os.listdir(\'unsign/\') and src in os.listdir(\'unsign/\'+dst+\'/\')\n')
continue
elif nacltaia.taia_okseconds(OK_SECONDS,taia)<1:
if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\n')
continue
elif cached(h):
if DEBUG: os.write(2,'nacltaia-otr: error: cached(h)\n')
continue
buffer = ' '.join(re_SPLIT_SPACE(buffer,3)[:3]) + ' :' + m.split('\n',1)[0]
elif dst in os.listdir('unsign/') and src in os.listdir('unsign/'+dst+'/'):
m = base91a.decode(m)
pk = m[24:56]
n = m[:24]
m = nacltaia.crypto_sign_open(m[56:],pk)
if m == 0:
if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\n')
continue
if n != m[:24]:
if DEBUG: os.write(2,'nacltaia-otr: error: n != m[:24]\n')
continue
m = m[24:]
taia = n[:16]
if pk != base91a.hex2bin(open('unsign/'+dst+'/'+src,'rb').read(64)):
if DEBUG: os.write(2,'nacltaia-otr: error: pk != base91a.hex2bin(open(\'unsign/\'+dst+\'/\'+src\'rb\').read(64))\n')
continue
if not src in taias.keys(): taias[src] = taia_now
if not oksrctaia(OK_SECONDS,taia,taia_now):
if DEBUG: os.write(2,'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\n')
continue
taias[src] = taia
buffer = ' '.join(re_SPLIT_SPACE(buffer,3)[:3]) + ' :' + m.split('\n',1)[0]
elif len(m) >= 56 + 64 and not ' ' in m:
m = re_SPLIT_SPACE_COLON(buffer,3)[3]
h = nacltaia.crypto_hash_sha256(m)
m = base91a.decode(re_SPLIT_SPACE_COLON(buffer,3)[3])
if m[16:24] == '\x00'*8:
n = m[:24]
pk = m[24:56]
m = nacltaia.crypto_sign_open(m[56:],pk)
if m == 0:
if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\n')
continue
if n != m[:24]:
if DEBUG: os.write(2,'nacltaia-otr: error: n != m[:24]\n')
continue
m = m[24:]
taia = n[:16]
if nacltaia.taia_okseconds(OK_SECONDS,taia)<1:
if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\n')
continue
elif cached(h):
if DEBUG: os.write(2,'nacltaia-otr: error: cached(h)\n')
continue
else: m = re_SPLIT_SPACE_COLON(buffer,3)[3]
buffer = ' '.join(re_SPLIT_SPACE(buffer,3)[:3]) + ' :' + m.split('\n',1)[0]
elif re_322_332(buffer):
dst = re_SPLIT_SPACE(buffer,4)[3].lower()[1:]
cmd = re_SPLIT_SPACE(buffer,2)[1]
m = ret_322_332_msg(cmd,buffer)
if dst in os.listdir('chnkey/'):
c = base91a.decode(m)
c = str() if c == 0 else c
n = c[:24]
c = c[24:]
k = base91a.hex2bin(open('chnkey/'+dst,'rb').read(64))
m = nacltaia.crypto_secretbox_open(c,n,k)
m = str() if m == 0 else m
taia = n[:16]
if len(n) >= 16 and taia == '\x00'*16:
pk = m[:32]
m = nacltaia.crypto_sign_open(m[32:],pk)
m = str() if m == 0 else m
m = m[24:]
elif len(m) >= 56 + 64 and not ' ' in m:
m = base91a.decode(m)
if m[16:24] == '\x00'*8:
pk = m[24:56]
n = m[:24]
m = nacltaia.crypto_sign_open(m[56:],pk)
m = str() if m == 0 else m
m = m[24:]
else: m = ret_322_332_msg(cmd,buffer)
else: m = ret_322_332_msg(cmd,buffer)
if cmd == '322':
try: m = '[' + re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer,5)[5],2)[1] + '] ' + m
except: pass
buffer = ' '.join(re_SPLIT_SPACE(buffer,5)[:5]) + ' :' + m.split('\n',1)[0]
elif cmd == '332': buffer = ' '.join(re_SPLIT_SPACE(buffer,4)[:4]) + ' :' + m.split('\n',1)[0]
buffer = re_BUFFER_CTCP_DCC('',buffer) + '\x01' if '\x01ACTION ' in buffer.upper() else buffer.replace('\x01','')
if not COLOUR: buffer = re_BUFFER_COLOUR('',buffer)
if not UNICODE:
buffer = codecs.ascii_encode(unicodedata.normalize('NFKD',unicode(buffer,'utf-8','replace')),'ignore')[0]
buffer = ''.join(byte for byte in buffer if 127 > ord(byte) > 31 or byte in ['\x01','\x02','\x03','\x0f','\x1d','\x1f'])
os.write(1,buffer+'\n')
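A standalone illustration of the parsing layer only (no crypto): it reuses the RE character class and the channel PRIVMSG/NOTICE/TOPIC pattern defined above on a sample IRC line, and omits the NAMELESS splitting and the nacltaia/base91a calls, which are project-specific modules not reproduced here.

import re

RE = r'a-zA-Z0-9^(\)\-_{\}[\]|'
re_SPLIT_SPACE = re.compile(' +', re.IGNORECASE).split
re_SPLIT_SPACE_COLON = re.compile(' +:?', re.IGNORECASE).split
re_CHANNEL_PRIVMSG_NOTICE_TOPIC = re.compile(
    '^:[' + RE + ']+![~' + RE + '.]+@[' + RE + '.]+ +((PRIVMSG)|(NOTICE)|(TOPIC)) +[#&!+][' + RE + ']+ +:?.*$',
    re.IGNORECASE).search

line = ':alice!~alice@example.org PRIVMSG #channel :hello world'
if re_CHANNEL_PRIVMSG_NOTICE_TOPIC(line):
    src = line[1:].split('!', 1)[0].lower()       # 'alice'
    dst = re_SPLIT_SPACE(line, 3)[2].lower()[1:]  # 'channel'
    payload = re_SPLIT_SPACE_COLON(line, 3)[3]    # 'hello world'
    print(src + ' ' + dst + ' ' + payload)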
|
[
"#!/usr/bin/env python\nimport sys, os ; sys.path.append(os.getcwd())\nimport unicodedata\nimport collections\nimport nacltaia\nimport base91a\nimport codecs\nimport select\nimport socket\nimport time\nimport pwd\nimport re\n\ntaias = dict()\nRE = 'a-zA-Z0-9^(\\)\\-_{\\}[\\]|'\nre_SPLIT_SPACE = re.compile(' +',re.IGNORECASE).split\nre_SPLIT_SPACE_COLON = re.compile(' +:?',re.IGNORECASE).split\nre_SPLIT_BRACKETS = re.compile('\\[|]',re.IGNORECASE).split\nre_CRYPTOSERV = re.compile('^:['+RE+']+!nacltaia-otr@service',re.IGNORECASE).search\nre_NICK_PRIVMSG_NOTICE_TOPIC = re.compile('^:['+RE+']+![~'+RE+'.]+@['+RE+'.]+ +((PRIVMSG)|(NOTICE)|(TOPIC)) +['+RE+']+ +:?.*$',re.IGNORECASE).search\nre_CHANNEL_PRIVMSG_NOTICE_TOPIC = re.compile('^:['+RE+']+![~'+RE+'.]+@['+RE+'.]+ +((PRIVMSG)|(NOTICE)|(TOPIC)) +[#&!+]['+RE+']+ +:?.*$',re.IGNORECASE).search\nre_322_332 = re.compile('^:['+RE+'.]+ +((322)|(332)) +['+RE+']+ +[#&!+]['+RE+']+ ?([0-9]+)? +:?.*$',re.IGNORECASE).search\nre_BUFFER_CTCP_DCC = re.compile('\\x01(?!ACTION )',re.IGNORECASE).sub\nre_BUFFER_COLOUR = re.compile('(\\x03[0-9][0-9]?((?<=[0-9]),[0-9]?[0-9]?)?)|[\\x02\\x03\\x0f\\x1d\\x1f]',re.IGNORECASE).sub\n\ndef oksrctaia(n,taia,taia_now):\n if nacltaia.taia_okseconds(n,taia)<1: return 0\n if nacltaia.taia_new(taia,taias[src])<1:\n return 1 if taia_now == taias[src] else 0\n return 1\n\ndef cached(h):\n if h in hashcache: return 1\n hashcache.append(h)\n return 0\n\ndef ret_322_332_msg(cmd,buffer):\n try:\n return re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer,5)[5],2)[2][1:] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer,4)[4]\n except:\n return re_SPLIT_SPACE_COLON(buffer,5)[5] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer,4)[4]\n\nuid, gid = pwd.getpwnam('nacltaia-otr')[2:4]\nos.chdir('crypto/')\nos.chroot(os.getcwd())\nos.setgid(gid)\nos.setuid(uid)\ndel uid, gid\n\nipc=socket.socket(socket.AF_UNIX,socket.SOCK_STREAM) # contains potential race condition\nfor n in range(0,9):\n if n == 8: sys.exit(128+111)\n try:\n ipc.connect('socket')\n del n\n break\n except: time.sleep(0.1)\nipc_poll=select.poll()\nipc_poll.register(ipc.fileno(),select.POLLIN|select.POLLPRI)\nipc_poll=ipc_poll.poll\n\npoll=select.poll()\npoll.register(ipc.fileno(),select.POLLIN|select.POLLPRI)\npoll.register(0,select.POLLIN|select.POLLPRI)\npoll=poll.poll\n\nDEBUG = int(open('DEBUG','rb').read().split('\\n')[0]) if os.path.exists('DEBUG') else 0\nCOLOUR = int(open('COLOUR','rb').read().split('\\n')[0]) if os.path.exists('COLOUR') else 0\nUNICODE = int(open('UNICODE','rb').read().split('\\n')[0]) if os.path.exists('UNICODE') else 0\nHASH_LOG = int(open('HASH_LOG','rb').read().split('\\n')[0]) if os.path.exists('HASH_LOG') else 256\nOK_SECONDS = int(open('OK_SECONDS','rb').read().split('\\n')[0]) if os.path.exists('OK_SECONDS') else 128\nNAMELESS = '\\|' if os.path.exists('NAMELESS') and int(open('NAMELESS','rb').read().split('\\n')[0]) else str()\nre_SPLIT_NAMELESS = re.compile(NAMELESS,re.IGNORECASE).split\nhashcache = collections.deque([],HASH_LOG)\n\nwhile 1:\n\n if len(poll(-1)) < 2 and ipc_poll(0):\n h = ipc.recv(32)\n if len(h) < 32: sys.exit(128+32)\n cached(h)\n continue\n\n buffer = str()\n while 1:\n byte = os.read(0,1)\n if byte == '': sys.exit(0)\n if byte == '\\n': break\n if byte != '\\r' and len(buffer)<1024: buffer += byte\n\n while ipc_poll(0):\n h = ipc.recv(32)\n if len(h) < 32: sys.exit(128+32)\n cached(h)\n\n if re_CRYPTOSERV(buffer):\n if DEBUG: os.write(2,'nacltaia-otr: error: re_CRYPTOSERV(buffer)\\n')\n continue\n\n taia_now = 
nacltaia.taia_now_pack()\n\n if re_NICK_PRIVMSG_NOTICE_TOPIC(buffer):\n\n src = re_SPLIT_NAMELESS( buffer[1:].split('!',1)[0].lower() )[0]\n\n if src in os.listdir('dstkey/'):\n\n c = base91a.decode(re_SPLIT_SPACE_COLON(buffer,3)[3])\n\n if not c:\n if DEBUG: os.write(2,'nacltaia-otr: error: base91a.decode(re_SPLIT_SPACE_COLON(buffer,3)[3])\\n')\n continue\n\n n = c[:24]\n c = c[24:]\n pk = base91a.hex2bin(open('dstkey/'+src,'rb').read(64))\n sk = base91a.hex2bin(open('seckey','rb').read(64))\n c = nacltaia.crypto_box_open(c,n,pk,sk)\n\n if c == 0:\n if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_box_open(c,n,pk,sk)\\n')\n continue\n\n m = 0\n taia = n[:16]\n\n if len(c) >= 32:\n pk = c[:32]\n sk = open('tmpkey/'+src+'/sk','rb').read(32)\n m = nacltaia.crypto_box_open(c[32:],n,pk,sk)\n if open('tmpkey/'+src+'/tk','rb').read(32) != pk: open('tmpkey/'+src+'/tk','wb').write(pk)\n\n else:\n if DEBUG: os.write(2,'nacltaia-otr: error: len(c) < 32\\n')\n continue\n\n if not src in taias.keys(): taias[src] = taia_now\n\n if not oksrctaia(OK_SECONDS,taia,taia_now):\n if DEBUG: os.write(2,'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\\n')\n continue\n\n taias[src] = taia\n\n if m == 0:\n os.write(1,':' + buffer[1:].split('!',1)[0] + '!nacltaia-otr@service NOTICE ' + re_SPLIT_SPACE(buffer,3)[2] + ' :unable to decrypt message\\a\\n')\n continue\n\n else: buffer = ' '.join(re_SPLIT_SPACE(buffer,3)[:3]) + ' :' + m.split('\\n',1)[0]\n\n elif re_CHANNEL_PRIVMSG_NOTICE_TOPIC(buffer):\n\n src = re_SPLIT_NAMELESS( buffer[1:].split('!',1)[0].lower() )[0]\n dst = re_SPLIT_SPACE(buffer,3)[2].lower()[1:]\n m = re_SPLIT_SPACE_COLON(buffer,3)[3]\n h = nacltaia.crypto_hash_sha256(m)\n\n if dst in os.listdir('chnkey/'):\n\n c = base91a.decode(m)\n\n if not c:\n if DEBUG: os.write(2,'nacltaia-otr: error: base91a.decode(m)\\n')\n continue\n\n n = c[:24]\n c = c[24:]\n k = base91a.hex2bin(open('chnkey/'+dst,'rb').read(64))\n m = nacltaia.crypto_secretbox_open(c,n,k)\n\n if m == 0:\n if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_secretbox_open(c,n,k)\\n')\n continue\n\n taia = n[:16]\n\n if taia == '\\x00'*16 and len(c) >= 32 + 64 + 24:\n\n pk = m[:32]\n m = nacltaia.crypto_sign_open(m[32:],pk)\n\n if m == 0:\n if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_sign_open(m[32:],pk)\\n')\n continue\n\n if n != m[:24]:\n if DEBUG: os.write(2,'nacltaia-otr: error: n != m[:24]\\n')\n continue\n\n m = m[24:]\n taia = n[16:] + '\\x00'*8\n\n if dst in os.listdir('unsign/') and src in os.listdir('unsign/'+dst+'/'):\n\n if pk != base91a.hex2bin(open('unsign/'+dst+'/'+src,'rb').read(64)):\n if DEBUG: os.write(2,'nacltaia-otr: error: pk != base91a.hex2bin(open(\\'unsign/\\'+dst+\\'/\\'+src,\\'rb\\').read(64))\\n')\n continue\n\n if not src in taias.keys(): taias[src] = taia_now\n\n if not oksrctaia(OK_SECONDS,taia,taia_now):\n if DEBUG: os.write(2,'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\\n')\n continue\n\n taias[src] = taia\n\n elif nacltaia.taia_okseconds(OK_SECONDS,taia)<1:\n if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\\n')\n continue\n\n elif cached(h):\n if DEBUG: os.write(2,'nacltaia-otr: error: cached(h)\\n')\n continue\n\n elif dst in os.listdir('unsign/') and src in os.listdir('unsign/'+dst+'/'):\n if DEBUG: os.write(2,'nacltaia-otr: error: dst in os.listdir(\\'unsign/\\') and src in os.listdir(\\'unsign/\\'+dst+\\'/\\')\\n')\n continue\n\n elif nacltaia.taia_okseconds(OK_SECONDS,taia)<1:\n if DEBUG: 
os.write(2,'nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\\n')\n continue\n\n elif cached(h):\n if DEBUG: os.write(2,'nacltaia-otr: error: cached(h)\\n')\n continue\n\n buffer = ' '.join(re_SPLIT_SPACE(buffer,3)[:3]) + ' :' + m.split('\\n',1)[0]\n\n elif dst in os.listdir('unsign/') and src in os.listdir('unsign/'+dst+'/'):\n\n m = base91a.decode(m)\n pk = m[24:56]\n n = m[:24]\n m = nacltaia.crypto_sign_open(m[56:],pk)\n\n if m == 0:\n if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\\n')\n continue\n\n if n != m[:24]:\n if DEBUG: os.write(2,'nacltaia-otr: error: n != m[:24]\\n')\n continue\n\n m = m[24:]\n\n taia = n[:16]\n\n if pk != base91a.hex2bin(open('unsign/'+dst+'/'+src,'rb').read(64)):\n if DEBUG: os.write(2,'nacltaia-otr: error: pk != base91a.hex2bin(open(\\'unsign/\\'+dst+\\'/\\'+src\\'rb\\').read(64))\\n')\n continue\n\n if not src in taias.keys(): taias[src] = taia_now\n\n if not oksrctaia(OK_SECONDS,taia,taia_now):\n if DEBUG: os.write(2,'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\\n')\n continue\n\n taias[src] = taia\n\n buffer = ' '.join(re_SPLIT_SPACE(buffer,3)[:3]) + ' :' + m.split('\\n',1)[0]\n\n elif len(m) >= 56 + 64 and not ' ' in m:\n\n m = re_SPLIT_SPACE_COLON(buffer,3)[3]\n h = nacltaia.crypto_hash_sha256(m)\n m = base91a.decode(re_SPLIT_SPACE_COLON(buffer,3)[3])\n\n if m[16:24] == '\\x00'*8:\n\n n = m[:24]\n pk = m[24:56]\n\n m = nacltaia.crypto_sign_open(m[56:],pk)\n\n if m == 0:\n if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\\n')\n continue\n\n if n != m[:24]:\n if DEBUG: os.write(2,'nacltaia-otr: error: n != m[:24]\\n')\n continue\n\n m = m[24:]\n\n taia = n[:16]\n\n if nacltaia.taia_okseconds(OK_SECONDS,taia)<1:\n if DEBUG: os.write(2,'nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\\n')\n continue\n\n elif cached(h):\n if DEBUG: os.write(2,'nacltaia-otr: error: cached(h)\\n')\n continue\n\n else: m = re_SPLIT_SPACE_COLON(buffer,3)[3]\n\n buffer = ' '.join(re_SPLIT_SPACE(buffer,3)[:3]) + ' :' + m.split('\\n',1)[0]\n\n elif re_322_332(buffer):\n\n dst = re_SPLIT_SPACE(buffer,4)[3].lower()[1:]\n cmd = re_SPLIT_SPACE(buffer,2)[1]\n m = ret_322_332_msg(cmd,buffer)\n\n if dst in os.listdir('chnkey/'):\n\n c = base91a.decode(m)\n\n c = str() if c == 0 else c\n\n n = c[:24]\n c = c[24:]\n k = base91a.hex2bin(open('chnkey/'+dst,'rb').read(64))\n m = nacltaia.crypto_secretbox_open(c,n,k)\n\n m = str() if m == 0 else m\n\n taia = n[:16]\n\n if len(n) >= 16 and taia == '\\x00'*16:\n pk = m[:32]\n m = nacltaia.crypto_sign_open(m[32:],pk)\n m = str() if m == 0 else m\n m = m[24:]\n\n elif len(m) >= 56 + 64 and not ' ' in m:\n\n m = base91a.decode(m)\n\n if m[16:24] == '\\x00'*8:\n pk = m[24:56]\n n = m[:24]\n m = nacltaia.crypto_sign_open(m[56:],pk)\n m = str() if m == 0 else m\n m = m[24:]\n\n else: m = ret_322_332_msg(cmd,buffer)\n\n else: m = ret_322_332_msg(cmd,buffer)\n\n if cmd == '322':\n\n try: m = '[' + re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer,5)[5],2)[1] + '] ' + m\n except: pass\n\n buffer = ' '.join(re_SPLIT_SPACE(buffer,5)[:5]) + ' :' + m.split('\\n',1)[0]\n\n elif cmd == '332': buffer = ' '.join(re_SPLIT_SPACE(buffer,4)[:4]) + ' :' + m.split('\\n',1)[0]\n\n buffer = re_BUFFER_CTCP_DCC('',buffer) + '\\x01' if '\\x01ACTION ' in buffer.upper() else buffer.replace('\\x01','')\n if not COLOUR: buffer = re_BUFFER_COLOUR('',buffer)\n if not UNICODE:\n buffer = 
codecs.ascii_encode(unicodedata.normalize('NFKD',unicode(buffer,'utf-8','replace')),'ignore')[0]\n buffer = ''.join(byte for byte in buffer if 127 > ord(byte) > 31 or byte in ['\\x01','\\x02','\\x03','\\x0f','\\x1d','\\x1f'])\n os.write(1,buffer+'\\n')\n",
"import sys, os\nsys.path.append(os.getcwd())\nimport unicodedata\nimport collections\nimport nacltaia\nimport base91a\nimport codecs\nimport select\nimport socket\nimport time\nimport pwd\nimport re\ntaias = dict()\nRE = 'a-zA-Z0-9^(\\\\)\\\\-_{\\\\}[\\\\]|'\nre_SPLIT_SPACE = re.compile(' +', re.IGNORECASE).split\nre_SPLIT_SPACE_COLON = re.compile(' +:?', re.IGNORECASE).split\nre_SPLIT_BRACKETS = re.compile('\\\\[|]', re.IGNORECASE).split\nre_CRYPTOSERV = re.compile('^:[' + RE + ']+!nacltaia-otr@service', re.\n IGNORECASE).search\nre_NICK_PRIVMSG_NOTICE_TOPIC = re.compile('^:[' + RE + ']+![~' + RE +\n '.]+@[' + RE + '.]+ +((PRIVMSG)|(NOTICE)|(TOPIC)) +[' + RE +\n ']+ +:?.*$', re.IGNORECASE).search\nre_CHANNEL_PRIVMSG_NOTICE_TOPIC = re.compile('^:[' + RE + ']+![~' + RE +\n '.]+@[' + RE + '.]+ +((PRIVMSG)|(NOTICE)|(TOPIC)) +[#&!+][' + RE +\n ']+ +:?.*$', re.IGNORECASE).search\nre_322_332 = re.compile('^:[' + RE + '.]+ +((322)|(332)) +[' + RE +\n ']+ +[#&!+][' + RE + ']+ ?([0-9]+)? +:?.*$', re.IGNORECASE).search\nre_BUFFER_CTCP_DCC = re.compile('\\x01(?!ACTION )', re.IGNORECASE).sub\nre_BUFFER_COLOUR = re.compile(\n '(\\x03[0-9][0-9]?((?<=[0-9]),[0-9]?[0-9]?)?)|[\\x02\\x03\\x0f\\x1d\\x1f]',\n re.IGNORECASE).sub\n\n\ndef oksrctaia(n, taia, taia_now):\n if nacltaia.taia_okseconds(n, taia) < 1:\n return 0\n if nacltaia.taia_new(taia, taias[src]) < 1:\n return 1 if taia_now == taias[src] else 0\n return 1\n\n\ndef cached(h):\n if h in hashcache:\n return 1\n hashcache.append(h)\n return 0\n\n\ndef ret_322_332_msg(cmd, buffer):\n try:\n return re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer, 5)[5], 2)[2][1:\n ] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer, 4)[4]\n except:\n return re_SPLIT_SPACE_COLON(buffer, 5)[5\n ] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer, 4)[4]\n\n\nuid, gid = pwd.getpwnam('nacltaia-otr')[2:4]\nos.chdir('crypto/')\nos.chroot(os.getcwd())\nos.setgid(gid)\nos.setuid(uid)\ndel uid, gid\nipc = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\nfor n in range(0, 9):\n if n == 8:\n sys.exit(128 + 111)\n try:\n ipc.connect('socket')\n del n\n break\n except:\n time.sleep(0.1)\nipc_poll = select.poll()\nipc_poll.register(ipc.fileno(), select.POLLIN | select.POLLPRI)\nipc_poll = ipc_poll.poll\npoll = select.poll()\npoll.register(ipc.fileno(), select.POLLIN | select.POLLPRI)\npoll.register(0, select.POLLIN | select.POLLPRI)\npoll = poll.poll\nDEBUG = int(open('DEBUG', 'rb').read().split('\\n')[0]) if os.path.exists(\n 'DEBUG') else 0\nCOLOUR = int(open('COLOUR', 'rb').read().split('\\n')[0]) if os.path.exists(\n 'COLOUR') else 0\nUNICODE = int(open('UNICODE', 'rb').read().split('\\n')[0]) if os.path.exists(\n 'UNICODE') else 0\nHASH_LOG = int(open('HASH_LOG', 'rb').read().split('\\n')[0]) if os.path.exists(\n 'HASH_LOG') else 256\nOK_SECONDS = int(open('OK_SECONDS', 'rb').read().split('\\n')[0]\n ) if os.path.exists('OK_SECONDS') else 128\nNAMELESS = '\\\\|' if os.path.exists('NAMELESS') and int(open('NAMELESS',\n 'rb').read().split('\\n')[0]) else str()\nre_SPLIT_NAMELESS = re.compile(NAMELESS, re.IGNORECASE).split\nhashcache = collections.deque([], HASH_LOG)\nwhile 1:\n if len(poll(-1)) < 2 and ipc_poll(0):\n h = ipc.recv(32)\n if len(h) < 32:\n sys.exit(128 + 32)\n cached(h)\n continue\n buffer = str()\n while 1:\n byte = os.read(0, 1)\n if byte == '':\n sys.exit(0)\n if byte == '\\n':\n break\n if byte != '\\r' and len(buffer) < 1024:\n buffer += byte\n while ipc_poll(0):\n h = ipc.recv(32)\n if len(h) < 32:\n sys.exit(128 + 32)\n cached(h)\n if re_CRYPTOSERV(buffer):\n 
if DEBUG:\n os.write(2, 'nacltaia-otr: error: re_CRYPTOSERV(buffer)\\n')\n continue\n taia_now = nacltaia.taia_now_pack()\n if re_NICK_PRIVMSG_NOTICE_TOPIC(buffer):\n src = re_SPLIT_NAMELESS(buffer[1:].split('!', 1)[0].lower())[0]\n if src in os.listdir('dstkey/'):\n c = base91a.decode(re_SPLIT_SPACE_COLON(buffer, 3)[3])\n if not c:\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: base91a.decode(re_SPLIT_SPACE_COLON(buffer,3)[3])\n\"\"\"\n )\n continue\n n = c[:24]\n c = c[24:]\n pk = base91a.hex2bin(open('dstkey/' + src, 'rb').read(64))\n sk = base91a.hex2bin(open('seckey', 'rb').read(64))\n c = nacltaia.crypto_box_open(c, n, pk, sk)\n if c == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_box_open(c,n,pk,sk)\\n'\n )\n continue\n m = 0\n taia = n[:16]\n if len(c) >= 32:\n pk = c[:32]\n sk = open('tmpkey/' + src + '/sk', 'rb').read(32)\n m = nacltaia.crypto_box_open(c[32:], n, pk, sk)\n if open('tmpkey/' + src + '/tk', 'rb').read(32) != pk:\n open('tmpkey/' + src + '/tk', 'wb').write(pk)\n else:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: len(c) < 32\\n')\n continue\n if not src in taias.keys():\n taias[src] = taia_now\n if not oksrctaia(OK_SECONDS, taia, taia_now):\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\\n'\n )\n continue\n taias[src] = taia\n if m == 0:\n os.write(1, ':' + buffer[1:].split('!', 1)[0] +\n '!nacltaia-otr@service NOTICE ' + re_SPLIT_SPACE(buffer,\n 3)[2] + \"\"\" :unable to decrypt message\u0007\n\"\"\")\n continue\n else:\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]\n ) + ' :' + m.split('\\n', 1)[0]\n elif re_CHANNEL_PRIVMSG_NOTICE_TOPIC(buffer):\n src = re_SPLIT_NAMELESS(buffer[1:].split('!', 1)[0].lower())[0]\n dst = re_SPLIT_SPACE(buffer, 3)[2].lower()[1:]\n m = re_SPLIT_SPACE_COLON(buffer, 3)[3]\n h = nacltaia.crypto_hash_sha256(m)\n if dst in os.listdir('chnkey/'):\n c = base91a.decode(m)\n if not c:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: base91a.decode(m)\\n')\n continue\n n = c[:24]\n c = c[24:]\n k = base91a.hex2bin(open('chnkey/' + dst, 'rb').read(64))\n m = nacltaia.crypto_secretbox_open(c, n, k)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_secretbox_open(c,n,k)\\n'\n )\n continue\n taia = n[:16]\n if taia == '\\x00' * 16 and len(c) >= 32 + 64 + 24:\n pk = m[:32]\n m = nacltaia.crypto_sign_open(m[32:], pk)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_sign_open(m[32:],pk)\\n'\n )\n continue\n if n != m[:24]:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: n != m[:24]\\n')\n continue\n m = m[24:]\n taia = n[16:] + '\\x00' * 8\n if dst in os.listdir('unsign/') and src in os.listdir(\n 'unsign/' + dst + '/'):\n if pk != base91a.hex2bin(open('unsign/' + dst + '/' +\n src, 'rb').read(64)):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: pk != base91a.hex2bin(open('unsign/'+dst+'/'+src,'rb').read(64))\n\"\"\"\n )\n continue\n if not src in taias.keys():\n taias[src] = taia_now\n if not oksrctaia(OK_SECONDS, taia, taia_now):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\n\"\"\"\n )\n continue\n taias[src] = taia\n elif nacltaia.taia_okseconds(OK_SECONDS, taia) < 1:\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\n\"\"\"\n )\n continue\n elif cached(h):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: cached(h)\\n')\n continue\n elif dst in os.listdir('unsign/') and src in os.listdir(\n 'unsign/' + dst + '/'):\n if 
DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: dst in os.listdir('unsign/') and src in os.listdir('unsign/'+dst+'/')\n\"\"\"\n )\n continue\n elif nacltaia.taia_okseconds(OK_SECONDS, taia) < 1:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\\n'\n )\n continue\n elif cached(h):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: cached(h)\\n')\n continue\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]) + ' :' + m.split(\n '\\n', 1)[0]\n elif dst in os.listdir('unsign/') and src in os.listdir('unsign/' +\n dst + '/'):\n m = base91a.decode(m)\n pk = m[24:56]\n n = m[:24]\n m = nacltaia.crypto_sign_open(m[56:], pk)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\\n'\n )\n continue\n if n != m[:24]:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: n != m[:24]\\n')\n continue\n m = m[24:]\n taia = n[:16]\n if pk != base91a.hex2bin(open('unsign/' + dst + '/' + src, 'rb'\n ).read(64)):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: pk != base91a.hex2bin(open('unsign/'+dst+'/'+src'rb').read(64))\n\"\"\"\n )\n continue\n if not src in taias.keys():\n taias[src] = taia_now\n if not oksrctaia(OK_SECONDS, taia, taia_now):\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\\n'\n )\n continue\n taias[src] = taia\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]) + ' :' + m.split(\n '\\n', 1)[0]\n elif len(m) >= 56 + 64 and not ' ' in m:\n m = re_SPLIT_SPACE_COLON(buffer, 3)[3]\n h = nacltaia.crypto_hash_sha256(m)\n m = base91a.decode(re_SPLIT_SPACE_COLON(buffer, 3)[3])\n if m[16:24] == '\\x00' * 8:\n n = m[:24]\n pk = m[24:56]\n m = nacltaia.crypto_sign_open(m[56:], pk)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\\n'\n )\n continue\n if n != m[:24]:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: n != m[:24]\\n')\n continue\n m = m[24:]\n taia = n[:16]\n if nacltaia.taia_okseconds(OK_SECONDS, taia) < 1:\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\n\"\"\"\n )\n continue\n elif cached(h):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: cached(h)\\n')\n continue\n else:\n m = re_SPLIT_SPACE_COLON(buffer, 3)[3]\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]) + ' :' + m.split(\n '\\n', 1)[0]\n elif re_322_332(buffer):\n dst = re_SPLIT_SPACE(buffer, 4)[3].lower()[1:]\n cmd = re_SPLIT_SPACE(buffer, 2)[1]\n m = ret_322_332_msg(cmd, buffer)\n if dst in os.listdir('chnkey/'):\n c = base91a.decode(m)\n c = str() if c == 0 else c\n n = c[:24]\n c = c[24:]\n k = base91a.hex2bin(open('chnkey/' + dst, 'rb').read(64))\n m = nacltaia.crypto_secretbox_open(c, n, k)\n m = str() if m == 0 else m\n taia = n[:16]\n if len(n) >= 16 and taia == '\\x00' * 16:\n pk = m[:32]\n m = nacltaia.crypto_sign_open(m[32:], pk)\n m = str() if m == 0 else m\n m = m[24:]\n elif len(m) >= 56 + 64 and not ' ' in m:\n m = base91a.decode(m)\n if m[16:24] == '\\x00' * 8:\n pk = m[24:56]\n n = m[:24]\n m = nacltaia.crypto_sign_open(m[56:], pk)\n m = str() if m == 0 else m\n m = m[24:]\n else:\n m = ret_322_332_msg(cmd, buffer)\n else:\n m = ret_322_332_msg(cmd, buffer)\n if cmd == '322':\n try:\n m = '[' + re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer, 5)\n [5], 2)[1] + '] ' + m\n except:\n pass\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 5)[:5]) + ' :' + m.split(\n '\\n', 1)[0]\n elif cmd == '332':\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 4)[:4]) + ' :' + m.split(\n '\\n', 1)[0]\n buffer = 
re_BUFFER_CTCP_DCC('', buffer\n ) + '\\x01' if '\\x01ACTION ' in buffer.upper() else buffer.replace(\n '\\x01', '')\n if not COLOUR:\n buffer = re_BUFFER_COLOUR('', buffer)\n if not UNICODE:\n buffer = codecs.ascii_encode(unicodedata.normalize('NFKD', unicode(\n buffer, 'utf-8', 'replace')), 'ignore')[0]\n buffer = ''.join(byte for byte in buffer if 127 > ord(byte) > 31 or\n byte in ['\\x01', '\\x02', '\\x03', '\\x0f', '\\x1d', '\\x1f'])\n os.write(1, buffer + '\\n')\n",
"<import token>\nsys.path.append(os.getcwd())\n<import token>\ntaias = dict()\nRE = 'a-zA-Z0-9^(\\\\)\\\\-_{\\\\}[\\\\]|'\nre_SPLIT_SPACE = re.compile(' +', re.IGNORECASE).split\nre_SPLIT_SPACE_COLON = re.compile(' +:?', re.IGNORECASE).split\nre_SPLIT_BRACKETS = re.compile('\\\\[|]', re.IGNORECASE).split\nre_CRYPTOSERV = re.compile('^:[' + RE + ']+!nacltaia-otr@service', re.\n IGNORECASE).search\nre_NICK_PRIVMSG_NOTICE_TOPIC = re.compile('^:[' + RE + ']+![~' + RE +\n '.]+@[' + RE + '.]+ +((PRIVMSG)|(NOTICE)|(TOPIC)) +[' + RE +\n ']+ +:?.*$', re.IGNORECASE).search\nre_CHANNEL_PRIVMSG_NOTICE_TOPIC = re.compile('^:[' + RE + ']+![~' + RE +\n '.]+@[' + RE + '.]+ +((PRIVMSG)|(NOTICE)|(TOPIC)) +[#&!+][' + RE +\n ']+ +:?.*$', re.IGNORECASE).search\nre_322_332 = re.compile('^:[' + RE + '.]+ +((322)|(332)) +[' + RE +\n ']+ +[#&!+][' + RE + ']+ ?([0-9]+)? +:?.*$', re.IGNORECASE).search\nre_BUFFER_CTCP_DCC = re.compile('\\x01(?!ACTION )', re.IGNORECASE).sub\nre_BUFFER_COLOUR = re.compile(\n '(\\x03[0-9][0-9]?((?<=[0-9]),[0-9]?[0-9]?)?)|[\\x02\\x03\\x0f\\x1d\\x1f]',\n re.IGNORECASE).sub\n\n\ndef oksrctaia(n, taia, taia_now):\n if nacltaia.taia_okseconds(n, taia) < 1:\n return 0\n if nacltaia.taia_new(taia, taias[src]) < 1:\n return 1 if taia_now == taias[src] else 0\n return 1\n\n\ndef cached(h):\n if h in hashcache:\n return 1\n hashcache.append(h)\n return 0\n\n\ndef ret_322_332_msg(cmd, buffer):\n try:\n return re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer, 5)[5], 2)[2][1:\n ] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer, 4)[4]\n except:\n return re_SPLIT_SPACE_COLON(buffer, 5)[5\n ] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer, 4)[4]\n\n\nuid, gid = pwd.getpwnam('nacltaia-otr')[2:4]\nos.chdir('crypto/')\nos.chroot(os.getcwd())\nos.setgid(gid)\nos.setuid(uid)\ndel uid, gid\nipc = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\nfor n in range(0, 9):\n if n == 8:\n sys.exit(128 + 111)\n try:\n ipc.connect('socket')\n del n\n break\n except:\n time.sleep(0.1)\nipc_poll = select.poll()\nipc_poll.register(ipc.fileno(), select.POLLIN | select.POLLPRI)\nipc_poll = ipc_poll.poll\npoll = select.poll()\npoll.register(ipc.fileno(), select.POLLIN | select.POLLPRI)\npoll.register(0, select.POLLIN | select.POLLPRI)\npoll = poll.poll\nDEBUG = int(open('DEBUG', 'rb').read().split('\\n')[0]) if os.path.exists(\n 'DEBUG') else 0\nCOLOUR = int(open('COLOUR', 'rb').read().split('\\n')[0]) if os.path.exists(\n 'COLOUR') else 0\nUNICODE = int(open('UNICODE', 'rb').read().split('\\n')[0]) if os.path.exists(\n 'UNICODE') else 0\nHASH_LOG = int(open('HASH_LOG', 'rb').read().split('\\n')[0]) if os.path.exists(\n 'HASH_LOG') else 256\nOK_SECONDS = int(open('OK_SECONDS', 'rb').read().split('\\n')[0]\n ) if os.path.exists('OK_SECONDS') else 128\nNAMELESS = '\\\\|' if os.path.exists('NAMELESS') and int(open('NAMELESS',\n 'rb').read().split('\\n')[0]) else str()\nre_SPLIT_NAMELESS = re.compile(NAMELESS, re.IGNORECASE).split\nhashcache = collections.deque([], HASH_LOG)\nwhile 1:\n if len(poll(-1)) < 2 and ipc_poll(0):\n h = ipc.recv(32)\n if len(h) < 32:\n sys.exit(128 + 32)\n cached(h)\n continue\n buffer = str()\n while 1:\n byte = os.read(0, 1)\n if byte == '':\n sys.exit(0)\n if byte == '\\n':\n break\n if byte != '\\r' and len(buffer) < 1024:\n buffer += byte\n while ipc_poll(0):\n h = ipc.recv(32)\n if len(h) < 32:\n sys.exit(128 + 32)\n cached(h)\n if re_CRYPTOSERV(buffer):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: re_CRYPTOSERV(buffer)\\n')\n continue\n taia_now = nacltaia.taia_now_pack()\n if 
re_NICK_PRIVMSG_NOTICE_TOPIC(buffer):\n src = re_SPLIT_NAMELESS(buffer[1:].split('!', 1)[0].lower())[0]\n if src in os.listdir('dstkey/'):\n c = base91a.decode(re_SPLIT_SPACE_COLON(buffer, 3)[3])\n if not c:\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: base91a.decode(re_SPLIT_SPACE_COLON(buffer,3)[3])\n\"\"\"\n )\n continue\n n = c[:24]\n c = c[24:]\n pk = base91a.hex2bin(open('dstkey/' + src, 'rb').read(64))\n sk = base91a.hex2bin(open('seckey', 'rb').read(64))\n c = nacltaia.crypto_box_open(c, n, pk, sk)\n if c == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_box_open(c,n,pk,sk)\\n'\n )\n continue\n m = 0\n taia = n[:16]\n if len(c) >= 32:\n pk = c[:32]\n sk = open('tmpkey/' + src + '/sk', 'rb').read(32)\n m = nacltaia.crypto_box_open(c[32:], n, pk, sk)\n if open('tmpkey/' + src + '/tk', 'rb').read(32) != pk:\n open('tmpkey/' + src + '/tk', 'wb').write(pk)\n else:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: len(c) < 32\\n')\n continue\n if not src in taias.keys():\n taias[src] = taia_now\n if not oksrctaia(OK_SECONDS, taia, taia_now):\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\\n'\n )\n continue\n taias[src] = taia\n if m == 0:\n os.write(1, ':' + buffer[1:].split('!', 1)[0] +\n '!nacltaia-otr@service NOTICE ' + re_SPLIT_SPACE(buffer,\n 3)[2] + \"\"\" :unable to decrypt message\u0007\n\"\"\")\n continue\n else:\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]\n ) + ' :' + m.split('\\n', 1)[0]\n elif re_CHANNEL_PRIVMSG_NOTICE_TOPIC(buffer):\n src = re_SPLIT_NAMELESS(buffer[1:].split('!', 1)[0].lower())[0]\n dst = re_SPLIT_SPACE(buffer, 3)[2].lower()[1:]\n m = re_SPLIT_SPACE_COLON(buffer, 3)[3]\n h = nacltaia.crypto_hash_sha256(m)\n if dst in os.listdir('chnkey/'):\n c = base91a.decode(m)\n if not c:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: base91a.decode(m)\\n')\n continue\n n = c[:24]\n c = c[24:]\n k = base91a.hex2bin(open('chnkey/' + dst, 'rb').read(64))\n m = nacltaia.crypto_secretbox_open(c, n, k)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_secretbox_open(c,n,k)\\n'\n )\n continue\n taia = n[:16]\n if taia == '\\x00' * 16 and len(c) >= 32 + 64 + 24:\n pk = m[:32]\n m = nacltaia.crypto_sign_open(m[32:], pk)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_sign_open(m[32:],pk)\\n'\n )\n continue\n if n != m[:24]:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: n != m[:24]\\n')\n continue\n m = m[24:]\n taia = n[16:] + '\\x00' * 8\n if dst in os.listdir('unsign/') and src in os.listdir(\n 'unsign/' + dst + '/'):\n if pk != base91a.hex2bin(open('unsign/' + dst + '/' +\n src, 'rb').read(64)):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: pk != base91a.hex2bin(open('unsign/'+dst+'/'+src,'rb').read(64))\n\"\"\"\n )\n continue\n if not src in taias.keys():\n taias[src] = taia_now\n if not oksrctaia(OK_SECONDS, taia, taia_now):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\n\"\"\"\n )\n continue\n taias[src] = taia\n elif nacltaia.taia_okseconds(OK_SECONDS, taia) < 1:\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\n\"\"\"\n )\n continue\n elif cached(h):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: cached(h)\\n')\n continue\n elif dst in os.listdir('unsign/') and src in os.listdir(\n 'unsign/' + dst + '/'):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: dst in os.listdir('unsign/') and src in 
os.listdir('unsign/'+dst+'/')\n\"\"\"\n )\n continue\n elif nacltaia.taia_okseconds(OK_SECONDS, taia) < 1:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\\n'\n )\n continue\n elif cached(h):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: cached(h)\\n')\n continue\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]) + ' :' + m.split(\n '\\n', 1)[0]\n elif dst in os.listdir('unsign/') and src in os.listdir('unsign/' +\n dst + '/'):\n m = base91a.decode(m)\n pk = m[24:56]\n n = m[:24]\n m = nacltaia.crypto_sign_open(m[56:], pk)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\\n'\n )\n continue\n if n != m[:24]:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: n != m[:24]\\n')\n continue\n m = m[24:]\n taia = n[:16]\n if pk != base91a.hex2bin(open('unsign/' + dst + '/' + src, 'rb'\n ).read(64)):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: pk != base91a.hex2bin(open('unsign/'+dst+'/'+src'rb').read(64))\n\"\"\"\n )\n continue\n if not src in taias.keys():\n taias[src] = taia_now\n if not oksrctaia(OK_SECONDS, taia, taia_now):\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\\n'\n )\n continue\n taias[src] = taia\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]) + ' :' + m.split(\n '\\n', 1)[0]\n elif len(m) >= 56 + 64 and not ' ' in m:\n m = re_SPLIT_SPACE_COLON(buffer, 3)[3]\n h = nacltaia.crypto_hash_sha256(m)\n m = base91a.decode(re_SPLIT_SPACE_COLON(buffer, 3)[3])\n if m[16:24] == '\\x00' * 8:\n n = m[:24]\n pk = m[24:56]\n m = nacltaia.crypto_sign_open(m[56:], pk)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\\n'\n )\n continue\n if n != m[:24]:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: n != m[:24]\\n')\n continue\n m = m[24:]\n taia = n[:16]\n if nacltaia.taia_okseconds(OK_SECONDS, taia) < 1:\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\n\"\"\"\n )\n continue\n elif cached(h):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: cached(h)\\n')\n continue\n else:\n m = re_SPLIT_SPACE_COLON(buffer, 3)[3]\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]) + ' :' + m.split(\n '\\n', 1)[0]\n elif re_322_332(buffer):\n dst = re_SPLIT_SPACE(buffer, 4)[3].lower()[1:]\n cmd = re_SPLIT_SPACE(buffer, 2)[1]\n m = ret_322_332_msg(cmd, buffer)\n if dst in os.listdir('chnkey/'):\n c = base91a.decode(m)\n c = str() if c == 0 else c\n n = c[:24]\n c = c[24:]\n k = base91a.hex2bin(open('chnkey/' + dst, 'rb').read(64))\n m = nacltaia.crypto_secretbox_open(c, n, k)\n m = str() if m == 0 else m\n taia = n[:16]\n if len(n) >= 16 and taia == '\\x00' * 16:\n pk = m[:32]\n m = nacltaia.crypto_sign_open(m[32:], pk)\n m = str() if m == 0 else m\n m = m[24:]\n elif len(m) >= 56 + 64 and not ' ' in m:\n m = base91a.decode(m)\n if m[16:24] == '\\x00' * 8:\n pk = m[24:56]\n n = m[:24]\n m = nacltaia.crypto_sign_open(m[56:], pk)\n m = str() if m == 0 else m\n m = m[24:]\n else:\n m = ret_322_332_msg(cmd, buffer)\n else:\n m = ret_322_332_msg(cmd, buffer)\n if cmd == '322':\n try:\n m = '[' + re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer, 5)\n [5], 2)[1] + '] ' + m\n except:\n pass\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 5)[:5]) + ' :' + m.split(\n '\\n', 1)[0]\n elif cmd == '332':\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 4)[:4]) + ' :' + m.split(\n '\\n', 1)[0]\n buffer = re_BUFFER_CTCP_DCC('', buffer\n ) + '\\x01' if '\\x01ACTION ' in buffer.upper() else 
buffer.replace(\n '\\x01', '')\n if not COLOUR:\n buffer = re_BUFFER_COLOUR('', buffer)\n if not UNICODE:\n buffer = codecs.ascii_encode(unicodedata.normalize('NFKD', unicode(\n buffer, 'utf-8', 'replace')), 'ignore')[0]\n buffer = ''.join(byte for byte in buffer if 127 > ord(byte) > 31 or\n byte in ['\\x01', '\\x02', '\\x03', '\\x0f', '\\x1d', '\\x1f'])\n os.write(1, buffer + '\\n')\n",
"<import token>\nsys.path.append(os.getcwd())\n<import token>\n<assignment token>\n\n\ndef oksrctaia(n, taia, taia_now):\n if nacltaia.taia_okseconds(n, taia) < 1:\n return 0\n if nacltaia.taia_new(taia, taias[src]) < 1:\n return 1 if taia_now == taias[src] else 0\n return 1\n\n\ndef cached(h):\n if h in hashcache:\n return 1\n hashcache.append(h)\n return 0\n\n\ndef ret_322_332_msg(cmd, buffer):\n try:\n return re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer, 5)[5], 2)[2][1:\n ] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer, 4)[4]\n except:\n return re_SPLIT_SPACE_COLON(buffer, 5)[5\n ] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer, 4)[4]\n\n\n<assignment token>\nos.chdir('crypto/')\nos.chroot(os.getcwd())\nos.setgid(gid)\nos.setuid(uid)\ndel uid, gid\n<assignment token>\nfor n in range(0, 9):\n if n == 8:\n sys.exit(128 + 111)\n try:\n ipc.connect('socket')\n del n\n break\n except:\n time.sleep(0.1)\n<assignment token>\nipc_poll.register(ipc.fileno(), select.POLLIN | select.POLLPRI)\n<assignment token>\npoll.register(ipc.fileno(), select.POLLIN | select.POLLPRI)\npoll.register(0, select.POLLIN | select.POLLPRI)\n<assignment token>\nwhile 1:\n if len(poll(-1)) < 2 and ipc_poll(0):\n h = ipc.recv(32)\n if len(h) < 32:\n sys.exit(128 + 32)\n cached(h)\n continue\n buffer = str()\n while 1:\n byte = os.read(0, 1)\n if byte == '':\n sys.exit(0)\n if byte == '\\n':\n break\n if byte != '\\r' and len(buffer) < 1024:\n buffer += byte\n while ipc_poll(0):\n h = ipc.recv(32)\n if len(h) < 32:\n sys.exit(128 + 32)\n cached(h)\n if re_CRYPTOSERV(buffer):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: re_CRYPTOSERV(buffer)\\n')\n continue\n taia_now = nacltaia.taia_now_pack()\n if re_NICK_PRIVMSG_NOTICE_TOPIC(buffer):\n src = re_SPLIT_NAMELESS(buffer[1:].split('!', 1)[0].lower())[0]\n if src in os.listdir('dstkey/'):\n c = base91a.decode(re_SPLIT_SPACE_COLON(buffer, 3)[3])\n if not c:\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: base91a.decode(re_SPLIT_SPACE_COLON(buffer,3)[3])\n\"\"\"\n )\n continue\n n = c[:24]\n c = c[24:]\n pk = base91a.hex2bin(open('dstkey/' + src, 'rb').read(64))\n sk = base91a.hex2bin(open('seckey', 'rb').read(64))\n c = nacltaia.crypto_box_open(c, n, pk, sk)\n if c == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_box_open(c,n,pk,sk)\\n'\n )\n continue\n m = 0\n taia = n[:16]\n if len(c) >= 32:\n pk = c[:32]\n sk = open('tmpkey/' + src + '/sk', 'rb').read(32)\n m = nacltaia.crypto_box_open(c[32:], n, pk, sk)\n if open('tmpkey/' + src + '/tk', 'rb').read(32) != pk:\n open('tmpkey/' + src + '/tk', 'wb').write(pk)\n else:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: len(c) < 32\\n')\n continue\n if not src in taias.keys():\n taias[src] = taia_now\n if not oksrctaia(OK_SECONDS, taia, taia_now):\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\\n'\n )\n continue\n taias[src] = taia\n if m == 0:\n os.write(1, ':' + buffer[1:].split('!', 1)[0] +\n '!nacltaia-otr@service NOTICE ' + re_SPLIT_SPACE(buffer,\n 3)[2] + \"\"\" :unable to decrypt message\u0007\n\"\"\")\n continue\n else:\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]\n ) + ' :' + m.split('\\n', 1)[0]\n elif re_CHANNEL_PRIVMSG_NOTICE_TOPIC(buffer):\n src = re_SPLIT_NAMELESS(buffer[1:].split('!', 1)[0].lower())[0]\n dst = re_SPLIT_SPACE(buffer, 3)[2].lower()[1:]\n m = re_SPLIT_SPACE_COLON(buffer, 3)[3]\n h = nacltaia.crypto_hash_sha256(m)\n if dst in os.listdir('chnkey/'):\n c = base91a.decode(m)\n if not c:\n if DEBUG:\n os.write(2, 
'nacltaia-otr: error: base91a.decode(m)\\n')\n continue\n n = c[:24]\n c = c[24:]\n k = base91a.hex2bin(open('chnkey/' + dst, 'rb').read(64))\n m = nacltaia.crypto_secretbox_open(c, n, k)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_secretbox_open(c,n,k)\\n'\n )\n continue\n taia = n[:16]\n if taia == '\\x00' * 16 and len(c) >= 32 + 64 + 24:\n pk = m[:32]\n m = nacltaia.crypto_sign_open(m[32:], pk)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_sign_open(m[32:],pk)\\n'\n )\n continue\n if n != m[:24]:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: n != m[:24]\\n')\n continue\n m = m[24:]\n taia = n[16:] + '\\x00' * 8\n if dst in os.listdir('unsign/') and src in os.listdir(\n 'unsign/' + dst + '/'):\n if pk != base91a.hex2bin(open('unsign/' + dst + '/' +\n src, 'rb').read(64)):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: pk != base91a.hex2bin(open('unsign/'+dst+'/'+src,'rb').read(64))\n\"\"\"\n )\n continue\n if not src in taias.keys():\n taias[src] = taia_now\n if not oksrctaia(OK_SECONDS, taia, taia_now):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\n\"\"\"\n )\n continue\n taias[src] = taia\n elif nacltaia.taia_okseconds(OK_SECONDS, taia) < 1:\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\n\"\"\"\n )\n continue\n elif cached(h):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: cached(h)\\n')\n continue\n elif dst in os.listdir('unsign/') and src in os.listdir(\n 'unsign/' + dst + '/'):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: dst in os.listdir('unsign/') and src in os.listdir('unsign/'+dst+'/')\n\"\"\"\n )\n continue\n elif nacltaia.taia_okseconds(OK_SECONDS, taia) < 1:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\\n'\n )\n continue\n elif cached(h):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: cached(h)\\n')\n continue\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]) + ' :' + m.split(\n '\\n', 1)[0]\n elif dst in os.listdir('unsign/') and src in os.listdir('unsign/' +\n dst + '/'):\n m = base91a.decode(m)\n pk = m[24:56]\n n = m[:24]\n m = nacltaia.crypto_sign_open(m[56:], pk)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\\n'\n )\n continue\n if n != m[:24]:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: n != m[:24]\\n')\n continue\n m = m[24:]\n taia = n[:16]\n if pk != base91a.hex2bin(open('unsign/' + dst + '/' + src, 'rb'\n ).read(64)):\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: pk != base91a.hex2bin(open('unsign/'+dst+'/'+src'rb').read(64))\n\"\"\"\n )\n continue\n if not src in taias.keys():\n taias[src] = taia_now\n if not oksrctaia(OK_SECONDS, taia, taia_now):\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: oksrctaia(OK_SECONDS,taia,taia_now)\\n'\n )\n continue\n taias[src] = taia\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]) + ' :' + m.split(\n '\\n', 1)[0]\n elif len(m) >= 56 + 64 and not ' ' in m:\n m = re_SPLIT_SPACE_COLON(buffer, 3)[3]\n h = nacltaia.crypto_hash_sha256(m)\n m = base91a.decode(re_SPLIT_SPACE_COLON(buffer, 3)[3])\n if m[16:24] == '\\x00' * 8:\n n = m[:24]\n pk = m[24:56]\n m = nacltaia.crypto_sign_open(m[56:], pk)\n if m == 0:\n if DEBUG:\n os.write(2,\n 'nacltaia-otr: error: nacltaia.crypto_sign_open(m[56:],pk)\\n'\n )\n continue\n if n != m[:24]:\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: n != m[:24]\\n')\n continue\n m = m[24:]\n taia = n[:16]\n 
if nacltaia.taia_okseconds(OK_SECONDS, taia) < 1:\n if DEBUG:\n os.write(2,\n \"\"\"nacltaia-otr: error: nacltaia.taia_okseconds(OK_SECONDS,taia)\n\"\"\"\n )\n continue\n elif cached(h):\n if DEBUG:\n os.write(2, 'nacltaia-otr: error: cached(h)\\n')\n continue\n else:\n m = re_SPLIT_SPACE_COLON(buffer, 3)[3]\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 3)[:3]) + ' :' + m.split(\n '\\n', 1)[0]\n elif re_322_332(buffer):\n dst = re_SPLIT_SPACE(buffer, 4)[3].lower()[1:]\n cmd = re_SPLIT_SPACE(buffer, 2)[1]\n m = ret_322_332_msg(cmd, buffer)\n if dst in os.listdir('chnkey/'):\n c = base91a.decode(m)\n c = str() if c == 0 else c\n n = c[:24]\n c = c[24:]\n k = base91a.hex2bin(open('chnkey/' + dst, 'rb').read(64))\n m = nacltaia.crypto_secretbox_open(c, n, k)\n m = str() if m == 0 else m\n taia = n[:16]\n if len(n) >= 16 and taia == '\\x00' * 16:\n pk = m[:32]\n m = nacltaia.crypto_sign_open(m[32:], pk)\n m = str() if m == 0 else m\n m = m[24:]\n elif len(m) >= 56 + 64 and not ' ' in m:\n m = base91a.decode(m)\n if m[16:24] == '\\x00' * 8:\n pk = m[24:56]\n n = m[:24]\n m = nacltaia.crypto_sign_open(m[56:], pk)\n m = str() if m == 0 else m\n m = m[24:]\n else:\n m = ret_322_332_msg(cmd, buffer)\n else:\n m = ret_322_332_msg(cmd, buffer)\n if cmd == '322':\n try:\n m = '[' + re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer, 5)\n [5], 2)[1] + '] ' + m\n except:\n pass\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 5)[:5]) + ' :' + m.split(\n '\\n', 1)[0]\n elif cmd == '332':\n buffer = ' '.join(re_SPLIT_SPACE(buffer, 4)[:4]) + ' :' + m.split(\n '\\n', 1)[0]\n buffer = re_BUFFER_CTCP_DCC('', buffer\n ) + '\\x01' if '\\x01ACTION ' in buffer.upper() else buffer.replace(\n '\\x01', '')\n if not COLOUR:\n buffer = re_BUFFER_COLOUR('', buffer)\n if not UNICODE:\n buffer = codecs.ascii_encode(unicodedata.normalize('NFKD', unicode(\n buffer, 'utf-8', 'replace')), 'ignore')[0]\n buffer = ''.join(byte for byte in buffer if 127 > ord(byte) > 31 or\n byte in ['\\x01', '\\x02', '\\x03', '\\x0f', '\\x1d', '\\x1f'])\n os.write(1, buffer + '\\n')\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef oksrctaia(n, taia, taia_now):\n if nacltaia.taia_okseconds(n, taia) < 1:\n return 0\n if nacltaia.taia_new(taia, taias[src]) < 1:\n return 1 if taia_now == taias[src] else 0\n return 1\n\n\ndef cached(h):\n if h in hashcache:\n return 1\n hashcache.append(h)\n return 0\n\n\ndef ret_322_332_msg(cmd, buffer):\n try:\n return re_SPLIT_BRACKETS(re_SPLIT_SPACE_COLON(buffer, 5)[5], 2)[2][1:\n ] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer, 4)[4]\n except:\n return re_SPLIT_SPACE_COLON(buffer, 5)[5\n ] if cmd == '322' else re_SPLIT_SPACE_COLON(buffer, 4)[4]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef oksrctaia(n, taia, taia_now):\n if nacltaia.taia_okseconds(n, taia) < 1:\n return 0\n if nacltaia.taia_new(taia, taias[src]) < 1:\n return 1 if taia_now == taias[src] else 0\n return 1\n\n\ndef cached(h):\n if h in hashcache:\n return 1\n hashcache.append(h)\n return 0\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef oksrctaia(n, taia, taia_now):\n if nacltaia.taia_okseconds(n, taia) < 1:\n return 0\n if nacltaia.taia_new(taia, taias[src]) < 1:\n return 1 if taia_now == taias[src] else 0\n return 1\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,857 |
19ac4344c7de6ba3581600e4cf2b934a06005656
|
# Copyright © 2021 Ingram Micro Inc. All rights reserved.
import sys
from dj_cqrs.registries import MasterRegistry
from django.core.management.base import BaseCommand, CommandError
from django.db import connection
import ujson
GET_NON_EXISTING_PKS_SQL_TEMPLATE = """
SELECT t.pk
FROM (
WITH t0(pk) AS (
VALUES {values}
)
SELECT *
FROM t0
) t
LEFT JOIN {table} m ON m.{pk_field} = t.pk
WHERE m.{pk_field} IS NULL
"""
class Command(BaseCommand):
help = 'Diff of deleted CQRS models pks from master diff stream.'
@classmethod
def serialize_out(cls, package):
return ujson.dumps(package)
@classmethod
def deserialize_in(cls, package_line):
return ujson.loads(package_line)
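    # Expected stdin format (inferred from the code below): the first line
    # carries the cqrs_id of the model as its first comma-separated field,
    # and every following line is a JSON-serialized list of primary keys.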
def handle(self, *args, **options):
with sys.stdin as f:
first_line = f.readline()
model = self._get_model(first_line)
self.stdout.write(first_line.strip())
with connection.cursor() as cursor:
for package_line in f:
master_data = self.deserialize_in(package_line)
sql = GET_NON_EXISTING_PKS_SQL_TEMPLATE.format(
values=','.join(["({0})".format(pk) for pk in master_data]),
table=model._meta.db_table,
pk_field=model._meta.pk.attname,
)
cursor.execute(sql)
diff_ids = [r[0] for r in cursor.fetchall()]
if diff_ids:
self.stdout.write(self.serialize_out(diff_ids))
self.stderr.write('PK to delete: {0}'.format(str(diff_ids)))
@staticmethod
def _get_model(first_line):
cqrs_id = first_line.split(',')[0]
model = MasterRegistry.get_model_by_cqrs_id(cqrs_id)
if not model:
raise CommandError('Wrong CQRS ID: {0}!'.format(cqrs_id))
return model
|
[
"# Copyright © 2021 Ingram Micro Inc. All rights reserved.\n\nimport sys\n\nfrom dj_cqrs.registries import MasterRegistry\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db import connection\n\nimport ujson\n\n\nGET_NON_EXISTING_PKS_SQL_TEMPLATE = \"\"\"\nSELECT t.pk\nFROM (\n WITH t0(pk) AS (\n VALUES {values}\n )\n SELECT *\n FROM t0\n ) t\nLEFT JOIN {table} m ON m.{pk_field} = t.pk\nWHERE m.{pk_field} IS NULL\n\"\"\"\n\n\nclass Command(BaseCommand):\n help = 'Diff of deleted CQRS models pks from master diff stream.'\n\n @classmethod\n def serialize_out(cls, package):\n return ujson.dumps(package)\n\n @classmethod\n def deserialize_in(cls, package_line):\n return ujson.loads(package_line)\n\n def handle(self, *args, **options):\n with sys.stdin as f:\n first_line = f.readline()\n model = self._get_model(first_line)\n self.stdout.write(first_line.strip())\n\n with connection.cursor() as cursor:\n for package_line in f:\n master_data = self.deserialize_in(package_line)\n\n sql = GET_NON_EXISTING_PKS_SQL_TEMPLATE.format(\n values=','.join([\"({0})\".format(pk) for pk in master_data]),\n table=model._meta.db_table,\n pk_field=model._meta.pk.attname,\n )\n\n cursor.execute(sql)\n diff_ids = [r[0] for r in cursor.fetchall()]\n if diff_ids:\n self.stdout.write(self.serialize_out(diff_ids))\n self.stderr.write('PK to delete: {0}'.format(str(diff_ids)))\n\n @staticmethod\n def _get_model(first_line):\n cqrs_id = first_line.split(',')[0]\n model = MasterRegistry.get_model_by_cqrs_id(cqrs_id)\n\n if not model:\n raise CommandError('Wrong CQRS ID: {0}!'.format(cqrs_id))\n\n return model\n",
"import sys\nfrom dj_cqrs.registries import MasterRegistry\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db import connection\nimport ujson\nGET_NON_EXISTING_PKS_SQL_TEMPLATE = \"\"\"\nSELECT t.pk\nFROM (\n WITH t0(pk) AS (\n VALUES {values}\n )\n SELECT *\n FROM t0\n ) t\nLEFT JOIN {table} m ON m.{pk_field} = t.pk\nWHERE m.{pk_field} IS NULL\n\"\"\"\n\n\nclass Command(BaseCommand):\n help = 'Diff of deleted CQRS models pks from master diff stream.'\n\n @classmethod\n def serialize_out(cls, package):\n return ujson.dumps(package)\n\n @classmethod\n def deserialize_in(cls, package_line):\n return ujson.loads(package_line)\n\n def handle(self, *args, **options):\n with sys.stdin as f:\n first_line = f.readline()\n model = self._get_model(first_line)\n self.stdout.write(first_line.strip())\n with connection.cursor() as cursor:\n for package_line in f:\n master_data = self.deserialize_in(package_line)\n sql = GET_NON_EXISTING_PKS_SQL_TEMPLATE.format(values=\n ','.join(['({0})'.format(pk) for pk in master_data]\n ), table=model._meta.db_table, pk_field=model._meta\n .pk.attname)\n cursor.execute(sql)\n diff_ids = [r[0] for r in cursor.fetchall()]\n if diff_ids:\n self.stdout.write(self.serialize_out(diff_ids))\n self.stderr.write('PK to delete: {0}'.format(str(\n diff_ids)))\n\n @staticmethod\n def _get_model(first_line):\n cqrs_id = first_line.split(',')[0]\n model = MasterRegistry.get_model_by_cqrs_id(cqrs_id)\n if not model:\n raise CommandError('Wrong CQRS ID: {0}!'.format(cqrs_id))\n return model\n",
"<import token>\nGET_NON_EXISTING_PKS_SQL_TEMPLATE = \"\"\"\nSELECT t.pk\nFROM (\n WITH t0(pk) AS (\n VALUES {values}\n )\n SELECT *\n FROM t0\n ) t\nLEFT JOIN {table} m ON m.{pk_field} = t.pk\nWHERE m.{pk_field} IS NULL\n\"\"\"\n\n\nclass Command(BaseCommand):\n help = 'Diff of deleted CQRS models pks from master diff stream.'\n\n @classmethod\n def serialize_out(cls, package):\n return ujson.dumps(package)\n\n @classmethod\n def deserialize_in(cls, package_line):\n return ujson.loads(package_line)\n\n def handle(self, *args, **options):\n with sys.stdin as f:\n first_line = f.readline()\n model = self._get_model(first_line)\n self.stdout.write(first_line.strip())\n with connection.cursor() as cursor:\n for package_line in f:\n master_data = self.deserialize_in(package_line)\n sql = GET_NON_EXISTING_PKS_SQL_TEMPLATE.format(values=\n ','.join(['({0})'.format(pk) for pk in master_data]\n ), table=model._meta.db_table, pk_field=model._meta\n .pk.attname)\n cursor.execute(sql)\n diff_ids = [r[0] for r in cursor.fetchall()]\n if diff_ids:\n self.stdout.write(self.serialize_out(diff_ids))\n self.stderr.write('PK to delete: {0}'.format(str(\n diff_ids)))\n\n @staticmethod\n def _get_model(first_line):\n cqrs_id = first_line.split(',')[0]\n model = MasterRegistry.get_model_by_cqrs_id(cqrs_id)\n if not model:\n raise CommandError('Wrong CQRS ID: {0}!'.format(cqrs_id))\n return model\n",
"<import token>\n<assignment token>\n\n\nclass Command(BaseCommand):\n help = 'Diff of deleted CQRS models pks from master diff stream.'\n\n @classmethod\n def serialize_out(cls, package):\n return ujson.dumps(package)\n\n @classmethod\n def deserialize_in(cls, package_line):\n return ujson.loads(package_line)\n\n def handle(self, *args, **options):\n with sys.stdin as f:\n first_line = f.readline()\n model = self._get_model(first_line)\n self.stdout.write(first_line.strip())\n with connection.cursor() as cursor:\n for package_line in f:\n master_data = self.deserialize_in(package_line)\n sql = GET_NON_EXISTING_PKS_SQL_TEMPLATE.format(values=\n ','.join(['({0})'.format(pk) for pk in master_data]\n ), table=model._meta.db_table, pk_field=model._meta\n .pk.attname)\n cursor.execute(sql)\n diff_ids = [r[0] for r in cursor.fetchall()]\n if diff_ids:\n self.stdout.write(self.serialize_out(diff_ids))\n self.stderr.write('PK to delete: {0}'.format(str(\n diff_ids)))\n\n @staticmethod\n def _get_model(first_line):\n cqrs_id = first_line.split(',')[0]\n model = MasterRegistry.get_model_by_cqrs_id(cqrs_id)\n if not model:\n raise CommandError('Wrong CQRS ID: {0}!'.format(cqrs_id))\n return model\n",
"<import token>\n<assignment token>\n\n\nclass Command(BaseCommand):\n <assignment token>\n\n @classmethod\n def serialize_out(cls, package):\n return ujson.dumps(package)\n\n @classmethod\n def deserialize_in(cls, package_line):\n return ujson.loads(package_line)\n\n def handle(self, *args, **options):\n with sys.stdin as f:\n first_line = f.readline()\n model = self._get_model(first_line)\n self.stdout.write(first_line.strip())\n with connection.cursor() as cursor:\n for package_line in f:\n master_data = self.deserialize_in(package_line)\n sql = GET_NON_EXISTING_PKS_SQL_TEMPLATE.format(values=\n ','.join(['({0})'.format(pk) for pk in master_data]\n ), table=model._meta.db_table, pk_field=model._meta\n .pk.attname)\n cursor.execute(sql)\n diff_ids = [r[0] for r in cursor.fetchall()]\n if diff_ids:\n self.stdout.write(self.serialize_out(diff_ids))\n self.stderr.write('PK to delete: {0}'.format(str(\n diff_ids)))\n\n @staticmethod\n def _get_model(first_line):\n cqrs_id = first_line.split(',')[0]\n model = MasterRegistry.get_model_by_cqrs_id(cqrs_id)\n if not model:\n raise CommandError('Wrong CQRS ID: {0}!'.format(cqrs_id))\n return model\n",
"<import token>\n<assignment token>\n\n\nclass Command(BaseCommand):\n <assignment token>\n\n @classmethod\n def serialize_out(cls, package):\n return ujson.dumps(package)\n\n @classmethod\n def deserialize_in(cls, package_line):\n return ujson.loads(package_line)\n\n def handle(self, *args, **options):\n with sys.stdin as f:\n first_line = f.readline()\n model = self._get_model(first_line)\n self.stdout.write(first_line.strip())\n with connection.cursor() as cursor:\n for package_line in f:\n master_data = self.deserialize_in(package_line)\n sql = GET_NON_EXISTING_PKS_SQL_TEMPLATE.format(values=\n ','.join(['({0})'.format(pk) for pk in master_data]\n ), table=model._meta.db_table, pk_field=model._meta\n .pk.attname)\n cursor.execute(sql)\n diff_ids = [r[0] for r in cursor.fetchall()]\n if diff_ids:\n self.stdout.write(self.serialize_out(diff_ids))\n self.stderr.write('PK to delete: {0}'.format(str(\n diff_ids)))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Command(BaseCommand):\n <assignment token>\n <function token>\n\n @classmethod\n def deserialize_in(cls, package_line):\n return ujson.loads(package_line)\n\n def handle(self, *args, **options):\n with sys.stdin as f:\n first_line = f.readline()\n model = self._get_model(first_line)\n self.stdout.write(first_line.strip())\n with connection.cursor() as cursor:\n for package_line in f:\n master_data = self.deserialize_in(package_line)\n sql = GET_NON_EXISTING_PKS_SQL_TEMPLATE.format(values=\n ','.join(['({0})'.format(pk) for pk in master_data]\n ), table=model._meta.db_table, pk_field=model._meta\n .pk.attname)\n cursor.execute(sql)\n diff_ids = [r[0] for r in cursor.fetchall()]\n if diff_ids:\n self.stdout.write(self.serialize_out(diff_ids))\n self.stderr.write('PK to delete: {0}'.format(str(\n diff_ids)))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Command(BaseCommand):\n <assignment token>\n <function token>\n <function token>\n\n def handle(self, *args, **options):\n with sys.stdin as f:\n first_line = f.readline()\n model = self._get_model(first_line)\n self.stdout.write(first_line.strip())\n with connection.cursor() as cursor:\n for package_line in f:\n master_data = self.deserialize_in(package_line)\n sql = GET_NON_EXISTING_PKS_SQL_TEMPLATE.format(values=\n ','.join(['({0})'.format(pk) for pk in master_data]\n ), table=model._meta.db_table, pk_field=model._meta\n .pk.attname)\n cursor.execute(sql)\n diff_ids = [r[0] for r in cursor.fetchall()]\n if diff_ids:\n self.stdout.write(self.serialize_out(diff_ids))\n self.stderr.write('PK to delete: {0}'.format(str(\n diff_ids)))\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass Command(BaseCommand):\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
98,858 |
def30114baa2bfb8064b0f98c9aa7bfa6d8be368
|
#!/usr/bin/python
# remediator_python.py - version 1.55 9/13/07
# Copyright 2007, Jeffrey J. Headd and Robert Immormino
# revision 1.55 - JJH 070808 - added support for DU DNA base
# - JJH 070808 - added compiled RE object for HN2 RES special case
# - JJH 070815 - updated name of hash dictionary file
# - JJH 070823 - added support for CNS Xplor and Coot RNA names
# - JJH 070908 - added REMARK 4 comment addition
# - JJH 070913 - added support for left-justified RNA/DNA old names
# - JJH 070913 - added support for all left-justified residue names
#
# SAS - corrected 22.01.2008 to fix an error: the original script stripped spaces
# at the ends of all rows
# SAS - corrected 04.06.2008 to fix an error: the original script did not change
# coordinate lines with alter codes
import sys
import getopt
import os
import string
import re
masterhash="master_hash.txt"
def usage():
print """
************************************
remediator_python.py: version 1.55 8/8/07
Copyright 2007, Jeffrey J. Headd and Robert Immormino
remediator.py: bug fixes by Sergei Spirin (2008)
For a log of changes, view remediator.py in your favorite text editor
USAGE: remediator_sas.py [--options] input_file > output_file
options:
--help outputs this help message
--pdb takes a .pdb formatted file as input
--old output file will use the PDBv2.3 naming conventions
--remediated output file will use the remediated naming conventions (default)
	remediator is generally intended to convert from PDBv2.3 to PDBv3.0.
This changes files from the pre-wwPDB format into the wwPDB remediated format.
Output is directed to standard out.
EXAMPLE: remediator_sas.py --pdb --old 404D.pdb > 404D_old.pdb
"""
try:
opts, args = getopt.getopt( sys.argv[1:], 'hpor',['help', 'pdb', 'old', 'remediated'] )
except getopt.GetoptError:
usage()
sys.exit()
old_out = False
remediated_out = False
dopdb = False
#dokin = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-p", "--pdb"):
dopdb = True
#if o in ("-k", "--kin"):
# dokin = True
if o in ("-o", "--old"):
old_out = True
if o in ("-r", "--remediated"):
remediated_out = True
if len(args) < 1:
sys.stderr.write("\n**REMEDIATOR ERROR: User must specify input filename\n")
sys.exit(usage())
if len(args) > 1:
sys.stderr.write("\n**REMEDIATOR ERROR: too many input files specified\n")
sys.exit(usage())
#if dopdb == True and dokin == True:
# usage()
# sys.exit("REMEDIATOR ERROR: specify only one input file type")
if old_out == True and remediated_out == True:
sys.stderr.write("\n**REMEDIATOR ERROR: cannot output old and remediated names simultaneously\n")
sys.exit(usage())
if dopdb == False:
#print "REMEDIATOR: Assuming PDB input file"
dopdb = True
if old_out == False and remediated_out == False:
remediated_out = True
filename = args[0]
assert os.path.isfile(filename),\
"\n**REMEDIATOR ERROR: cannot find %s" %(filename)
basename = os.path.basename(filename)
#--Build Hash Table------------------------------------------------
atom_exch = {}
f = open(masterhash)
#f = open("master_hash.txt")
if remediated_out == True: #converting to remediated
for line in f:
line=line.rstrip()
new, old = line.split(':')
atom_exch[old] = new
remark4 = "REMARK 4 REMEDIATOR VALIDATED PDB VERSION 3.0 COMPLIANT"
else: #converting to old
for line in f:
new, old = line.split(':')
atom_exch[new] = old
remark4 = "REMARK 4 REMEDIATOR VALIDATED PDB VERSION 2.3 COMPLIANT"
f.close()
#------------------------------------------------------------------
#----PDB routine---------------------------------------------------
previous = None
current = None
print_line = ""
remark_flag = False
pdb_file = open(filename)
aa_re = re.compile(' HN2 (ALA|ARG|ASN|ASP|ASX|CSE|CYS|GLN|GLU|GLX|GLY|HIS|ILE|LEU|LYS|MET|MSE|PHE|PRO|SER|THR|TRP|UNK|TYR|VAL)')
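# Main loop: coordinate lines are buffered one residue at a time; when the
# residue changes, the buffer is post-processed (nucleic-acid residues with
# no O2'/O2* atom are treated as DNA and renamed, e.g. " G" -> "DG" for
# remediated output) and then printed.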
for line in pdb_file:
# line=line.rstrip()
type_test = line[0:6]
if remark_flag == False:
if type_test == "REMARK":
if re.search(remark4,line):
remark_flag = True
elif re.match('REMARK 4 REMEDIATOR',line):
continue
elif int('0' + line[6:10].strip()) > 4:
print_line += remark4 + "\n"
remark_flag = True
if type_test in ("ATOM ", "HETATM", "TER ", "ANISOU", "SIGATM", "SIGUIJ", "LINK "):
if remark_flag == False:
print_line += remark4 + "\n"
remark_flag = True
#--pre-screen for CNS Xplor RNA base names and Coot RNA base names--------
if re.match(r'.{17}(GUA|ADE|CYT|THY|URI)',line):
line = re.sub(r'\A(.{17})(.)..',r'\g<1> \g<2>',line)
elif re.match(r'.{17}(OIP| Ar| Gr| Cr| Ur)',line):
line = re.sub(r'\A(.{17}).(.).',r'\g<1> \g<2>',line)
#-------------------------------------------------------------------------
#REMOVED FROM THE CODE IN FAVOR OF THE GENERIC BLOCK BELOW
#--pre-screen for left-justified RNA/DNA base names-----------------------
#if re.match(r'.{17}(G |A |C |T |U |I )',line):
# line = re.sub(r'\A(.{17})(.)\s\s',r'\g<1> \g<2>',line)
#-------------------------------------------------------------------------
#--make any left-justified residue names right-justified------------------
if re.match(r'.{17}([a-zA-Z]) ',line):
line = re.sub(r'\A(.{17})(.)\s\s',r'\g<1> \g<2>',line)
elif re.match(r'.{17}([a-zA-Z][a-zA-Z]) ',line):
line = re.sub(r'\A(.{17})(..)\s',r'\g<1> \g<2>',line)
#-------------------------------------------------------------------------
entry = line[12:20]
previous = current
current = line[18:26]
clean_entry = entry[0:4] + " " + entry[5:8]
if atom_exch.has_key(clean_entry):
line = string.replace(line,clean_entry[0:4],atom_exch[clean_entry][0:4])
if previous == None:
previous = current
if previous == current:
print_line += line
elif previous != current:
if re.search(r'.\S..[A-Z ] .[ACTGIU]',print_line):
if re.search(r'O2[\'|\*] .',print_line) == None:
DNA_base = previous[1]
if remediated_out == True:
print_line = re.sub(r'(.\S..[A-Z ]) '+DNA_base+' ',r'\g<1> D'+DNA_base+' ',print_line)
print_line = re.sub(r'(TER.{15}) '+DNA_base+' ',r'\g<1>D'+DNA_base+' ',print_line)
elif old_out == True:
print_line = re.sub(r'(.\S..[A-Z ]) D'+DNA_base+' ',r'\g<1> '+DNA_base+' ',print_line)
print_line = re.sub(r'(TER.{15})D'+DNA_base+' ',r'\g<1> '+DNA_base+' ',print_line)
if old_out == True:
m = aa_re.search(print_line)
if m:
res = m.group(1)
if re.search('1H '+res,print_line) or re.search('2H '+res,print_line):
print_line = re.sub(' HN2 '+res,'2H '+res,print_line)
# print_line=print_line.rstrip()
print_line=print_line.rstrip("\r\n")
print print_line
# print print_line[0:-2]
print_line = line
pdb_file.close()
if re.search(r'.\S..[A-Z ] .[ACTGIU]',print_line):
if re.search(r'O2[\'|\*][A-Z ] .',print_line) == None:
DNA_base = previous[1]
if remediated_out == True:
print_line = re.sub(r'(.\S..[A-Z ]) '+DNA_base,r'\g<1> D'+DNA_base,print_line)
print_line = re.sub(r'(TER.{15}) '+DNA_base+' ',r'\g<1>D'+DNA_base+' ',print_line)
elif old_out == True:
print_line = re.sub(r'(.\S..[A-Z ]) D'+DNA_base,r'\g<1> '+DNA_base,print_line)
print_line = re.sub(r'(TER.{15})D'+DNA_base+' ',r'\g<1> '+DNA_base+' ',print_line)
if old_out == True:
m = aa_re.search(print_line)
if m:
res = m.group(1)
if re.search('1H '+res,print_line) or re.search('2H '+res,print_line):
print_line = re.sub(' HN2 '+res,'2H '+res,print_line)
print_line=print_line.rstrip("\r\n")
print print_line
|
[
"#!/usr/bin/python\n# remediator_python.py - version 1.55 9/13/07\n# Copyright 2007, Jeffrey J. Headd and Robert Immormino\n\n# revision 1.55 - JJH 070808 - added support for DU DNA base\n# - JJH 070808 - added compiled RE object for HN2 RES special case\n#\t\t- JJH 070815 - updated name of hash dictionary file\n#\t\t- JJH 070823 - added support for CNS Xplor and Coot RNA names\n#\t\t- JJH 070908 - added REMARK 4 comment addition\n# - JJH 070913 - added support for left-justified RNA/DNA old names\n#\t\t- JJH 070913 - added support for all left-justified residue names\n#\n# SAS - corrected 22.01.2008 to fix an error: the original script stripped spaces \n# at the ends of all raws\n# SAS - corrected 04.06.2008 to fix an error: the original script did not change \n# coordinate lines with alter codes\n\nimport sys\nimport getopt\nimport os\nimport string\nimport re\n\nmasterhash=\"master_hash.txt\"\n\ndef usage():\n\tprint \"\"\"\n\t************************************\n\tremediator_python.py: version 1.55 8/8/07\n\tCopyright 2007, Jeffrey J. Headd and Robert Immormino\n\tremediator.py: bug fixes by Sergei Spirin (2008)\n\tFor a log of changes, view remediator.py in your favorite text editor \n\n\tUSAGE: remediator_sas.py [--options] input_file > output_file\n\n\toptions:\n\t --help\toutputs this help message\n\t --pdb\t\ttakes a .pdb formatted file as input\n\t --old\t \toutput file will use the PDBv2.3 naming conventions\n\t --remediated \toutput file will use the remediated naming conventions (default)\n\n\tremediator is generally inteded to convert from PDBv2.3 to PDBv3.0. \n\tThis changes files from the pre-wwPDB format into the wwPDB remediated format.\n\tOutput is directed to standard out.\n\n\tEXAMPLE: remediator_sas.py --pdb --old 404D.pdb > 404D_old.pdb \n \"\"\"\n\ntry:\n\topts, args = getopt.getopt( sys.argv[1:], 'hpor',['help', 'pdb', 'old', 'remediated'] )\nexcept getopt.GetoptError:\n\tusage()\n\tsys.exit()\n\nold_out = False\nremediated_out = False\ndopdb = False\n#dokin = False\n\nfor o, a in opts:\n\tif o in (\"-h\", \"--help\"):\n\t\tusage()\n\t\tsys.exit()\n\tif o in (\"-p\", \"--pdb\"):\n\t\tdopdb = True\n\t#if o in (\"-k\", \"--kin\"):\n\t#\tdokin = True\n\tif o in (\"-o\", \"--old\"):\n\t\told_out = True\n\tif o in (\"-r\", \"--remediated\"):\n\t\tremediated_out = True\n\nif len(args) < 1:\n\tsys.stderr.write(\"\\n**REMEDIATOR ERROR: User must specify input filename\\n\")\n\tsys.exit(usage())\nif len(args) > 1:\n\tsys.stderr.write(\"\\n**REMEDIATOR ERROR: too many input files specified\\n\")\n\tsys.exit(usage())\n\n#if dopdb == True and dokin == True:\n#\tusage()\n#\tsys.exit(\"REMEDIATOR ERROR: specify only one input file type\")\nif old_out == True and remediated_out == True:\n\tsys.stderr.write(\"\\n**REMEDIATOR ERROR: cannot output old and remediated names simultaneously\\n\")\n\tsys.exit(usage())\n\nif dopdb == False:\n\t#print \"REMEDIATOR: Assuming PDB input file\"\n\tdopdb = True\nif old_out == False and remediated_out == False:\n\tremediated_out = True\n\nfilename = args[0]\nassert os.path.isfile(filename),\\\n\t\"\\n**REMEDIATOR ERROR: cannot find %s\" %(filename)\nbasename = os.path.basename(filename)\n\n#--Build Hash Table------------------------------------------------\natom_exch = {}\nf = open(masterhash)\n#f = open(\"master_hash.txt\")\nif remediated_out == True: #converting to remediated\n\tfor line in f:\n\t\tline=line.rstrip()\n\t\tnew, old = line.split(':')\n\t\tatom_exch[old] = new\n\tremark4 = \"REMARK 4 REMEDIATOR VALIDATED PDB VERSION 3.0 
COMPLIANT\"\nelse: #converting to old\n\tfor line in f:\n\t\tnew, old = line.split(':')\n\t\tatom_exch[new] = old\n\tremark4 = \"REMARK 4 REMEDIATOR VALIDATED PDB VERSION 2.3 COMPLIANT\"\nf.close()\n#------------------------------------------------------------------\n\n\n#----PDB routine---------------------------------------------------\n\nprevious = None\ncurrent = None\nprint_line = \"\"\nremark_flag = False\n\npdb_file = open(filename)\n\naa_re = re.compile(' HN2 (ALA|ARG|ASN|ASP|ASX|CSE|CYS|GLN|GLU|GLX|GLY|HIS|ILE|LEU|LYS|MET|MSE|PHE|PRO|SER|THR|TRP|UNK|TYR|VAL)')\n\nfor line in pdb_file:\n#\tline=line.rstrip()\n\ttype_test = line[0:6]\n\tif remark_flag == False:\n\t\tif type_test == \"REMARK\":\n\t\t\tif re.search(remark4,line):\n\t\t\t\tremark_flag = True\n\t\t\telif re.match('REMARK 4 REMEDIATOR',line):\n\t\t\t\tcontinue\n\t\t\telif int('0' + line[6:10].strip()) > 4:\n\t\t\t\tprint_line += remark4 + \"\\n\"\n\t\t\t\tremark_flag = True\n\t\t\t\n\tif type_test in (\"ATOM \", \"HETATM\", \"TER \", \"ANISOU\", \"SIGATM\", \"SIGUIJ\", \"LINK \"):\n\t\tif remark_flag == False:\n\t\t\tprint_line += remark4 + \"\\n\"\n\t\t\tremark_flag = True\n\t\t#--pre-screen for CNS Xplor RNA base names and Coot RNA base names--------\n\t\tif re.match(r'.{17}(GUA|ADE|CYT|THY|URI)',line):\n\t\t\tline = re.sub(r'\\A(.{17})(.)..',r'\\g<1> \\g<2>',line)\n\t\telif re.match(r'.{17}(OIP| Ar| Gr| Cr| Ur)',line):\n\t\t\tline = re.sub(r'\\A(.{17}).(.).',r'\\g<1> \\g<2>',line)\n\t\t#-------------------------------------------------------------------------\n\n\t\t#REMOVED FROM THE CODE IN FAVOR OF THE GENERIC BLOCK BELOW\n #--pre-screen for left-justified RNA/DNA base names-----------------------\n\t\t#if re.match(r'.{17}(G |A |C |T |U |I )',line):\n\t\t#\tline = re.sub(r'\\A(.{17})(.)\\s\\s',r'\\g<1> \\g<2>',line)\n #-------------------------------------------------------------------------\n\t\t\n\t\t#--make any left-justified residue names right-justified------------------\n\t\tif re.match(r'.{17}([a-zA-Z]) ',line):\n\t\t\tline = re.sub(r'\\A(.{17})(.)\\s\\s',r'\\g<1> \\g<2>',line)\n\t\telif re.match(r'.{17}([a-zA-Z][a-zA-Z]) ',line):\n\t\t\tline = re.sub(r'\\A(.{17})(..)\\s',r'\\g<1> \\g<2>',line)\n\t\t#-------------------------------------------------------------------------\n\t\tentry = line[12:20]\n\t\tprevious = current\n\t\tcurrent = line[18:26]\n\t\tclean_entry = entry[0:4] + \" \" + entry[5:8]\n\t\tif atom_exch.has_key(clean_entry):\n\t\t\tline = string.replace(line,clean_entry[0:4],atom_exch[clean_entry][0:4])\n\tif previous == None:\n\t\tprevious = current\n\tif previous == current:\n\t\tprint_line += line\n\telif previous != current:\n\t\tif re.search(r'.\\S..[A-Z ] .[ACTGIU]',print_line):\n\t\t\tif re.search(r'O2[\\'|\\*] .',print_line) == None:\n\t\t\t\tDNA_base = previous[1]\n\t\t\t\tif remediated_out == True:\n\t\t\t\t\tprint_line = re.sub(r'(.\\S..[A-Z ]) '+DNA_base+' ',r'\\g<1> D'+DNA_base+' ',print_line)\n\t\t\t\t\tprint_line = re.sub(r'(TER.{15}) '+DNA_base+' ',r'\\g<1>D'+DNA_base+' ',print_line)\n\t\t\t\telif old_out == True:\n\t\t\t\t\tprint_line = re.sub(r'(.\\S..[A-Z ]) D'+DNA_base+' ',r'\\g<1> '+DNA_base+' ',print_line)\n\t\t\t\t\tprint_line = re.sub(r'(TER.{15})D'+DNA_base+' ',r'\\g<1> '+DNA_base+' ',print_line)\n\t\t\n\t\tif old_out == True:\n\t\t\tm = aa_re.search(print_line)\n\t\t\tif m:\n\t\t\t\tres = m.group(1)\n\t\t\t\tif re.search('1H '+res,print_line) or re.search('2H '+res,print_line):\n\t\t\t\t\tprint_line = re.sub(' HN2 '+res,'2H 
'+res,print_line)\n#\t\tprint_line=print_line.rstrip()\n\t\tprint_line=print_line.rstrip(\"\\r\\n\")\n\t\tprint print_line\n#\t\tprint print_line[0:-2]\n\t\tprint_line = line\npdb_file.close()\n\nif re.search(r'.\\S..[A-Z ] .[ACTGIU]',print_line):\n\tif re.search(r'O2[\\'|\\*][A-Z ] .',print_line) == None:\n\t\tDNA_base = previous[1]\n\t\tif remediated_out == True:\n\t\t\tprint_line = re.sub(r'(.\\S..[A-Z ]) '+DNA_base,r'\\g<1> D'+DNA_base,print_line)\n\t\t\tprint_line = re.sub(r'(TER.{15}) '+DNA_base+' ',r'\\g<1>D'+DNA_base+' ',print_line)\n\t\telif old_out == True:\n\t\t\tprint_line = re.sub(r'(.\\S..[A-Z ]) D'+DNA_base,r'\\g<1> '+DNA_base,print_line)\n\t\t\tprint_line = re.sub(r'(TER.{15})D'+DNA_base+' ',r'\\g<1> '+DNA_base+' ',print_line)\n\t\n\tif old_out == True:\n\t\tm = aa_re.search(print_line)\n\t\tif m:\n\t\t\tres = m.group(1)\n\t\t\tif re.search('1H '+res,print_line) or re.search('2H '+res,print_line):\n\t\t\t\tprint_line = re.sub(' HN2 '+res,'2H '+res,print_line)\n\nprint_line=print_line.rstrip(\"\\r\\n\")\nprint print_line\n"
] | true |
98,859 |
0d7a6ff4a3a47b3e2dacf7aecc7f3649c4ad0507
|
import pytest
from check_market_maker import best_price
def test_minimal_sell_price():
order_book = {
10.0: 5,
11.0: 5
}
result = best_price(order_book)
assert result == 10.0
def test_maximal_price():
order_book = {
10.0: 5,
11.0: 5
}
result = best_price(order_book, is_buy_price=True)
assert result == 11.0
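# A minimal best_price sketch that would satisfy these tests (the actual
# implementation in check_market_maker may differ):
#
#     def best_price(order_book, is_buy_price=False):
#         return max(order_book) if is_buy_price else min(order_book)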
|
[
"import pytest\nfrom check_market_maker import best_price\n\n\ndef test_minimal_sell_price():\n order_book = {\n 10.0: 5,\n 11.0: 5\n }\n result = best_price(order_book)\n assert result == 10.0\n\n\ndef test_maximal_price():\n order_book = {\n 10.0: 5,\n 11.0: 5\n }\n result = best_price(order_book, is_buy_price=True)\n assert result == 11.0\n",
"import pytest\nfrom check_market_maker import best_price\n\n\ndef test_minimal_sell_price():\n order_book = {(10.0): 5, (11.0): 5}\n result = best_price(order_book)\n assert result == 10.0\n\n\ndef test_maximal_price():\n order_book = {(10.0): 5, (11.0): 5}\n result = best_price(order_book, is_buy_price=True)\n assert result == 11.0\n",
"<import token>\n\n\ndef test_minimal_sell_price():\n order_book = {(10.0): 5, (11.0): 5}\n result = best_price(order_book)\n assert result == 10.0\n\n\ndef test_maximal_price():\n order_book = {(10.0): 5, (11.0): 5}\n result = best_price(order_book, is_buy_price=True)\n assert result == 11.0\n",
"<import token>\n\n\ndef test_minimal_sell_price():\n order_book = {(10.0): 5, (11.0): 5}\n result = best_price(order_book)\n assert result == 10.0\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
98,860 |
228bea01e8550db4de4bbdeb5add7c7e0e297e85
|
# Sudoku-programming
import cv2
import pytesseract
import imutils
import re
def replace_chars(text):
list_of_numbers = re.findall(r'\d+', text)
result_number = ''.join(list_of_numbers)
return result_number
def xuat(a):
img = cv2.imread(path) #test.png is your original image
img = imutils.resize(img, width=900, height=900)
x = 0
y = -100
fontScale = 2.3
# Blue color in BGR
color = (255, 0, 0)
# Line thickness of 2 px
thickness = 2
for un in a:
print(un)
for row in range(0, 9):
x += 100
y = 0
for col in range (0,9):
y += 100
cv2.putText(img, str(a[int((x-100)/100)][int((y-100)/100)]) , (y-80,x-30), cv2.FONT_HERSHEY_SIMPLEX, fontScale, color, thickness, cv2.LINE_AA)
cv2.imshow("Bai giai Sodoku", img)
cv2.waitKey()
path2 = path.split(".")[0] + "_OUT.jpg"
cv2.imwrite(path2, img)
exit()
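# Backtracking solver: process(k) advances to the next empty cell at or after
# index k (0..80), tries every digit 1-9 that isOK() accepts (no clash in the
# row, column or 3x3 box), recurses, and prints the solution via xuat() once
# the last empty cell (lastK) has been filled.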
def process(k):
# print(k)
while (a[int(k/9)][int(k%9)] != 0):
k = k + 1
# print(k)
i = int(k/9)
j = k%9
# print(i, j)
for x in range(1, 10):
# print(x)
if isOK(i, j, x):
a[i][j] = x
if k == lastK:
# print(k, lastK)
print("Bai giai:")
xuat(a)
break
else:
process(k+1)
a[i][j] = 0
return 0
def isOK(i, j, x):
# print(x)
for t in range(0, 9):
if a[i][t] == x:
return False
for t in range(0, 9):
if a[t][j] == x:
return False
tmpX = i%3
tmpY = j%3
for u in range(i-tmpX, i-tmpX+3):
for t in range(j-tmpY, j-tmpY+3):
if a[u][t] == x:
return False
return True
def findLastK():
for i in range(8, 0, -1):
for j in range(8, 0, -1):
if a[i][j] == 0:
return i*9 + j
return 0
print("Nhap ten anh can giai (bao gom duoi):")
path = str(input())
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
img = cv2.imread(path) #test.png is your original image
img = imutils.resize(img, width=900, height=900)
x = 0
y = -100
digits = []
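# OCR pass: the image was resized to 900x900, so the board is read as a 9x9
# grid of 100-pixel cells; the interior of each cell is thresholded, blurred
# and sent to Tesseract (--psm 6), with empty cells recorded as 0.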
for i in range(0, 9):
x += 100
y = 0
for j in range (0,9):
y += 100
# print(x, y)
crop = img[x-95:x-20, y-95:y-20]
gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
thresh = 255 - cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
thresh = cv2.GaussianBlur(thresh, (3,3), 0)
data = pytesseract.image_to_string(thresh, lang='eng', config='--psm 6')
data = replace_chars(data.strip()).strip()
if len(data) == 0:
data = "0"
digits.append(int(data))
# print(data)
# cv2.imshow('crop', thresh)
# cv2.waitKey()
k = -1
a = []
b = []
# print(len(digits))
for i in range(0,9):
for j in range(0, 9):
k+=1
b.append(digits[k])
a.append(b)
b = []
print("Sogoku:")
for un in a:
print(un)
lastK = 0
lastK=int(findLastK())
process(0)
print("De sai hoac anh khong dung yeu cau!")
|
[
"# Sudoku-programming\nimport cv2\nimport pytesseract\nimport imutils\nimport re\n\ndef replace_chars(text):\n list_of_numbers = re.findall(r'\\d+', text)\n result_number = ''.join(list_of_numbers)\n return result_number\n\ndef xuat(a):\n img = cv2.imread(path) #test.png is your original image\n img = imutils.resize(img, width=900, height=900)\n x = 0\n y = -100\n fontScale = 2.3 \n # Blue color in BGR \n color = (255, 0, 0) \n # Line thickness of 2 px \n thickness = 2\n for un in a:\n print(un)\n for row in range(0, 9):\n x += 100\n y = 0\n for col in range (0,9):\n y += 100\n cv2.putText(img, str(a[int((x-100)/100)][int((y-100)/100)]) , (y-80,x-30), cv2.FONT_HERSHEY_SIMPLEX, fontScale, color, thickness, cv2.LINE_AA)\n cv2.imshow(\"Bai giai Sodoku\", img)\n cv2.waitKey()\n path2 = path.split(\".\")[0] + \"_OUT.jpg\"\n cv2.imwrite(path2, img)\n exit()\n\ndef process(k):\n # print(k)\n while (a[int(k/9)][int(k%9)] != 0):\n k = k + 1\n # print(k)\n i = int(k/9)\n j = k%9\n # print(i, j)\n for x in range(1, 10):\n # print(x)\n if isOK(i, j, x):\n a[i][j] = x\n if k == lastK:\n # print(k, lastK)\n print(\"Bai giai:\")\n xuat(a)\n break\n else:\n process(k+1)\n a[i][j] = 0\n\n return 0\n\ndef isOK(i, j, x):\n # print(x)\n for t in range(0, 9):\n if a[i][t] == x:\n return False\n for t in range(0, 9):\n if a[t][j] == x:\n return False\n tmpX = i%3\n tmpY = j%3\n for u in range(i-tmpX, i-tmpX+3):\n for t in range(j-tmpY, j-tmpY+3):\n if a[u][t] == x:\n return False\n return True\n\ndef findLastK():\n for i in range(8, 0, -1):\n for j in range(8, 0, -1):\n if a[i][j] == 0:\n return i*9 + j\n return 0\n\nprint(\"Nhap ten anh can giai (bao gom duoi):\")\npath = str(input())\n\npytesseract.pytesseract.tesseract_cmd = r\"C:\\Program Files\\Tesseract-OCR\\tesseract.exe\"\nimg = cv2.imread(path) #test.png is your original image\nimg = imutils.resize(img, width=900, height=900)\nx = 0\ny = -100\n\ndigits = []\nfor i in range(0, 9):\n x += 100\n y = 0\n for j in range (0,9):\n y += 100\n # print(x, y)\n crop = img[x-95:x-20, y-95:y-20]\n gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)\n thresh = 255 - cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n thresh = cv2.GaussianBlur(thresh, (3,3), 0)\n data = pytesseract.image_to_string(thresh, lang='eng', config='--psm 6')\n data = replace_chars(data.strip()).strip()\n if len(data) == 0:\n data = \"0\"\n digits.append(int(data))\n # print(data)\n # cv2.imshow('crop', thresh)\n # cv2.waitKey()\nk = -1\na = []\nb = []\n# print(len(digits))\nfor i in range(0,9):\n for j in range(0, 9):\n k+=1\n b.append(digits[k])\n a.append(b)\n b = []\n\nprint(\"Sogoku:\")\nfor un in a:\n print(un)\nlastK = 0\nlastK=int(findLastK())\nprocess(0)\nprint(\"De sai hoac anh khong dung yeu cau!\")\n",
"import cv2\nimport pytesseract\nimport imutils\nimport re\n\n\ndef replace_chars(text):\n list_of_numbers = re.findall('\\\\d+', text)\n result_number = ''.join(list_of_numbers)\n return result_number\n\n\ndef xuat(a):\n img = cv2.imread(path)\n img = imutils.resize(img, width=900, height=900)\n x = 0\n y = -100\n fontScale = 2.3\n color = 255, 0, 0\n thickness = 2\n for un in a:\n print(un)\n for row in range(0, 9):\n x += 100\n y = 0\n for col in range(0, 9):\n y += 100\n cv2.putText(img, str(a[int((x - 100) / 100)][int((y - 100) / \n 100)]), (y - 80, x - 30), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, color, thickness, cv2.LINE_AA)\n cv2.imshow('Bai giai Sodoku', img)\n cv2.waitKey()\n path2 = path.split('.')[0] + '_OUT.jpg'\n cv2.imwrite(path2, img)\n exit()\n\n\ndef process(k):\n while a[int(k / 9)][int(k % 9)] != 0:\n k = k + 1\n i = int(k / 9)\n j = k % 9\n for x in range(1, 10):\n if isOK(i, j, x):\n a[i][j] = x\n if k == lastK:\n print('Bai giai:')\n xuat(a)\n break\n else:\n process(k + 1)\n a[i][j] = 0\n return 0\n\n\ndef isOK(i, j, x):\n for t in range(0, 9):\n if a[i][t] == x:\n return False\n for t in range(0, 9):\n if a[t][j] == x:\n return False\n tmpX = i % 3\n tmpY = j % 3\n for u in range(i - tmpX, i - tmpX + 3):\n for t in range(j - tmpY, j - tmpY + 3):\n if a[u][t] == x:\n return False\n return True\n\n\ndef findLastK():\n for i in range(8, 0, -1):\n for j in range(8, 0, -1):\n if a[i][j] == 0:\n return i * 9 + j\n return 0\n\n\nprint('Nhap ten anh can giai (bao gom duoi):')\npath = str(input())\npytesseract.pytesseract.tesseract_cmd = (\n 'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe')\nimg = cv2.imread(path)\nimg = imutils.resize(img, width=900, height=900)\nx = 0\ny = -100\ndigits = []\nfor i in range(0, 9):\n x += 100\n y = 0\n for j in range(0, 9):\n y += 100\n crop = img[x - 95:x - 20, y - 95:y - 20]\n gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)\n thresh = 255 - cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV +\n cv2.THRESH_OTSU)[1]\n thresh = cv2.GaussianBlur(thresh, (3, 3), 0)\n data = pytesseract.image_to_string(thresh, lang='eng', config='--psm 6'\n )\n data = replace_chars(data.strip()).strip()\n if len(data) == 0:\n data = '0'\n digits.append(int(data))\nk = -1\na = []\nb = []\nfor i in range(0, 9):\n for j in range(0, 9):\n k += 1\n b.append(digits[k])\n a.append(b)\n b = []\nprint('Sogoku:')\nfor un in a:\n print(un)\nlastK = 0\nlastK = int(findLastK())\nprocess(0)\nprint('De sai hoac anh khong dung yeu cau!')\n",
"<import token>\n\n\ndef replace_chars(text):\n list_of_numbers = re.findall('\\\\d+', text)\n result_number = ''.join(list_of_numbers)\n return result_number\n\n\ndef xuat(a):\n img = cv2.imread(path)\n img = imutils.resize(img, width=900, height=900)\n x = 0\n y = -100\n fontScale = 2.3\n color = 255, 0, 0\n thickness = 2\n for un in a:\n print(un)\n for row in range(0, 9):\n x += 100\n y = 0\n for col in range(0, 9):\n y += 100\n cv2.putText(img, str(a[int((x - 100) / 100)][int((y - 100) / \n 100)]), (y - 80, x - 30), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, color, thickness, cv2.LINE_AA)\n cv2.imshow('Bai giai Sodoku', img)\n cv2.waitKey()\n path2 = path.split('.')[0] + '_OUT.jpg'\n cv2.imwrite(path2, img)\n exit()\n\n\ndef process(k):\n while a[int(k / 9)][int(k % 9)] != 0:\n k = k + 1\n i = int(k / 9)\n j = k % 9\n for x in range(1, 10):\n if isOK(i, j, x):\n a[i][j] = x\n if k == lastK:\n print('Bai giai:')\n xuat(a)\n break\n else:\n process(k + 1)\n a[i][j] = 0\n return 0\n\n\ndef isOK(i, j, x):\n for t in range(0, 9):\n if a[i][t] == x:\n return False\n for t in range(0, 9):\n if a[t][j] == x:\n return False\n tmpX = i % 3\n tmpY = j % 3\n for u in range(i - tmpX, i - tmpX + 3):\n for t in range(j - tmpY, j - tmpY + 3):\n if a[u][t] == x:\n return False\n return True\n\n\ndef findLastK():\n for i in range(8, 0, -1):\n for j in range(8, 0, -1):\n if a[i][j] == 0:\n return i * 9 + j\n return 0\n\n\nprint('Nhap ten anh can giai (bao gom duoi):')\npath = str(input())\npytesseract.pytesseract.tesseract_cmd = (\n 'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe')\nimg = cv2.imread(path)\nimg = imutils.resize(img, width=900, height=900)\nx = 0\ny = -100\ndigits = []\nfor i in range(0, 9):\n x += 100\n y = 0\n for j in range(0, 9):\n y += 100\n crop = img[x - 95:x - 20, y - 95:y - 20]\n gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)\n thresh = 255 - cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV +\n cv2.THRESH_OTSU)[1]\n thresh = cv2.GaussianBlur(thresh, (3, 3), 0)\n data = pytesseract.image_to_string(thresh, lang='eng', config='--psm 6'\n )\n data = replace_chars(data.strip()).strip()\n if len(data) == 0:\n data = '0'\n digits.append(int(data))\nk = -1\na = []\nb = []\nfor i in range(0, 9):\n for j in range(0, 9):\n k += 1\n b.append(digits[k])\n a.append(b)\n b = []\nprint('Sogoku:')\nfor un in a:\n print(un)\nlastK = 0\nlastK = int(findLastK())\nprocess(0)\nprint('De sai hoac anh khong dung yeu cau!')\n",
"<import token>\n\n\ndef replace_chars(text):\n list_of_numbers = re.findall('\\\\d+', text)\n result_number = ''.join(list_of_numbers)\n return result_number\n\n\ndef xuat(a):\n img = cv2.imread(path)\n img = imutils.resize(img, width=900, height=900)\n x = 0\n y = -100\n fontScale = 2.3\n color = 255, 0, 0\n thickness = 2\n for un in a:\n print(un)\n for row in range(0, 9):\n x += 100\n y = 0\n for col in range(0, 9):\n y += 100\n cv2.putText(img, str(a[int((x - 100) / 100)][int((y - 100) / \n 100)]), (y - 80, x - 30), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, color, thickness, cv2.LINE_AA)\n cv2.imshow('Bai giai Sodoku', img)\n cv2.waitKey()\n path2 = path.split('.')[0] + '_OUT.jpg'\n cv2.imwrite(path2, img)\n exit()\n\n\ndef process(k):\n while a[int(k / 9)][int(k % 9)] != 0:\n k = k + 1\n i = int(k / 9)\n j = k % 9\n for x in range(1, 10):\n if isOK(i, j, x):\n a[i][j] = x\n if k == lastK:\n print('Bai giai:')\n xuat(a)\n break\n else:\n process(k + 1)\n a[i][j] = 0\n return 0\n\n\ndef isOK(i, j, x):\n for t in range(0, 9):\n if a[i][t] == x:\n return False\n for t in range(0, 9):\n if a[t][j] == x:\n return False\n tmpX = i % 3\n tmpY = j % 3\n for u in range(i - tmpX, i - tmpX + 3):\n for t in range(j - tmpY, j - tmpY + 3):\n if a[u][t] == x:\n return False\n return True\n\n\ndef findLastK():\n for i in range(8, 0, -1):\n for j in range(8, 0, -1):\n if a[i][j] == 0:\n return i * 9 + j\n return 0\n\n\nprint('Nhap ten anh can giai (bao gom duoi):')\n<assignment token>\nfor i in range(0, 9):\n x += 100\n y = 0\n for j in range(0, 9):\n y += 100\n crop = img[x - 95:x - 20, y - 95:y - 20]\n gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)\n thresh = 255 - cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV +\n cv2.THRESH_OTSU)[1]\n thresh = cv2.GaussianBlur(thresh, (3, 3), 0)\n data = pytesseract.image_to_string(thresh, lang='eng', config='--psm 6'\n )\n data = replace_chars(data.strip()).strip()\n if len(data) == 0:\n data = '0'\n digits.append(int(data))\n<assignment token>\nfor i in range(0, 9):\n for j in range(0, 9):\n k += 1\n b.append(digits[k])\n a.append(b)\n b = []\nprint('Sogoku:')\nfor un in a:\n print(un)\n<assignment token>\nprocess(0)\nprint('De sai hoac anh khong dung yeu cau!')\n",
"<import token>\n\n\ndef replace_chars(text):\n list_of_numbers = re.findall('\\\\d+', text)\n result_number = ''.join(list_of_numbers)\n return result_number\n\n\ndef xuat(a):\n img = cv2.imread(path)\n img = imutils.resize(img, width=900, height=900)\n x = 0\n y = -100\n fontScale = 2.3\n color = 255, 0, 0\n thickness = 2\n for un in a:\n print(un)\n for row in range(0, 9):\n x += 100\n y = 0\n for col in range(0, 9):\n y += 100\n cv2.putText(img, str(a[int((x - 100) / 100)][int((y - 100) / \n 100)]), (y - 80, x - 30), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, color, thickness, cv2.LINE_AA)\n cv2.imshow('Bai giai Sodoku', img)\n cv2.waitKey()\n path2 = path.split('.')[0] + '_OUT.jpg'\n cv2.imwrite(path2, img)\n exit()\n\n\ndef process(k):\n while a[int(k / 9)][int(k % 9)] != 0:\n k = k + 1\n i = int(k / 9)\n j = k % 9\n for x in range(1, 10):\n if isOK(i, j, x):\n a[i][j] = x\n if k == lastK:\n print('Bai giai:')\n xuat(a)\n break\n else:\n process(k + 1)\n a[i][j] = 0\n return 0\n\n\ndef isOK(i, j, x):\n for t in range(0, 9):\n if a[i][t] == x:\n return False\n for t in range(0, 9):\n if a[t][j] == x:\n return False\n tmpX = i % 3\n tmpY = j % 3\n for u in range(i - tmpX, i - tmpX + 3):\n for t in range(j - tmpY, j - tmpY + 3):\n if a[u][t] == x:\n return False\n return True\n\n\ndef findLastK():\n for i in range(8, 0, -1):\n for j in range(8, 0, -1):\n if a[i][j] == 0:\n return i * 9 + j\n return 0\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n\n\ndef replace_chars(text):\n list_of_numbers = re.findall('\\\\d+', text)\n result_number = ''.join(list_of_numbers)\n return result_number\n\n\ndef xuat(a):\n img = cv2.imread(path)\n img = imutils.resize(img, width=900, height=900)\n x = 0\n y = -100\n fontScale = 2.3\n color = 255, 0, 0\n thickness = 2\n for un in a:\n print(un)\n for row in range(0, 9):\n x += 100\n y = 0\n for col in range(0, 9):\n y += 100\n cv2.putText(img, str(a[int((x - 100) / 100)][int((y - 100) / \n 100)]), (y - 80, x - 30), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, color, thickness, cv2.LINE_AA)\n cv2.imshow('Bai giai Sodoku', img)\n cv2.waitKey()\n path2 = path.split('.')[0] + '_OUT.jpg'\n cv2.imwrite(path2, img)\n exit()\n\n\ndef process(k):\n while a[int(k / 9)][int(k % 9)] != 0:\n k = k + 1\n i = int(k / 9)\n j = k % 9\n for x in range(1, 10):\n if isOK(i, j, x):\n a[i][j] = x\n if k == lastK:\n print('Bai giai:')\n xuat(a)\n break\n else:\n process(k + 1)\n a[i][j] = 0\n return 0\n\n\n<function token>\n\n\ndef findLastK():\n for i in range(8, 0, -1):\n for j in range(8, 0, -1):\n if a[i][j] == 0:\n return i * 9 + j\n return 0\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n\n\ndef xuat(a):\n img = cv2.imread(path)\n img = imutils.resize(img, width=900, height=900)\n x = 0\n y = -100\n fontScale = 2.3\n color = 255, 0, 0\n thickness = 2\n for un in a:\n print(un)\n for row in range(0, 9):\n x += 100\n y = 0\n for col in range(0, 9):\n y += 100\n cv2.putText(img, str(a[int((x - 100) / 100)][int((y - 100) / \n 100)]), (y - 80, x - 30), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, color, thickness, cv2.LINE_AA)\n cv2.imshow('Bai giai Sodoku', img)\n cv2.waitKey()\n path2 = path.split('.')[0] + '_OUT.jpg'\n cv2.imwrite(path2, img)\n exit()\n\n\ndef process(k):\n while a[int(k / 9)][int(k % 9)] != 0:\n k = k + 1\n i = int(k / 9)\n j = k % 9\n for x in range(1, 10):\n if isOK(i, j, x):\n a[i][j] = x\n if k == lastK:\n print('Bai giai:')\n xuat(a)\n break\n else:\n process(k + 1)\n a[i][j] = 0\n return 0\n\n\n<function token>\n\n\ndef findLastK():\n for i in range(8, 0, -1):\n for j in range(8, 0, -1):\n if a[i][j] == 0:\n return i * 9 + j\n return 0\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n\n\ndef xuat(a):\n img = cv2.imread(path)\n img = imutils.resize(img, width=900, height=900)\n x = 0\n y = -100\n fontScale = 2.3\n color = 255, 0, 0\n thickness = 2\n for un in a:\n print(un)\n for row in range(0, 9):\n x += 100\n y = 0\n for col in range(0, 9):\n y += 100\n cv2.putText(img, str(a[int((x - 100) / 100)][int((y - 100) / \n 100)]), (y - 80, x - 30), cv2.FONT_HERSHEY_SIMPLEX,\n fontScale, color, thickness, cv2.LINE_AA)\n cv2.imshow('Bai giai Sodoku', img)\n cv2.waitKey()\n path2 = path.split('.')[0] + '_OUT.jpg'\n cv2.imwrite(path2, img)\n exit()\n\n\ndef process(k):\n while a[int(k / 9)][int(k % 9)] != 0:\n k = k + 1\n i = int(k / 9)\n j = k % 9\n for x in range(1, 10):\n if isOK(i, j, x):\n a[i][j] = x\n if k == lastK:\n print('Bai giai:')\n xuat(a)\n break\n else:\n process(k + 1)\n a[i][j] = 0\n return 0\n\n\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef process(k):\n while a[int(k / 9)][int(k % 9)] != 0:\n k = k + 1\n i = int(k / 9)\n j = k % 9\n for x in range(1, 10):\n if isOK(i, j, x):\n a[i][j] = x\n if k == lastK:\n print('Bai giai:')\n xuat(a)\n break\n else:\n process(k + 1)\n a[i][j] = 0\n return 0\n\n\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,861 |
43ba469a23d155e33aeb96ef40a72c253f49730c
|
# Search Space for DyNet
# NOTE: No Batch_norm since DyNet has not supported batch norm
import dynet as dy
import numpy as np
from deep_architect.helpers.dynet_support import DyParameterCollection, siso_dynet_module
import deep_architect.modules as mo
import deep_architect.hyperparameters as hp
M = DyParameterCollection()
D = hp.Discrete
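# D([...]) declares a discrete hyperparameter; the searcher later picks one
# value from the list for every D(...) used in the search space below.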
def flatten():
def compile_fn(di, dh):
shape = di['in'].dim()
n = np.product(shape[0])
Flatten = dy.reshape
def fn(di):
return {'out': Flatten(di['in'], (n,))}
return fn
return siso_dynet_module('Flatten', compile_fn, {})
def dense(h_u):
def compile_fn(di, dh):
shape = di['in'].dim() # ((r, c), batch_dim)
m, n = dh['units'], shape[0][0]
pW = M.get_collection().add_parameters((m, n))
pb = M.get_collection().add_parameters((m, 1))
Dense = dy.affine_transform
def fn(di):
In = di['in']
W, b = pW.expr(), pb.expr()
# return {'out': W*In + b}
return {'out': Dense([b, W, In])}
return fn
return siso_dynet_module('Dense', compile_fn, {'units': h_u})
# just put here to streamline everything
def nonlinearity(h_nonlin_name):
def compile_fn(di, dh):
def fn(di):
nonlin_name = dh['nonlin_name']
if nonlin_name == 'relu':
Out = dy.rectify(di['in'])
elif nonlin_name == 'elu':
Out = dy.elu(di['in'])
elif nonlin_name == 'tanh':
Out = dy.tanh(di['in'])
else:
raise ValueError
return {'out': Out}
return fn
return siso_dynet_module('Nonlinearity', compile_fn,
{'nonlin_name': h_nonlin_name})
def dropout(h_keep_prob):
def compile_fn(di, dh):
p = dh['keep_prop']
Dropout = dy.dropout
def fn(di):
return {'out': Dropout(di['in'], p)}
return fn
return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})
def dnn_net_simple(num_classes):
# declaring hyperparameter
h_nonlin_name = D(['relu', 'tanh',
'elu']) # nonlinearity function names to choose from
h_opt_drop = D(
[0, 1]) # dropout optional hyperparameter; 0 is exclude, 1 is include
h_drop_keep_prob = D([0.25, 0.5,
0.75]) # dropout probability to choose from
h_num_hidden = D([64, 128, 256, 512, 1024
]) # number of hidden units for affine transform module
h_num_repeats = D([1, 2]) # 1 is appearing once, 2 is appearing twice
# defining search space topology
model = mo.siso_sequential([
flatten(),
mo.siso_repeat(
lambda: mo.siso_sequential([
dense(h_num_hidden),
nonlinearity(h_nonlin_name),
mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop),
]), h_num_repeats),
dense(D([num_classes]))
])
return model
def dnn_cell(h_num_hidden, h_nonlin_name, h_opt_drop, h_drop_keep_prob):
return mo.siso_sequential([
dense(h_num_hidden),
nonlinearity(h_nonlin_name),
mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop)
])
def dnn_net(num_classes):
h_nonlin_name = D(['relu', 'tanh', 'elu'])
h_opt_drop = D([0, 1])
return mo.siso_sequential([
flatten(),
mo.siso_repeat(
lambda: dnn_cell(D([64, 128, 256, 512, 1024]), h_nonlin_name,
h_opt_drop, D([0.25, 0.5, 0.75])), D([1, 2])),
dense(D([num_classes]))
])
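# For reference, a hand-written DyNet network equivalent to one point of the
# search space above (flatten -> dense(256) -> relu -> dense(num_classes)).
# This sketch bypasses deep_architect entirely; the helper name and the sizes
# are illustrative assumptions, and the function is never called here.
def _fixed_dnn_example(pc, x, num_classes, n_in=784, n_hidden=256):
    W1, b1 = pc.add_parameters((n_hidden, n_in)), pc.add_parameters((n_hidden, 1))
    W2, b2 = pc.add_parameters((num_classes, n_hidden)), pc.add_parameters((num_classes, 1))
    h = dy.rectify(dy.affine_transform([b1.expr(), W1.expr(), x]))
    return dy.affine_transform([b2.expr(), W2.expr(), h])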
# Main/Searcher
# Getting and reading mnist data adapted from here:
# https://github.com/clab/dynet/blob/master/examples/mnist/mnist-autobatch.py
import deep_architect.searchers.random as se
import deep_architect.core as co
from deep_architect.contrib.misc.datasets.loaders import load_mnist
def get_search_space(num_classes):
def fn():
co.Scope.reset_default_scope()
inputs, outputs = dnn_net(num_classes)
return inputs, outputs, {}
return fn
def main():
num_classes = 10
    num_samples = 3  # number of architectures to sample
best_val_acc, best_architecture = 0., -1
    # download and normalize data, using the test set as validation for simplicity
    X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',
                                                       normalize_range=True)
    # the evaluator iterates over lists of (label, image_vector) pairs, so
    # pair labels with images before handing the data to it
    train_dataset = list(zip(y_train, X_train))
    val_dataset = list(zip(y_val, X_val))
    # defining evaluator
    evaluator = SimpleClassifierEvaluator(train_dataset, val_dataset,
                                          num_classes,
                                          max_num_training_epochs=5,
                                          log_output_to_terminal=True)
searcher = se.RandomSearcher(get_search_space(num_classes))
    for i in range(num_samples):
print("Sampling architecture %d" % i)
M.renew_collection()
inputs, outputs, _, searcher_eval_token = searcher.sample()
val_acc = evaluator.evaluate(
inputs,
outputs)['val_acc'] # evaluate and return validation accuracy
print("Finished evaluating architecture %d, validation accuracy is %f" %
(i, val_acc))
if val_acc > best_val_acc:
best_val_acc = val_acc
best_architecture = i
searcher.update(val_acc, searcher_eval_token)
print("Best validation accuracy is %f with architecture %d" %
(best_val_acc, best_architecture))
# Evaluator
import random
class SimpleClassifierEvaluator:
def __init__(self,
train_dataset,
val_dataset,
num_classes,
max_num_training_epochs=10,
batch_size=16,
learning_rate=1e-3,
display_step=1,
log_output_to_terminal=True):
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.num_classes = num_classes
self.max_num_training_epochs = max_num_training_epochs
self.learning_rate = learning_rate
self.batch_size = batch_size
self.log_output_to_terminal = log_output_to_terminal
self.display_step = display_step
def compute_accuracy(self, inputs, outputs):
correct = 0
for (label, img) in self.val_dataset:
dy.renew_cg()
x = dy.inputVector(img)
co.forward({inputs['in']: x})
logits = outputs['out'].val
pred = np.argmax(logits.npvalue())
if (label == pred): correct += 1
return (1.0 * correct / len(self.val_dataset))
def evaluate(self, inputs, outputs):
params = M.get_collection()
optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)
num_batches = int(len(self.train_dataset) / self.batch_size)
for epoch in range(self.max_num_training_epochs):
random.shuffle(self.train_dataset)
i = 0
total_loss = 0
while (i < len(self.train_dataset)):
dy.renew_cg()
mbsize = min(self.batch_size, len(self.train_dataset) - i)
minibatch = self.train_dataset[i:i + mbsize]
losses = []
for (label, img) in minibatch:
x = dy.inputVector(img)
co.forward({inputs['in']: x})
logits = outputs['out'].val
loss = dy.pickneglogsoftmax(logits, label)
losses.append(loss)
mbloss = dy.esum(losses) / mbsize
mbloss.backward()
optimizer.update()
total_loss += mbloss.scalar_value()
i += mbsize
val_acc = self.compute_accuracy(inputs, outputs)
if self.log_output_to_terminal and epoch % self.display_step == 0:
print("epoch:", '%d' % (epoch + 1), "loss:",
"{:.9f}".format(total_loss / num_batches),
"validation_accuracy:", "%.5f" % val_acc)
val_acc = self.compute_accuracy(inputs, outputs)
return {'val_acc': val_acc}
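# Minimal smoke-test sketch under stated assumptions: sample one architecture
# from the search space and evaluate it for a single epoch on a tiny synthetic
# dataset of (label, image_vector) pairs. `_smoke_test` is a hypothetical
# helper, not part of the original example; it only shows the data format the
# evaluator expects and reuses the same calls made in main() above.
def _smoke_test(num_classes=3, dim=16, n_examples=32):
    toy = [(random.randrange(num_classes), list(np.random.rand(dim)))
           for _ in range(n_examples)]
    M.renew_collection()
    searcher = se.RandomSearcher(get_search_space(num_classes))
    inputs, outputs, _, _ = searcher.sample()
    evaluator = SimpleClassifierEvaluator(list(toy), list(toy), num_classes,
                                          max_num_training_epochs=1,
                                          log_output_to_terminal=False)
    return evaluator.evaluate(inputs, outputs)['val_acc']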
if __name__ == "__main__":
main()
|
[
"# Search Space for DyNet\n# NOTE: No Batch_norm since DyNet has not supported batch norm\n\nimport dynet as dy\nimport numpy as np\n\nfrom deep_architect.helpers.dynet_support import DyParameterCollection, siso_dynet_module\nimport deep_architect.modules as mo\nimport deep_architect.hyperparameters as hp\n\nM = DyParameterCollection()\nD = hp.Discrete\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n\n return fn\n\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\ndef dense(h_u):\n\n def compile_fn(di, dh):\n shape = di['in'].dim() # ((r, c), batch_dim)\n m, n = dh['units'], shape[0][0]\n pW = M.get_collection().add_parameters((m, n))\n pb = M.get_collection().add_parameters((m, 1))\n Dense = dy.affine_transform\n\n def fn(di):\n In = di['in']\n W, b = pW.expr(), pb.expr()\n # return {'out': W*In + b}\n return {'out': Dense([b, W, In])}\n\n return fn\n\n return siso_dynet_module('Dense', compile_fn, {'units': h_u})\n\n\n# just put here to streamline everything\ndef nonlinearity(h_nonlin_name):\n\n def compile_fn(di, dh):\n\n def fn(di):\n nonlin_name = dh['nonlin_name']\n if nonlin_name == 'relu':\n Out = dy.rectify(di['in'])\n elif nonlin_name == 'elu':\n Out = dy.elu(di['in'])\n elif nonlin_name == 'tanh':\n Out = dy.tanh(di['in'])\n else:\n raise ValueError\n return {'out': Out}\n\n return fn\n\n return siso_dynet_module('Nonlinearity', compile_fn,\n {'nonlin_name': h_nonlin_name})\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n\n return fn\n\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\ndef dnn_net_simple(num_classes):\n\n # declaring hyperparameter\n h_nonlin_name = D(['relu', 'tanh',\n 'elu']) # nonlinearity function names to choose from\n h_opt_drop = D(\n [0, 1]) # dropout optional hyperparameter; 0 is exclude, 1 is include\n h_drop_keep_prob = D([0.25, 0.5,\n 0.75]) # dropout probability to choose from\n h_num_hidden = D([64, 128, 256, 512, 1024\n ]) # number of hidden units for affine transform module\n h_num_repeats = D([1, 2]) # 1 is appearing once, 2 is appearing twice\n\n # defining search space topology\n model = mo.siso_sequential([\n flatten(),\n mo.siso_repeat(\n lambda: mo.siso_sequential([\n dense(h_num_hidden),\n nonlinearity(h_nonlin_name),\n mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop),\n ]), h_num_repeats),\n dense(D([num_classes]))\n ])\n\n return model\n\n\ndef dnn_cell(h_num_hidden, h_nonlin_name, h_opt_drop, h_drop_keep_prob):\n return mo.siso_sequential([\n dense(h_num_hidden),\n nonlinearity(h_nonlin_name),\n mo.siso_optional(lambda: dropout(h_drop_keep_prob), h_opt_drop)\n ])\n\n\ndef dnn_net(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n return mo.siso_sequential([\n flatten(),\n mo.siso_repeat(\n lambda: dnn_cell(D([64, 128, 256, 512, 1024]), h_nonlin_name,\n h_opt_drop, D([0.25, 0.5, 0.75])), D([1, 2])),\n dense(D([num_classes]))\n ])\n\n\n# Main/Searcher\n# Getting and reading mnist data adapted from here:\n# https://github.com/clab/dynet/blob/master/examples/mnist/mnist-autobatch.py\nimport deep_architect.searchers.random as se\nimport deep_architect.core as co\nfrom deep_architect.contrib.misc.datasets.loaders import load_mnist\n\n\ndef get_search_space(num_classes):\n\n def fn():\n 
co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n\n return fn\n\n\ndef main():\n\n num_classes = 10\n num_samples = 3 # number of architecture to sample\n best_val_acc, best_architecture = 0., -1\n\n # donwload and normalize data, using test as val for simplicity\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n\n # defining evaluator\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val),\n num_classes,\n max_num_training_epochs=5,\n log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print(\"Sampling architecture %d\" % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(\n inputs,\n outputs)['val_acc'] # evaluate and return validation accuracy\n print(\"Finished evaluating architecture %d, validation accuracy is %f\" %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print(\"Best validation accuracy is %f with architecture %d\" %\n (best_val_acc, best_architecture))\n\n\n# Evaluator\nimport random\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self,\n train_dataset,\n val_dataset,\n num_classes,\n max_num_training_epochs=10,\n batch_size=16,\n learning_rate=1e-3,\n display_step=1,\n log_output_to_terminal=True):\n\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for (label, img) in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if (label == pred): correct += 1\n return (1.0 * correct / len(self.val_dataset))\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while (i < len(self.train_dataset)):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for (label, img) in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print(\"epoch:\", '%d' % (epoch + 1), \"loss:\",\n \"{:.9f}\".format(total_loss / num_batches),\n \"validation_accuracy:\", \"%.5f\" % val_acc)\n\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\nif __name__ == \"__main__\":\n main()",
"import dynet as dy\nimport numpy as np\nfrom deep_architect.helpers.dynet_support import DyParameterCollection, siso_dynet_module\nimport deep_architect.modules as mo\nimport deep_architect.hyperparameters as hp\nM = DyParameterCollection()\nD = hp.Discrete\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\ndef dense(h_u):\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n m, n = dh['units'], shape[0][0]\n pW = M.get_collection().add_parameters((m, n))\n pb = M.get_collection().add_parameters((m, 1))\n Dense = dy.affine_transform\n\n def fn(di):\n In = di['in']\n W, b = pW.expr(), pb.expr()\n return {'out': Dense([b, W, In])}\n return fn\n return siso_dynet_module('Dense', compile_fn, {'units': h_u})\n\n\ndef nonlinearity(h_nonlin_name):\n\n def compile_fn(di, dh):\n\n def fn(di):\n nonlin_name = dh['nonlin_name']\n if nonlin_name == 'relu':\n Out = dy.rectify(di['in'])\n elif nonlin_name == 'elu':\n Out = dy.elu(di['in'])\n elif nonlin_name == 'tanh':\n Out = dy.tanh(di['in'])\n else:\n raise ValueError\n return {'out': Out}\n return fn\n return siso_dynet_module('Nonlinearity', compile_fn, {'nonlin_name':\n h_nonlin_name})\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n return fn\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\ndef dnn_net_simple(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n h_drop_keep_prob = D([0.25, 0.5, 0.75])\n h_num_hidden = D([64, 128, 256, 512, 1024])\n h_num_repeats = D([1, 2])\n model = mo.siso_sequential([flatten(), mo.siso_repeat(lambda : mo.\n siso_sequential([dense(h_num_hidden), nonlinearity(h_nonlin_name),\n mo.siso_optional(lambda : dropout(h_drop_keep_prob), h_opt_drop)]),\n h_num_repeats), dense(D([num_classes]))])\n return model\n\n\ndef dnn_cell(h_num_hidden, h_nonlin_name, h_opt_drop, h_drop_keep_prob):\n return mo.siso_sequential([dense(h_num_hidden), nonlinearity(\n h_nonlin_name), mo.siso_optional(lambda : dropout(h_drop_keep_prob),\n h_opt_drop)])\n\n\ndef dnn_net(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n return mo.siso_sequential([flatten(), mo.siso_repeat(lambda : dnn_cell(\n D([64, 128, 256, 512, 1024]), h_nonlin_name, h_opt_drop, D([0.25, \n 0.5, 0.75])), D([1, 2])), dense(D([num_classes]))])\n\n\nimport deep_architect.searchers.random as se\nimport deep_architect.core as co\nfrom deep_architect.contrib.misc.datasets.loaders import load_mnist\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n 
print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\nimport random\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\nM = DyParameterCollection()\nD = hp.Discrete\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\ndef dense(h_u):\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n m, n = dh['units'], shape[0][0]\n pW = M.get_collection().add_parameters((m, n))\n pb = M.get_collection().add_parameters((m, 1))\n Dense = dy.affine_transform\n\n def fn(di):\n In = di['in']\n W, b = pW.expr(), pb.expr()\n return {'out': Dense([b, W, In])}\n return fn\n return siso_dynet_module('Dense', compile_fn, {'units': h_u})\n\n\ndef nonlinearity(h_nonlin_name):\n\n def compile_fn(di, dh):\n\n def fn(di):\n nonlin_name = dh['nonlin_name']\n if nonlin_name == 'relu':\n Out = dy.rectify(di['in'])\n elif nonlin_name == 'elu':\n Out = dy.elu(di['in'])\n elif nonlin_name == 'tanh':\n Out = dy.tanh(di['in'])\n else:\n raise ValueError\n return {'out': Out}\n return fn\n return siso_dynet_module('Nonlinearity', compile_fn, {'nonlin_name':\n h_nonlin_name})\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n return fn\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\ndef dnn_net_simple(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n h_drop_keep_prob = D([0.25, 0.5, 0.75])\n h_num_hidden = D([64, 128, 256, 512, 1024])\n h_num_repeats = D([1, 2])\n model = mo.siso_sequential([flatten(), mo.siso_repeat(lambda : mo.\n siso_sequential([dense(h_num_hidden), nonlinearity(h_nonlin_name),\n mo.siso_optional(lambda : dropout(h_drop_keep_prob), h_opt_drop)]),\n h_num_repeats), dense(D([num_classes]))])\n return model\n\n\ndef dnn_cell(h_num_hidden, h_nonlin_name, h_opt_drop, h_drop_keep_prob):\n return mo.siso_sequential([dense(h_num_hidden), nonlinearity(\n h_nonlin_name), mo.siso_optional(lambda : dropout(h_drop_keep_prob),\n h_opt_drop)])\n\n\ndef dnn_net(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n return mo.siso_sequential([flatten(), mo.siso_repeat(lambda : dnn_cell(\n D([64, 128, 256, 512, 1024]), h_nonlin_name, h_opt_drop, D([0.25, \n 0.5, 0.75])), D([1, 2])), dense(D([num_classes]))])\n\n\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, 
best_architecture))\n\n\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\ndef dense(h_u):\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n m, n = dh['units'], shape[0][0]\n pW = M.get_collection().add_parameters((m, n))\n pb = M.get_collection().add_parameters((m, 1))\n Dense = dy.affine_transform\n\n def fn(di):\n In = di['in']\n W, b = pW.expr(), pb.expr()\n return {'out': Dense([b, W, In])}\n return fn\n return siso_dynet_module('Dense', compile_fn, {'units': h_u})\n\n\ndef nonlinearity(h_nonlin_name):\n\n def compile_fn(di, dh):\n\n def fn(di):\n nonlin_name = dh['nonlin_name']\n if nonlin_name == 'relu':\n Out = dy.rectify(di['in'])\n elif nonlin_name == 'elu':\n Out = dy.elu(di['in'])\n elif nonlin_name == 'tanh':\n Out = dy.tanh(di['in'])\n else:\n raise ValueError\n return {'out': Out}\n return fn\n return siso_dynet_module('Nonlinearity', compile_fn, {'nonlin_name':\n h_nonlin_name})\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n return fn\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\ndef dnn_net_simple(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n h_drop_keep_prob = D([0.25, 0.5, 0.75])\n h_num_hidden = D([64, 128, 256, 512, 1024])\n h_num_repeats = D([1, 2])\n model = mo.siso_sequential([flatten(), mo.siso_repeat(lambda : mo.\n siso_sequential([dense(h_num_hidden), nonlinearity(h_nonlin_name),\n mo.siso_optional(lambda : dropout(h_drop_keep_prob), h_opt_drop)]),\n h_num_repeats), dense(D([num_classes]))])\n return model\n\n\ndef dnn_cell(h_num_hidden, h_nonlin_name, h_opt_drop, h_drop_keep_prob):\n return mo.siso_sequential([dense(h_num_hidden), nonlinearity(\n h_nonlin_name), mo.siso_optional(lambda : dropout(h_drop_keep_prob),\n h_opt_drop)])\n\n\ndef dnn_net(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n return mo.siso_sequential([flatten(), mo.siso_repeat(lambda : dnn_cell(\n D([64, 128, 256, 512, 1024]), h_nonlin_name, h_opt_drop, D([0.25, \n 0.5, 0.75])), D([1, 2])), dense(D([num_classes]))])\n\n\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\n<import token>\n\n\nclass 
SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<assignment token>\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\ndef dense(h_u):\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n m, n = dh['units'], shape[0][0]\n pW = M.get_collection().add_parameters((m, n))\n pb = M.get_collection().add_parameters((m, 1))\n Dense = dy.affine_transform\n\n def fn(di):\n In = di['in']\n W, b = pW.expr(), pb.expr()\n return {'out': Dense([b, W, In])}\n return fn\n return siso_dynet_module('Dense', compile_fn, {'units': h_u})\n\n\ndef nonlinearity(h_nonlin_name):\n\n def compile_fn(di, dh):\n\n def fn(di):\n nonlin_name = dh['nonlin_name']\n if nonlin_name == 'relu':\n Out = dy.rectify(di['in'])\n elif nonlin_name == 'elu':\n Out = dy.elu(di['in'])\n elif nonlin_name == 'tanh':\n Out = dy.tanh(di['in'])\n else:\n raise ValueError\n return {'out': Out}\n return fn\n return siso_dynet_module('Nonlinearity', compile_fn, {'nonlin_name':\n h_nonlin_name})\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n return fn\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\ndef dnn_net_simple(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n h_drop_keep_prob = D([0.25, 0.5, 0.75])\n h_num_hidden = D([64, 128, 256, 512, 1024])\n h_num_repeats = D([1, 2])\n model = mo.siso_sequential([flatten(), mo.siso_repeat(lambda : mo.\n siso_sequential([dense(h_num_hidden), nonlinearity(h_nonlin_name),\n mo.siso_optional(lambda : dropout(h_drop_keep_prob), h_opt_drop)]),\n h_num_repeats), dense(D([num_classes]))])\n return model\n\n\ndef dnn_cell(h_num_hidden, h_nonlin_name, h_opt_drop, h_drop_keep_prob):\n return mo.siso_sequential([dense(h_num_hidden), nonlinearity(\n h_nonlin_name), mo.siso_optional(lambda : dropout(h_drop_keep_prob),\n h_opt_drop)])\n\n\ndef dnn_net(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n return mo.siso_sequential([flatten(), mo.siso_repeat(lambda : dnn_cell(\n D([64, 128, 256, 512, 1024]), h_nonlin_name, h_opt_drop, D([0.25, \n 0.5, 0.75])), D([1, 2])), dense(D([num_classes]))])\n\n\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\n<import token>\n\n\nclass 
SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\ndef dense(h_u):\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n m, n = dh['units'], shape[0][0]\n pW = M.get_collection().add_parameters((m, n))\n pb = M.get_collection().add_parameters((m, 1))\n Dense = dy.affine_transform\n\n def fn(di):\n In = di['in']\n W, b = pW.expr(), pb.expr()\n return {'out': Dense([b, W, In])}\n return fn\n return siso_dynet_module('Dense', compile_fn, {'units': h_u})\n\n\ndef nonlinearity(h_nonlin_name):\n\n def compile_fn(di, dh):\n\n def fn(di):\n nonlin_name = dh['nonlin_name']\n if nonlin_name == 'relu':\n Out = dy.rectify(di['in'])\n elif nonlin_name == 'elu':\n Out = dy.elu(di['in'])\n elif nonlin_name == 'tanh':\n Out = dy.tanh(di['in'])\n else:\n raise ValueError\n return {'out': Out}\n return fn\n return siso_dynet_module('Nonlinearity', compile_fn, {'nonlin_name':\n h_nonlin_name})\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n return fn\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\ndef dnn_net_simple(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n h_drop_keep_prob = D([0.25, 0.5, 0.75])\n h_num_hidden = D([64, 128, 256, 512, 1024])\n h_num_repeats = D([1, 2])\n model = mo.siso_sequential([flatten(), mo.siso_repeat(lambda : mo.\n siso_sequential([dense(h_num_hidden), nonlinearity(h_nonlin_name),\n mo.siso_optional(lambda : dropout(h_drop_keep_prob), h_opt_drop)]),\n h_num_repeats), dense(D([num_classes]))])\n return model\n\n\n<function token>\n\n\ndef dnn_net(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n return mo.siso_sequential([flatten(), mo.siso_repeat(lambda : dnn_cell(\n D([64, 128, 256, 512, 1024]), h_nonlin_name, h_opt_drop, D([0.25, \n 0.5, 0.75])), D([1, 2])), dense(D([num_classes]))])\n\n\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n 
self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\ndef dense(h_u):\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n m, n = dh['units'], shape[0][0]\n pW = M.get_collection().add_parameters((m, n))\n pb = M.get_collection().add_parameters((m, 1))\n Dense = dy.affine_transform\n\n def fn(di):\n In = di['in']\n W, b = pW.expr(), pb.expr()\n return {'out': Dense([b, W, In])}\n return fn\n return siso_dynet_module('Dense', compile_fn, {'units': h_u})\n\n\ndef nonlinearity(h_nonlin_name):\n\n def compile_fn(di, dh):\n\n def fn(di):\n nonlin_name = dh['nonlin_name']\n if nonlin_name == 'relu':\n Out = dy.rectify(di['in'])\n elif nonlin_name == 'elu':\n Out = dy.elu(di['in'])\n elif nonlin_name == 'tanh':\n Out = dy.tanh(di['in'])\n else:\n raise ValueError\n return {'out': Out}\n return fn\n return siso_dynet_module('Nonlinearity', compile_fn, {'nonlin_name':\n h_nonlin_name})\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n return fn\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\n<function token>\n<function token>\n\n\ndef dnn_net(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n return mo.siso_sequential([flatten(), mo.siso_repeat(lambda : dnn_cell(\n D([64, 128, 256, 512, 1024]), h_nonlin_name, h_opt_drop, D([0.25, \n 0.5, 0.75])), D([1, 2])), dense(D([num_classes]))])\n\n\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n 
co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\n<function token>\n\n\ndef nonlinearity(h_nonlin_name):\n\n def compile_fn(di, dh):\n\n def fn(di):\n nonlin_name = dh['nonlin_name']\n if nonlin_name == 'relu':\n Out = dy.rectify(di['in'])\n elif nonlin_name == 'elu':\n Out = dy.elu(di['in'])\n elif nonlin_name == 'tanh':\n Out = dy.tanh(di['in'])\n else:\n raise ValueError\n return {'out': Out}\n return fn\n return siso_dynet_module('Nonlinearity', compile_fn, {'nonlin_name':\n h_nonlin_name})\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n return fn\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\n<function token>\n<function token>\n\n\ndef dnn_net(num_classes):\n h_nonlin_name = D(['relu', 'tanh', 'elu'])\n h_opt_drop = D([0, 1])\n return mo.siso_sequential([flatten(), mo.siso_repeat(lambda : dnn_cell(\n D([64, 128, 256, 512, 1024]), h_nonlin_name, h_opt_drop, D([0.25, \n 0.5, 0.75])), D([1, 2])), dense(D([num_classes]))])\n\n\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in 
range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\n<function token>\n\n\ndef nonlinearity(h_nonlin_name):\n\n def compile_fn(di, dh):\n\n def fn(di):\n nonlin_name = dh['nonlin_name']\n if nonlin_name == 'relu':\n Out = dy.rectify(di['in'])\n elif nonlin_name == 'elu':\n Out = dy.elu(di['in'])\n elif nonlin_name == 'tanh':\n Out = dy.tanh(di['in'])\n else:\n raise ValueError\n return {'out': Out}\n return fn\n return siso_dynet_module('Nonlinearity', compile_fn, {'nonlin_name':\n h_nonlin_name})\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n return fn\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in 
minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\n<function token>\n<function token>\n\n\ndef dropout(h_keep_prob):\n\n def compile_fn(di, dh):\n p = dh['keep_prop']\n Dropout = dy.dropout\n\n def fn(di):\n return {'out': Dropout(di['in'], p)}\n return fn\n return siso_dynet_module('Dropout', compile_fn, {'keep_prop': h_keep_prob})\n\n\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n 
print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef flatten():\n\n def compile_fn(di, dh):\n shape = di['in'].dim()\n n = np.product(shape[0])\n Flatten = dy.reshape\n\n def fn(di):\n return {'out': Flatten(di['in'], (n,))}\n return fn\n return siso_dynet_module('Flatten', compile_fn, {})\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\ndef main():\n num_classes = 10\n num_samples = 3\n best_val_acc, best_architecture = 0.0, -1\n X_train, y_train, X_val, y_val, _, _ = load_mnist('data/mnist',\n normalize_range=True)\n evaluator = SimpleClassifierEvaluator((X_train, y_train), (X_val, y_val\n ), num_classes, max_num_training_epochs=5, log_output_to_terminal=True)\n searcher = se.RandomSearcher(get_search_space(num_classes))\n for i in xrange(num_samples):\n print('Sampling architecture %d' % i)\n M.renew_collection()\n inputs, outputs, _, searcher_eval_token = searcher.sample()\n val_acc = evaluator.evaluate(inputs, outputs)['val_acc']\n print(\n 'Finished evaluating architecture %d, validation accuracy is %f' %\n (i, val_acc))\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_architecture = i\n searcher.update(val_acc, searcher_eval_token)\n print('Best validation accuracy is %f with architecture %d' % (\n best_val_acc, best_architecture))\n\n\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n\n\ndef get_search_space(num_classes):\n\n def fn():\n co.Scope.reset_default_scope()\n inputs, outputs = dnn_net(num_classes)\n return inputs, outputs, {}\n return fn\n\n\n<function token>\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n\n def compute_accuracy(self, inputs, outputs):\n correct = 0\n for label, img in self.val_dataset:\n dy.renew_cg()\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n pred = np.argmax(logits.npvalue())\n if label == pred:\n correct += 1\n return 1.0 * correct / len(self.val_dataset)\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n\n def __init__(self, train_dataset, val_dataset, num_classes,\n max_num_training_epochs=10, batch_size=16, learning_rate=0.001,\n display_step=1, log_output_to_terminal=True):\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.num_classes = num_classes\n self.max_num_training_epochs = max_num_training_epochs\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n self.log_output_to_terminal = log_output_to_terminal\n self.display_step = display_step\n <function token>\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n <function token>\n <function token>\n\n def evaluate(self, inputs, outputs):\n params = M.get_collection()\n optimizer = dy.SimpleSGDTrainer(params, self.learning_rate)\n num_batches = int(len(self.train_dataset) / self.batch_size)\n for epoch in range(self.max_num_training_epochs):\n random.shuffle(self.train_dataset)\n i = 0\n total_loss = 0\n while i < len(self.train_dataset):\n dy.renew_cg()\n mbsize = min(self.batch_size, len(self.train_dataset) - i)\n minibatch = self.train_dataset[i:i + mbsize]\n losses = []\n for label, img in minibatch:\n x = dy.inputVector(img)\n co.forward({inputs['in']: x})\n logits = outputs['out'].val\n loss = dy.pickneglogsoftmax(logits, label)\n losses.append(loss)\n mbloss = dy.esum(losses) / mbsize\n mbloss.backward()\n optimizer.update()\n total_loss += mbloss.scalar_value()\n i += mbsize\n val_acc = self.compute_accuracy(inputs, outputs)\n if self.log_output_to_terminal and epoch % self.display_step == 0:\n print('epoch:', '%d' % (epoch + 1), 'loss:', '{:.9f}'.\n format(total_loss / num_batches),\n 'validation_accuracy:', '%.5f' % val_acc)\n val_acc = self.compute_accuracy(inputs, outputs)\n return {'val_acc': val_acc}\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n\n\nclass SimpleClassifierEvaluator:\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n<function token>\n<import token>\n<class token>\n<code token>\n"
] | false |
98,862 |
46afa707cf17654e812a3ee60fbb78db513cc968
|
#!/usr/bin/env python
import sys
import glob
import xml.etree.ElementTree as etree

# Source data directory and the dataset-XML templates / input / output files.
idir = '/home/om/cron/pioneer/data/ADCP/'
maskfile_rec = 'datasets_mask_ADCP_REC.xml'
maskfile_tel = 'datasets_mask_ADCP_TEL.xml'
infile = 'datasets_WFP_MOAS.xml'
outfile = 'dataset_WFP_MOAS_adcp.xml'


def main(argv):
    print('APPENDING')
    tree = etree.parse(infile)
    dxml = tree.getroot()

    # RECOVERED: append one dataset element per recovered-ADCP NetCDF file.
    files = glob.glob(idir + 'PIONEER_ADCP_RECOVERED_*.nc')
    for path in files:
        print(path)
        filename = path.split('/')[-1]
        print(filename)
        dataset_id = filename.split('.')[0]
        print(dataset_id)
        newelement = etree.parse(maskfile_rec).getroot()
        newelement.find('fileNameRegex').text = filename
        newelement.set('datasetID', dataset_id)
        dxml.append(newelement)

    # TELEMETERED: same procedure with the telemetered mask template.
    files = glob.glob(idir + 'PIONEER_ADCP_TELEMETERED_*.nc')
    for path in files:
        print(path)
        filename = path.split('/')[-1]
        print(filename)
        dataset_id = filename.split('.')[0]
        print(dataset_id)
        newelement = etree.parse(maskfile_tel).getroot()
        newelement.find('fileNameRegex').text = filename
        newelement.set('datasetID', dataset_id)
        dxml.append(newelement)

    tree.write(outfile, encoding="ISO-8859-1", xml_declaration=True)


if __name__ == "__main__":
    # parser = optparse.OptionParser()
    # parser.add_option('-t', '--type', dest='type', help='MOAS,WFP,SPP,BPRESS', default='MOAS', type='str')
    # parser.add_option('-s', '--state', dest='state', help='REC,TEL', default='REC', type='str')
    # (opts, args) = parser.parse_args()
    print('RUNNING')
    main(sys.argv)
|
[
"#!/usr/bin/env python\n\nimport os,sys,fnmatch\nimport xml.etree.ElementTree as etree\nimport glob\n\n\nidir='/home/om/cron/pioneer/data/ADCP/'\nmaskfile_rec='datasets_mask_ADCP_REC.xml'\nmaskfile_tel='datasets_mask_ADCP_TEL.xml'\ninfile='datasets_WFP_MOAS.xml'\noutfile='dataset_WFP_MOAS_adcp.xml'\n\ndef main(argv):\n print 'APPENDING'\n tree=etree.parse(infile)\n dxml=tree.getroot()\n \n #RECOVERED\n files=glob.glob(idir+'PIONEER_ADCP_RECOVERED_*.nc') \n for file in files:\n print file\n parts=file.split('/')\n filename=parts[-1:]\n print filename[0]\n parts2=filename[0].split('.')\n Did=parts2[0]\n print Did\n newelement=etree.parse(maskfile_rec).getroot()\n newelement.find('fileNameRegex').text=filename[0]\n newelement.set('datasetID',Did)\n dxml.append(newelement) \n# \n# \n\n\n #TELEMETERED\n files=glob.glob(idir+'PIONEER_ADCP_TELEMETERED_*.nc') \n for file in files:\n print file\n parts=file.split('/')\n filename=parts[-1:]\n print filename[0]\n parts2=filename[0].split('.')\n Did=parts2[0]\n print Did\n newelement=etree.parse(maskfile_tel).getroot()\n newelement.find('fileNameRegex').text=filename[0]\n newelement.set('datasetID',Did)\n dxml.append(newelement) \n# \n \n \n tree.write(outfile, encoding=\"ISO-8859-1\", xml_declaration=True)\n \n \n\n# \n# \n \n# try:\n# response=urllib2.urlopen(allurls[0])\n# except urllib2.URLError,e:\n# print \"Error accessing site:\",e\n# \n# print response.read()\n# \nif __name__ == \"__main__\":\n # parser = optparse.OptionParser()\n # parser.add_option('-t', '--type',dest='type',help='MOAS,WFP,SPP,BPRESS',default='MOAS',type='str')\n # parser.add_option('-s', '--state',dest='state',help='REC,TEL',default='REC',type='str')\n # (opts, args) = parser.parse_args()\n \n \n print 'RUNNING'\n main(sys.argv)\n"
] | true |
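The script in the record above follows a standard xml.etree.ElementTree pattern: parse a template element, overwrite a couple of fields, append it to a larger datasets document, and write the result back out. A minimal, self-contained sketch of that pattern is shown below; the erddapDatasets/dataset tag names, the sample NetCDF file names, and the output file name are illustrative assumptions rather than values taken from the record.

import xml.etree.ElementTree as ET

# Parent document and a reusable template element (stand-ins for the
# datasets_*.xml files used by the script above).
root = ET.fromstring('<erddapDatasets></erddapDatasets>')
template = '<dataset><fileNameRegex/></dataset>'

for nc_name in ['PIONEER_ADCP_RECOVERED_001.nc', 'PIONEER_ADCP_TELEMETERED_002.nc']:
    elem = ET.fromstring(template)                 # fresh copy of the template
    elem.set('datasetID', nc_name.split('.')[0])   # dataset ID = file name without extension
    elem.find('fileNameRegex').text = nc_name
    root.append(elem)

# Write the combined document, mirroring tree.write(...) in the script above.
ET.ElementTree(root).write('combined_datasets.xml',
                           encoding='ISO-8859-1', xml_declaration=True)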
98,863 |
dec2ce871ff635b50d8dc1b010fc7ca5328dbb93
|
import json
import logging
import time
import traceback
from simplejson import JSONDecodeError
import requests
from eu.softfire.integrationtest.utils.exceptions import MonitoringResourceValidationException
from eu.softfire.integrationtest.utils.utils import get_config_value
from eu.softfire.integrationtest.validators.validators import AbstractValidator
log = logging.getLogger(__name__)
class MonitoringResourceValidator(AbstractValidator):
def validate(self, resource, resource_id, used_resource_id, session):
log.debug('Validate MonitoringResource with resource_id: {}'.format(resource_id))
log.debug('Validate MonitoringResource with resource: {}'.format(resource))
attempts = int(get_config_value('monitoring-resource', 'attempts', '10'))
try:
res = json.loads(resource)
except JSONDecodeError as e:
raise MonitoringResourceValidationException(e.msg)
if not res["floatingIp"]:
raise MonitoringResourceValidationException("Floating ip not available: {}".format(res))
cnt = 1
while cnt <= attempts:
log.debug('Validate attempt: {}'.format(cnt))
try:
r = requests.get(res["url"], timeout=10)
if r.status_code == 200:
if "zabbix.php" in r.text:
                        log.debug('********SUCCESS*********')
return
except Exception as e:
                if cnt >= attempts:
log.error("after %d attempts zabbix is not started yet, considering it failed..." % attempts)
exception_data = traceback.format_exc().splitlines()
exception_text = "Error: {}".format(exception_data[-1])
log.error(exception_text)
raise e # raise exceptions only after X attempts, to allow test passing in slow environments
cnt += 1
time.sleep(5)
raise MonitoringResourceValidationException(res)
|
[
"import json\nimport logging\nimport time\nimport traceback\nfrom simplejson import JSONDecodeError\n\nimport requests\n\nfrom eu.softfire.integrationtest.utils.exceptions import MonitoringResourceValidationException\nfrom eu.softfire.integrationtest.utils.utils import get_config_value\nfrom eu.softfire.integrationtest.validators.validators import AbstractValidator\n\nlog = logging.getLogger(__name__)\n\nclass MonitoringResourceValidator(AbstractValidator):\n def validate(self, resource, resource_id, used_resource_id, session):\n log.debug('Validate MonitoringResource with resource_id: {}'.format(resource_id))\n log.debug('Validate MonitoringResource with resource: {}'.format(resource))\n\n attempts = int(get_config_value('monitoring-resource', 'attempts', '10'))\n\n try:\n res = json.loads(resource)\n except JSONDecodeError as e:\n raise MonitoringResourceValidationException(e.msg)\n\n if not res[\"floatingIp\"]:\n raise MonitoringResourceValidationException(\"Floating ip not available: {}\".format(res))\n \n cnt = 1\n while cnt <= attempts:\n\n log.debug('Validate attempt: {}'.format(cnt))\n\n try:\n r = requests.get(res[\"url\"], timeout=10)\n if r.status_code == 200:\n if \"zabbix.php\" in r.text:\n log.debug('********SUCCESSS*********')\n return\n except Exception as e:\n if cnt > attempts:\n log.error(\"after %d attempts zabbix is not started yet, considering it failed...\" % attempts)\n exception_data = traceback.format_exc().splitlines()\n exception_text = \"Error: {}\".format(exception_data[-1])\n log.error(exception_text)\n raise e # raise exceptions only after X attempts, to allow test passing in slow environments\n\n cnt += 1\n\n time.sleep(5)\n\n raise MonitoringResourceValidationException(res)\n",
"import json\nimport logging\nimport time\nimport traceback\nfrom simplejson import JSONDecodeError\nimport requests\nfrom eu.softfire.integrationtest.utils.exceptions import MonitoringResourceValidationException\nfrom eu.softfire.integrationtest.utils.utils import get_config_value\nfrom eu.softfire.integrationtest.validators.validators import AbstractValidator\nlog = logging.getLogger(__name__)\n\n\nclass MonitoringResourceValidator(AbstractValidator):\n\n def validate(self, resource, resource_id, used_resource_id, session):\n log.debug('Validate MonitoringResource with resource_id: {}'.format\n (resource_id))\n log.debug('Validate MonitoringResource with resource: {}'.format(\n resource))\n attempts = int(get_config_value('monitoring-resource', 'attempts',\n '10'))\n try:\n res = json.loads(resource)\n except JSONDecodeError as e:\n raise MonitoringResourceValidationException(e.msg)\n if not res['floatingIp']:\n raise MonitoringResourceValidationException(\n 'Floating ip not available: {}'.format(res))\n cnt = 1\n while cnt <= attempts:\n log.debug('Validate attempt: {}'.format(cnt))\n try:\n r = requests.get(res['url'], timeout=10)\n if r.status_code == 200:\n if 'zabbix.php' in r.text:\n log.debug('********SUCCESSS*********')\n return\n except Exception as e:\n if cnt > attempts:\n log.error(\n 'after %d attempts zabbix is not started yet, considering it failed...'\n % attempts)\n exception_data = traceback.format_exc().splitlines()\n exception_text = 'Error: {}'.format(exception_data[-1])\n log.error(exception_text)\n raise e\n cnt += 1\n time.sleep(5)\n raise MonitoringResourceValidationException(res)\n",
"<import token>\nlog = logging.getLogger(__name__)\n\n\nclass MonitoringResourceValidator(AbstractValidator):\n\n def validate(self, resource, resource_id, used_resource_id, session):\n log.debug('Validate MonitoringResource with resource_id: {}'.format\n (resource_id))\n log.debug('Validate MonitoringResource with resource: {}'.format(\n resource))\n attempts = int(get_config_value('monitoring-resource', 'attempts',\n '10'))\n try:\n res = json.loads(resource)\n except JSONDecodeError as e:\n raise MonitoringResourceValidationException(e.msg)\n if not res['floatingIp']:\n raise MonitoringResourceValidationException(\n 'Floating ip not available: {}'.format(res))\n cnt = 1\n while cnt <= attempts:\n log.debug('Validate attempt: {}'.format(cnt))\n try:\n r = requests.get(res['url'], timeout=10)\n if r.status_code == 200:\n if 'zabbix.php' in r.text:\n log.debug('********SUCCESSS*********')\n return\n except Exception as e:\n if cnt > attempts:\n log.error(\n 'after %d attempts zabbix is not started yet, considering it failed...'\n % attempts)\n exception_data = traceback.format_exc().splitlines()\n exception_text = 'Error: {}'.format(exception_data[-1])\n log.error(exception_text)\n raise e\n cnt += 1\n time.sleep(5)\n raise MonitoringResourceValidationException(res)\n",
"<import token>\n<assignment token>\n\n\nclass MonitoringResourceValidator(AbstractValidator):\n\n def validate(self, resource, resource_id, used_resource_id, session):\n log.debug('Validate MonitoringResource with resource_id: {}'.format\n (resource_id))\n log.debug('Validate MonitoringResource with resource: {}'.format(\n resource))\n attempts = int(get_config_value('monitoring-resource', 'attempts',\n '10'))\n try:\n res = json.loads(resource)\n except JSONDecodeError as e:\n raise MonitoringResourceValidationException(e.msg)\n if not res['floatingIp']:\n raise MonitoringResourceValidationException(\n 'Floating ip not available: {}'.format(res))\n cnt = 1\n while cnt <= attempts:\n log.debug('Validate attempt: {}'.format(cnt))\n try:\n r = requests.get(res['url'], timeout=10)\n if r.status_code == 200:\n if 'zabbix.php' in r.text:\n log.debug('********SUCCESSS*********')\n return\n except Exception as e:\n if cnt > attempts:\n log.error(\n 'after %d attempts zabbix is not started yet, considering it failed...'\n % attempts)\n exception_data = traceback.format_exc().splitlines()\n exception_text = 'Error: {}'.format(exception_data[-1])\n log.error(exception_text)\n raise e\n cnt += 1\n time.sleep(5)\n raise MonitoringResourceValidationException(res)\n",
"<import token>\n<assignment token>\n\n\nclass MonitoringResourceValidator(AbstractValidator):\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
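The validator in the record above is essentially a bounded polling loop: issue an HTTP GET, succeed as soon as the expected marker appears in the response body, otherwise sleep and retry, and fail once the attempts are exhausted. A stripped-down, standalone sketch of that loop is below; the helper name, URL, and marker are illustrative and not part of the SoftFIRE code.

import time
import requests

def wait_for_page(url, marker, attempts=10, delay=5, timeout=10):
    """Return True once `marker` appears at `url`, False after `attempts` tries."""
    for _ in range(attempts):
        try:
            r = requests.get(url, timeout=timeout)
            if r.status_code == 200 and marker in r.text:
                return True
        except requests.RequestException:
            pass  # swallow transient errors until the attempts run out
        time.sleep(delay)
    return False

# Example call (hypothetical endpoint): wait_for_page('http://203.0.113.10/zabbix', 'zabbix.php')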
98,864 |
730f62e55a7a6366d75d990e557bcce7a623c861
|
import numpy as np
import pandas as pd
import geopandas as gpd
from sklearn.decomposition import PCA
from sklearn import preprocessing as pp
from accessibility_analyzing import utlis as ut
from accessibility_analyzing.accessibility_calculator import accessibility_calculator
"""
The code in this module is currently unused.
"""
def generate_access_index(dir_name=r'D:\multicities\data\深圳分区\可达性结算结果'):
'''
    The folder that stores the accessibility calculation results;
    for now it only contains two files.
'''
for each in ut.iter_shpfile(dir_name,['.shp']):
t = gpd.read_file(r'{}'.format(each))
yield t['access_ind']
def write_accind_2_file(file_dir):
'''
    Write the accessibility calculation results into the file given by file_dir.
    Returns a GeoPandas GeoDataFrame.
'''
i = 1
t = gpd.read_file(file_dir)
for each in generate_access_index():
name = 'access_ind_{}'.format(i)
t[name] = each
i+=1
return t
def sklearn_pca_cal( n_component=3,
cal_df=None,
is_z_stand=False,
*args):
'''
    The data must be standardized before applying PCA.
    pca.fit_transform(X)           # returns the dimension-reduced data
    pca.components_                # factor loadings; each row is an eigenvector of the covariance matrix
    pca.explained_variance_ratio_  # explained-variance ratios
    pca.explained_variance_        # eigenvalues
    Note: multiplying the (centered) data by pca.components_.T reproduces the output of pca.fit_transform(X).

    cal_df: the DataFrame on which the PCA is computed.
    is_z_stand: this flag is not simply "z-standardize the output". When False, PCA is run directly
    on the opportunity columns, the explained-variance ratios are used to weight the raw
    (non-standardized) values, the weighted columns are summed, and the sum is output as a percentage.
    When True, PCA is run on the standardized accessibility values, which are weighted and summed,
    and both the standardized accessibility columns and the final (weighted-sum) value are output.
'''
column_name_list = []
output_column_name_list = []
for each in args:
assert isinstance(each, str)
column_name_list.append(each)
output_column_name_list.append(each+'_sta')
assert n_component <= len(column_name_list)
acc_file = cal_df
    # X = acc_file[['access_ind','access_ind_1','access_ind_2']]  # take the three columns from the dataframe
X = acc_file[column_name_list]
X = np.array(X)
if is_z_stand==False:
pca = PCA(n_components=n_component)
        NEWX = pca.fit_transform(X)  # dimension-reduced data
variance_ratio = pca.explained_variance_ratio_
t = NEWX*variance_ratio
t = t.sum(axis=1)
        t = t/t.sum(axis=0)  # convert to percentages
        t = t.reshape(len(t), 1)
        results = np.concatenate((X, t), axis=1)  # first columns: input data, last column: weighted-sum result
pca_index = 'pca_en_per'
output_column_name_list.append(pca_index)
results_df = pd.DataFrame(data=results, columns=output_column_name_list)
else:
scaler = pp.StandardScaler()
        X_scaler = scaler.fit_transform(X)  # standardize the data
pca = PCA(n_components=n_component)
        NEWX = pca.fit_transform(X_scaler)  # dimension-reduced data
variance_ratio = pca.explained_variance_ratio_
        t = NEWX*variance_ratio  # broadcast-multiply by the variance ratios
t = t.sum(axis=1)
t = t.reshape(len(t), 1)
        results = np.concatenate((X, t), axis=1)  # first columns: input data, last column: weighted-sum result
# results_df = pd.DataFrame(data=results,columns=['acc_ind_sta','acc_ind_sta_1','acc_ind_sta_2','acc_ind_pca'])
pca_index = 'pca_in_per'
output_column_name_list.append(pca_index)
results_df = pd.DataFrame(data=results,columns=output_column_name_list)
del results_df[output_column_name_list[0]]
del results_df[output_column_name_list[1]]
del results_df[output_column_name_list[2]]
final_re_df = pd.concat([acc_file, results_df], axis=1)
final_re_df['e_0_pe_le'] = ut.value_classify(final_re_df, 'entr_0_per', number=-5)
final_re_df['e_1_pe_le'] = ut.value_classify(final_re_df, 'entr_1_per', number=-5)
final_re_df['e_2_pe_le'] = ut.value_classify(final_re_df, 'entr_2_1_p', number=-5)
final_re_df['pca_en_le'] = ut.value_classify(final_re_df, pca_index, number=-5)
return final_re_df
def entro_add(shp_dir,*args):
df = ut.read_file(shp_dir)
df['aggre_en']=0
for each in args:
assert isinstance(each, str)
df['aggre_en']+=df[each]
df['agg_en_per'] = df['aggre_en']/df['aggre_en'].sum()
df['e_0_pe_le'] = ut.value_classify(df, 'entr_0_per', number=-5)
df['e_1_pe_le'] = ut.value_classify(df, 'entr_1_per', number=-5)
df['e_2_pe_le'] = ut.value_classify(df, 'entr_2_1_p', number=-5)
df['agg_en_le'] = ut.value_classify(df, 'agg_en_per', number=-5)
return df
if __name__ == '__main__':
# df = sklearn_pca_cal(3,ut.read_file(r'D:\multicities\data\深圳分区\sz_10_acc_entro.shp'),True,
# 'entr_0_per','entr_1_per','entr_2_1_p')
df = entro_add(r'D:\multicities\data\深圳分区\sz_10_acc_entro.shp',
'entr_0','entr_1','entr_2_1')
ut.to_file(df,r'D:\multicities\data\深圳分区\sz_10_acc_entro_aggre.shp')
|
[
"import numpy as np\nimport pandas as pd\nimport geopandas as gpd\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing as pp\nfrom accessibility_analyzing import utlis as ut\nfrom accessibility_analyzing.accessibility_calculator import accessibility_calculator\n\n\"\"\"\n此模块下的代码没有使用\n\"\"\"\n\ndef generate_access_index(dir_name=r'D:\\multicities\\data\\深圳分区\\可达性结算结果'):\n '''\n 存放可达性计算结果的文件夹\n 该文件夹下暂时只放置两个文件\n\n '''\n for each in ut.iter_shpfile(dir_name,['.shp']):\n t = gpd.read_file(r'{}'.format(each))\n yield t['access_ind']\n\ndef write_accind_2_file(file_dir):\n '''\n 将可达性计算结果写入 file_dir 下的指定文件\n 返回geo pandas 格式文件\n '''\n i = 1\n t = gpd.read_file(file_dir)\n for each in generate_access_index():\n name = 'access_ind_{}'.format(i)\n t[name] = each\n i+=1\n return t\n\ndef sklearn_pca_cal( n_component=3,\n cal_df=None,\n is_z_stand=False,\n *args):\n '''\n 使用PCA 前必须对数据进行标准化\n pca.fit_transform(X)# 返回降维后的数据\n pca.components_ #返回因子载荷,横向查看相应载荷,实际为协方差矩阵特征值对应的特征向量\n pca.explained_variance_ratio_ #返回方差贡献率\n pca.explained_variance_ #返回特征值\n\n 注意: X*pca.components_ 结果等于 pca.fit_transform(X) 的输出结果\n\n cal_df: 进行pca计算的数据框\n is_z_stand: 这个参数不是简单的是否对输出结果进行z标准化,如果这个这个参数是False 此时计算的是直接对机会项进行PCA\n 计算赋权(对原始非标准化结果直接赋权),加和,百分化输出。 选择为True时 是对标准化后的可达性计算PCA, 赋权,加和,输出标准化后的\n 原始三种可达性值 与 最终可达性值(赋权加和后结果)。\n '''\n column_name_list = []\n output_column_name_list = []\n\n for each in args:\n assert isinstance(each, str)\n column_name_list.append(each)\n output_column_name_list.append(each+'_sta')\n assert n_component <= len(column_name_list)\n\n acc_file = cal_df\n # X = acc_file[['access_ind','access_ind_1','access_ind_2']]#取出dataframe的三列\n X = acc_file[column_name_list]\n X = np.array(X)\n\n\n if is_z_stand==False:\n pca = PCA(n_components=n_component)\n NEWX = pca.fit_transform(X) # 返回降维后的数据\n variance_ratio = pca.explained_variance_ratio_\n t = NEWX*variance_ratio\n t = t.sum(axis=1)\n t = t/t.sum(axis=0)# 计算百分值\n t = t.reshape(len(t), 1)\n results = np.concatenate((X,t),axis=1)# 最后结果,前三列为降维后的数据,最后一列为 加权相加的结果\n pca_index = 'pca_en_per'\n output_column_name_list.append(pca_index)\n results_df = pd.DataFrame(data=results, columns=output_column_name_list)\n else:\n scaler = pp.StandardScaler()\n X_scaler = scaler.fit_transform(X) #对数据进行标准化\n pca = PCA(n_components=n_component)\n NEWX = pca.fit_transform(X_scaler) # 返回降维后的数据\n variance_ratio = pca.explained_variance_ratio_\n t = NEWX*variance_ratio #直接进行广播相乘\n t = t.sum(axis=1)\n t = t.reshape(len(t), 1)\n results = np.concatenate((X,t),axis=1)# 最后结果,前三列为降维后的数据,最后一列为 加权相加的结果\n # results_df = pd.DataFrame(data=results,columns=['acc_ind_sta','acc_ind_sta_1','acc_ind_sta_2','acc_ind_pca'])\n pca_index = 'pca_in_per'\n output_column_name_list.append(pca_index)\n results_df = pd.DataFrame(data=results,columns=output_column_name_list)\n\n del results_df[output_column_name_list[0]]\n del results_df[output_column_name_list[1]]\n del results_df[output_column_name_list[2]]\n final_re_df = pd.concat([acc_file, results_df], axis=1)\n final_re_df['e_0_pe_le'] = ut.value_classify(final_re_df, 'entr_0_per', number=-5)\n final_re_df['e_1_pe_le'] = ut.value_classify(final_re_df, 'entr_1_per', number=-5)\n final_re_df['e_2_pe_le'] = ut.value_classify(final_re_df, 'entr_2_1_p', number=-5)\n final_re_df['pca_en_le'] = ut.value_classify(final_re_df, pca_index, number=-5)\n\n return final_re_df\n\ndef entro_add(shp_dir,*args):\n\n df = ut.read_file(shp_dir)\n df['aggre_en']=0\n for each in args:\n assert isinstance(each, str)\n df['aggre_en']+=df[each]\n 
df['agg_en_per'] = df['aggre_en']/df['aggre_en'].sum()\n df['e_0_pe_le'] = ut.value_classify(df, 'entr_0_per', number=-5)\n df['e_1_pe_le'] = ut.value_classify(df, 'entr_1_per', number=-5)\n df['e_2_pe_le'] = ut.value_classify(df, 'entr_2_1_p', number=-5)\n df['agg_en_le'] = ut.value_classify(df, 'agg_en_per', number=-5)\n\n return df\nif __name__ == '__main__':\n\n # df = sklearn_pca_cal(3,ut.read_file(r'D:\\multicities\\data\\深圳分区\\sz_10_acc_entro.shp'),True,\n # 'entr_0_per','entr_1_per','entr_2_1_p')\n df = entro_add(r'D:\\multicities\\data\\深圳分区\\sz_10_acc_entro.shp',\n 'entr_0','entr_1','entr_2_1')\n ut.to_file(df,r'D:\\multicities\\data\\深圳分区\\sz_10_acc_entro_aggre.shp')\n\n\n",
"import numpy as np\nimport pandas as pd\nimport geopandas as gpd\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing as pp\nfrom accessibility_analyzing import utlis as ut\nfrom accessibility_analyzing.accessibility_calculator import accessibility_calculator\n<docstring token>\n\n\ndef generate_access_index(dir_name='D:\\\\multicities\\\\data\\\\深圳分区\\\\可达性结算结果'):\n \"\"\"\n 存放可达性计算结果的文件夹\n 该文件夹下暂时只放置两个文件\n\n \"\"\"\n for each in ut.iter_shpfile(dir_name, ['.shp']):\n t = gpd.read_file('{}'.format(each))\n yield t['access_ind']\n\n\ndef write_accind_2_file(file_dir):\n \"\"\"\n 将可达性计算结果写入 file_dir 下的指定文件\n 返回geo pandas 格式文件\n \"\"\"\n i = 1\n t = gpd.read_file(file_dir)\n for each in generate_access_index():\n name = 'access_ind_{}'.format(i)\n t[name] = each\n i += 1\n return t\n\n\ndef sklearn_pca_cal(n_component=3, cal_df=None, is_z_stand=False, *args):\n \"\"\"\n 使用PCA 前必须对数据进行标准化\n pca.fit_transform(X)# 返回降维后的数据\n pca.components_ #返回因子载荷,横向查看相应载荷,实际为协方差矩阵特征值对应的特征向量\n pca.explained_variance_ratio_ #返回方差贡献率\n pca.explained_variance_ #返回特征值\n\n 注意: X*pca.components_ 结果等于 pca.fit_transform(X) 的输出结果\n\n cal_df: 进行pca计算的数据框\n is_z_stand: 这个参数不是简单的是否对输出结果进行z标准化,如果这个这个参数是False 此时计算的是直接对机会项进行PCA\n 计算赋权(对原始非标准化结果直接赋权),加和,百分化输出。 选择为True时 是对标准化后的可达性计算PCA, 赋权,加和,输出标准化后的\n 原始三种可达性值 与 最终可达性值(赋权加和后结果)。\n \"\"\"\n column_name_list = []\n output_column_name_list = []\n for each in args:\n assert isinstance(each, str)\n column_name_list.append(each)\n output_column_name_list.append(each + '_sta')\n assert n_component <= len(column_name_list)\n acc_file = cal_df\n X = acc_file[column_name_list]\n X = np.array(X)\n if is_z_stand == False:\n pca = PCA(n_components=n_component)\n NEWX = pca.fit_transform(X)\n variance_ratio = pca.explained_variance_ratio_\n t = NEWX * variance_ratio\n t = t.sum(axis=1)\n t = t / t.sum(axis=0)\n t = t.reshape(len(t), 1)\n results = np.concatenate((X, t), axis=1)\n pca_index = 'pca_en_per'\n output_column_name_list.append(pca_index)\n results_df = pd.DataFrame(data=results, columns=output_column_name_list\n )\n else:\n scaler = pp.StandardScaler()\n X_scaler = scaler.fit_transform(X)\n pca = PCA(n_components=n_component)\n NEWX = pca.fit_transform(X_scaler)\n variance_ratio = pca.explained_variance_ratio_\n t = NEWX * variance_ratio\n t = t.sum(axis=1)\n t = t.reshape(len(t), 1)\n results = np.concatenate((X, t), axis=1)\n pca_index = 'pca_in_per'\n output_column_name_list.append(pca_index)\n results_df = pd.DataFrame(data=results, columns=output_column_name_list\n )\n del results_df[output_column_name_list[0]]\n del results_df[output_column_name_list[1]]\n del results_df[output_column_name_list[2]]\n final_re_df = pd.concat([acc_file, results_df], axis=1)\n final_re_df['e_0_pe_le'] = ut.value_classify(final_re_df, 'entr_0_per',\n number=-5)\n final_re_df['e_1_pe_le'] = ut.value_classify(final_re_df, 'entr_1_per',\n number=-5)\n final_re_df['e_2_pe_le'] = ut.value_classify(final_re_df, 'entr_2_1_p',\n number=-5)\n final_re_df['pca_en_le'] = ut.value_classify(final_re_df, pca_index,\n number=-5)\n return final_re_df\n\n\ndef entro_add(shp_dir, *args):\n df = ut.read_file(shp_dir)\n df['aggre_en'] = 0\n for each in args:\n assert isinstance(each, str)\n df['aggre_en'] += df[each]\n df['agg_en_per'] = df['aggre_en'] / df['aggre_en'].sum()\n df['e_0_pe_le'] = ut.value_classify(df, 'entr_0_per', number=-5)\n df['e_1_pe_le'] = ut.value_classify(df, 'entr_1_per', number=-5)\n df['e_2_pe_le'] = ut.value_classify(df, 'entr_2_1_p', number=-5)\n df['agg_en_le'] = 
ut.value_classify(df, 'agg_en_per', number=-5)\n return df\n\n\nif __name__ == '__main__':\n df = entro_add('D:\\\\multicities\\\\data\\\\深圳分区\\\\sz_10_acc_entro.shp',\n 'entr_0', 'entr_1', 'entr_2_1')\n ut.to_file(df, 'D:\\\\multicities\\\\data\\\\深圳分区\\\\sz_10_acc_entro_aggre.shp')\n",
"<import token>\n<docstring token>\n\n\ndef generate_access_index(dir_name='D:\\\\multicities\\\\data\\\\深圳分区\\\\可达性结算结果'):\n \"\"\"\n 存放可达性计算结果的文件夹\n 该文件夹下暂时只放置两个文件\n\n \"\"\"\n for each in ut.iter_shpfile(dir_name, ['.shp']):\n t = gpd.read_file('{}'.format(each))\n yield t['access_ind']\n\n\ndef write_accind_2_file(file_dir):\n \"\"\"\n 将可达性计算结果写入 file_dir 下的指定文件\n 返回geo pandas 格式文件\n \"\"\"\n i = 1\n t = gpd.read_file(file_dir)\n for each in generate_access_index():\n name = 'access_ind_{}'.format(i)\n t[name] = each\n i += 1\n return t\n\n\ndef sklearn_pca_cal(n_component=3, cal_df=None, is_z_stand=False, *args):\n \"\"\"\n 使用PCA 前必须对数据进行标准化\n pca.fit_transform(X)# 返回降维后的数据\n pca.components_ #返回因子载荷,横向查看相应载荷,实际为协方差矩阵特征值对应的特征向量\n pca.explained_variance_ratio_ #返回方差贡献率\n pca.explained_variance_ #返回特征值\n\n 注意: X*pca.components_ 结果等于 pca.fit_transform(X) 的输出结果\n\n cal_df: 进行pca计算的数据框\n is_z_stand: 这个参数不是简单的是否对输出结果进行z标准化,如果这个这个参数是False 此时计算的是直接对机会项进行PCA\n 计算赋权(对原始非标准化结果直接赋权),加和,百分化输出。 选择为True时 是对标准化后的可达性计算PCA, 赋权,加和,输出标准化后的\n 原始三种可达性值 与 最终可达性值(赋权加和后结果)。\n \"\"\"\n column_name_list = []\n output_column_name_list = []\n for each in args:\n assert isinstance(each, str)\n column_name_list.append(each)\n output_column_name_list.append(each + '_sta')\n assert n_component <= len(column_name_list)\n acc_file = cal_df\n X = acc_file[column_name_list]\n X = np.array(X)\n if is_z_stand == False:\n pca = PCA(n_components=n_component)\n NEWX = pca.fit_transform(X)\n variance_ratio = pca.explained_variance_ratio_\n t = NEWX * variance_ratio\n t = t.sum(axis=1)\n t = t / t.sum(axis=0)\n t = t.reshape(len(t), 1)\n results = np.concatenate((X, t), axis=1)\n pca_index = 'pca_en_per'\n output_column_name_list.append(pca_index)\n results_df = pd.DataFrame(data=results, columns=output_column_name_list\n )\n else:\n scaler = pp.StandardScaler()\n X_scaler = scaler.fit_transform(X)\n pca = PCA(n_components=n_component)\n NEWX = pca.fit_transform(X_scaler)\n variance_ratio = pca.explained_variance_ratio_\n t = NEWX * variance_ratio\n t = t.sum(axis=1)\n t = t.reshape(len(t), 1)\n results = np.concatenate((X, t), axis=1)\n pca_index = 'pca_in_per'\n output_column_name_list.append(pca_index)\n results_df = pd.DataFrame(data=results, columns=output_column_name_list\n )\n del results_df[output_column_name_list[0]]\n del results_df[output_column_name_list[1]]\n del results_df[output_column_name_list[2]]\n final_re_df = pd.concat([acc_file, results_df], axis=1)\n final_re_df['e_0_pe_le'] = ut.value_classify(final_re_df, 'entr_0_per',\n number=-5)\n final_re_df['e_1_pe_le'] = ut.value_classify(final_re_df, 'entr_1_per',\n number=-5)\n final_re_df['e_2_pe_le'] = ut.value_classify(final_re_df, 'entr_2_1_p',\n number=-5)\n final_re_df['pca_en_le'] = ut.value_classify(final_re_df, pca_index,\n number=-5)\n return final_re_df\n\n\ndef entro_add(shp_dir, *args):\n df = ut.read_file(shp_dir)\n df['aggre_en'] = 0\n for each in args:\n assert isinstance(each, str)\n df['aggre_en'] += df[each]\n df['agg_en_per'] = df['aggre_en'] / df['aggre_en'].sum()\n df['e_0_pe_le'] = ut.value_classify(df, 'entr_0_per', number=-5)\n df['e_1_pe_le'] = ut.value_classify(df, 'entr_1_per', number=-5)\n df['e_2_pe_le'] = ut.value_classify(df, 'entr_2_1_p', number=-5)\n df['agg_en_le'] = ut.value_classify(df, 'agg_en_per', number=-5)\n return df\n\n\nif __name__ == '__main__':\n df = entro_add('D:\\\\multicities\\\\data\\\\深圳分区\\\\sz_10_acc_entro.shp',\n 'entr_0', 'entr_1', 'entr_2_1')\n ut.to_file(df, 
'D:\\\\multicities\\\\data\\\\深圳分区\\\\sz_10_acc_entro_aggre.shp')\n",
"<import token>\n<docstring token>\n\n\ndef generate_access_index(dir_name='D:\\\\multicities\\\\data\\\\深圳分区\\\\可达性结算结果'):\n \"\"\"\n 存放可达性计算结果的文件夹\n 该文件夹下暂时只放置两个文件\n\n \"\"\"\n for each in ut.iter_shpfile(dir_name, ['.shp']):\n t = gpd.read_file('{}'.format(each))\n yield t['access_ind']\n\n\ndef write_accind_2_file(file_dir):\n \"\"\"\n 将可达性计算结果写入 file_dir 下的指定文件\n 返回geo pandas 格式文件\n \"\"\"\n i = 1\n t = gpd.read_file(file_dir)\n for each in generate_access_index():\n name = 'access_ind_{}'.format(i)\n t[name] = each\n i += 1\n return t\n\n\ndef sklearn_pca_cal(n_component=3, cal_df=None, is_z_stand=False, *args):\n \"\"\"\n 使用PCA 前必须对数据进行标准化\n pca.fit_transform(X)# 返回降维后的数据\n pca.components_ #返回因子载荷,横向查看相应载荷,实际为协方差矩阵特征值对应的特征向量\n pca.explained_variance_ratio_ #返回方差贡献率\n pca.explained_variance_ #返回特征值\n\n 注意: X*pca.components_ 结果等于 pca.fit_transform(X) 的输出结果\n\n cal_df: 进行pca计算的数据框\n is_z_stand: 这个参数不是简单的是否对输出结果进行z标准化,如果这个这个参数是False 此时计算的是直接对机会项进行PCA\n 计算赋权(对原始非标准化结果直接赋权),加和,百分化输出。 选择为True时 是对标准化后的可达性计算PCA, 赋权,加和,输出标准化后的\n 原始三种可达性值 与 最终可达性值(赋权加和后结果)。\n \"\"\"\n column_name_list = []\n output_column_name_list = []\n for each in args:\n assert isinstance(each, str)\n column_name_list.append(each)\n output_column_name_list.append(each + '_sta')\n assert n_component <= len(column_name_list)\n acc_file = cal_df\n X = acc_file[column_name_list]\n X = np.array(X)\n if is_z_stand == False:\n pca = PCA(n_components=n_component)\n NEWX = pca.fit_transform(X)\n variance_ratio = pca.explained_variance_ratio_\n t = NEWX * variance_ratio\n t = t.sum(axis=1)\n t = t / t.sum(axis=0)\n t = t.reshape(len(t), 1)\n results = np.concatenate((X, t), axis=1)\n pca_index = 'pca_en_per'\n output_column_name_list.append(pca_index)\n results_df = pd.DataFrame(data=results, columns=output_column_name_list\n )\n else:\n scaler = pp.StandardScaler()\n X_scaler = scaler.fit_transform(X)\n pca = PCA(n_components=n_component)\n NEWX = pca.fit_transform(X_scaler)\n variance_ratio = pca.explained_variance_ratio_\n t = NEWX * variance_ratio\n t = t.sum(axis=1)\n t = t.reshape(len(t), 1)\n results = np.concatenate((X, t), axis=1)\n pca_index = 'pca_in_per'\n output_column_name_list.append(pca_index)\n results_df = pd.DataFrame(data=results, columns=output_column_name_list\n )\n del results_df[output_column_name_list[0]]\n del results_df[output_column_name_list[1]]\n del results_df[output_column_name_list[2]]\n final_re_df = pd.concat([acc_file, results_df], axis=1)\n final_re_df['e_0_pe_le'] = ut.value_classify(final_re_df, 'entr_0_per',\n number=-5)\n final_re_df['e_1_pe_le'] = ut.value_classify(final_re_df, 'entr_1_per',\n number=-5)\n final_re_df['e_2_pe_le'] = ut.value_classify(final_re_df, 'entr_2_1_p',\n number=-5)\n final_re_df['pca_en_le'] = ut.value_classify(final_re_df, pca_index,\n number=-5)\n return final_re_df\n\n\ndef entro_add(shp_dir, *args):\n df = ut.read_file(shp_dir)\n df['aggre_en'] = 0\n for each in args:\n assert isinstance(each, str)\n df['aggre_en'] += df[each]\n df['agg_en_per'] = df['aggre_en'] / df['aggre_en'].sum()\n df['e_0_pe_le'] = ut.value_classify(df, 'entr_0_per', number=-5)\n df['e_1_pe_le'] = ut.value_classify(df, 'entr_1_per', number=-5)\n df['e_2_pe_le'] = ut.value_classify(df, 'entr_2_1_p', number=-5)\n df['agg_en_le'] = ut.value_classify(df, 'agg_en_per', number=-5)\n return df\n\n\n<code token>\n",
"<import token>\n<docstring token>\n\n\ndef generate_access_index(dir_name='D:\\\\multicities\\\\data\\\\深圳分区\\\\可达性结算结果'):\n \"\"\"\n 存放可达性计算结果的文件夹\n 该文件夹下暂时只放置两个文件\n\n \"\"\"\n for each in ut.iter_shpfile(dir_name, ['.shp']):\n t = gpd.read_file('{}'.format(each))\n yield t['access_ind']\n\n\ndef write_accind_2_file(file_dir):\n \"\"\"\n 将可达性计算结果写入 file_dir 下的指定文件\n 返回geo pandas 格式文件\n \"\"\"\n i = 1\n t = gpd.read_file(file_dir)\n for each in generate_access_index():\n name = 'access_ind_{}'.format(i)\n t[name] = each\n i += 1\n return t\n\n\n<function token>\n\n\ndef entro_add(shp_dir, *args):\n df = ut.read_file(shp_dir)\n df['aggre_en'] = 0\n for each in args:\n assert isinstance(each, str)\n df['aggre_en'] += df[each]\n df['agg_en_per'] = df['aggre_en'] / df['aggre_en'].sum()\n df['e_0_pe_le'] = ut.value_classify(df, 'entr_0_per', number=-5)\n df['e_1_pe_le'] = ut.value_classify(df, 'entr_1_per', number=-5)\n df['e_2_pe_le'] = ut.value_classify(df, 'entr_2_1_p', number=-5)\n df['agg_en_le'] = ut.value_classify(df, 'agg_en_per', number=-5)\n return df\n\n\n<code token>\n",
"<import token>\n<docstring token>\n\n\ndef generate_access_index(dir_name='D:\\\\multicities\\\\data\\\\深圳分区\\\\可达性结算结果'):\n \"\"\"\n 存放可达性计算结果的文件夹\n 该文件夹下暂时只放置两个文件\n\n \"\"\"\n for each in ut.iter_shpfile(dir_name, ['.shp']):\n t = gpd.read_file('{}'.format(each))\n yield t['access_ind']\n\n\ndef write_accind_2_file(file_dir):\n \"\"\"\n 将可达性计算结果写入 file_dir 下的指定文件\n 返回geo pandas 格式文件\n \"\"\"\n i = 1\n t = gpd.read_file(file_dir)\n for each in generate_access_index():\n name = 'access_ind_{}'.format(i)\n t[name] = each\n i += 1\n return t\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<docstring token>\n\n\ndef generate_access_index(dir_name='D:\\\\multicities\\\\data\\\\深圳分区\\\\可达性结算结果'):\n \"\"\"\n 存放可达性计算结果的文件夹\n 该文件夹下暂时只放置两个文件\n\n \"\"\"\n for each in ut.iter_shpfile(dir_name, ['.shp']):\n t = gpd.read_file('{}'.format(each))\n yield t['access_ind']\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
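The sklearn_pca_cal docstring in the record above describes a weighting scheme: standardize the columns, run PCA, multiply each component score by its explained-variance ratio, and sum across components to obtain a single composite index (the is_z_stand=True branch). A minimal sketch of just that computation, on synthetic data instead of the shapefile columns, might look like this:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = rng.random((100, 3))                    # stand-in for the three accessibility columns

X_std = StandardScaler().fit_transform(X)   # standardize before PCA
pca = PCA(n_components=3)
scores = pca.fit_transform(X_std)           # component scores (dimension-reduced data)
weights = pca.explained_variance_ratio_     # variance contribution of each component

composite = (scores * weights).sum(axis=1)  # weighted sum -> one composite value per row
print(composite[:5])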
98,865 |
1ac1105abd0927322a6c7d8224f0948be45674ad
|
import chainer
from chainer import functions as F
import chainer.links as L
import sys
import os
from chainer_chemistry.models import GGNN
from chainer_chemistry.models import NFP
from chainer_chemistry.models import SchNet
from chainer_chemistry.models import WeaveNet
sys.path.append(os.path.dirname(__file__))
from models.nfp_drop import NFPDrop
from models.ggnn_drop import GGNNDrop
class MLPDrop(chainer.Chain):
"""Basic implementation for MLP with dropout"""
# def __init__(self, hidden_dim, out_dim, n_layers=2, activation=F.relu):
def __init__(self, out_dim, hidden_dim, n_layers=1, activation=F.relu,
dropout_ratio=0.25):
super(MLPDrop, self).__init__()
if n_layers <= 0:
raise ValueError('n_layers must be positive integer, but set {}'
.format(n_layers))
layers = [L.Linear(None, hidden_dim) for i in range(n_layers - 1)]
with self.init_scope():
self.layers = chainer.ChainList(*layers)
self.l_out = L.Linear(None, out_dim)
self.activation = activation
self.dropout_ratio = dropout_ratio
def __call__(self, x):
h = F.dropout(x, ratio=self.dropout_ratio)
for l in self.layers:
h = F.dropout(self.activation(l(h)), ratio=self.dropout_ratio)
h = self.l_out(h)
return h
def build_predictor(method, n_unit, conv_layers, class_num,
dropout_ratio=0.25, n_layers=1):
print('dropout_ratio, n_layers', dropout_ratio, n_layers)
mlp_class = MLPDrop
if method == 'nfp':
print('Use NFP predictor...')
predictor = GraphConvPredictor(
NFP(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers),
mlp_class(out_dim=class_num, hidden_dim=n_unit, dropout_ratio=dropout_ratio,
n_layers=n_layers))
elif method == 'nfpdrop':
print('Use NFPDrop predictor...')
predictor = GraphConvPredictor(
NFPDrop(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers,
dropout_ratio=dropout_ratio),
mlp_class(out_dim=class_num, hidden_dim=n_unit,
dropout_ratio=dropout_ratio,
n_layers=n_layers))
elif method == 'ggnn':
print('Use GGNN predictor...')
predictor = GraphConvPredictor(
GGNN(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers),
mlp_class(out_dim=class_num, hidden_dim=n_unit,
dropout_ratio=dropout_ratio, n_layers=n_layers))
elif method == 'ggnndrop':
print('Use GGNNDrop predictor...')
predictor = GraphConvPredictor(
GGNNDrop(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers,
dropout_ratio=dropout_ratio),
mlp_class(out_dim=class_num, hidden_dim=n_unit,
dropout_ratio=dropout_ratio, n_layers=n_layers))
elif method == 'schnet':
print('Use SchNet predictor...')
predictor = SchNet(out_dim=class_num, hidden_dim=n_unit,
n_layers=conv_layers, readout_hidden_dim=n_unit)
elif method == 'weavenet':
print('Use WeaveNet predictor...')
n_atom = 20
n_sub_layer = 1
weave_channels = [50] * conv_layers
predictor = GraphConvPredictor(
WeaveNet(weave_channels=weave_channels, hidden_dim=n_unit,
n_sub_layer=n_sub_layer, n_atom=n_atom),
mlp_class(out_dim=class_num, hidden_dim=n_unit,
dropout_ratio=dropout_ratio, n_layers=n_layers))
else:
raise ValueError('[ERROR] Invalid predictor: method={}'.format(method))
return predictor
class GraphConvPredictor(chainer.Chain):
"""Wrapper class that combines a graph convolution and MLP."""
def __init__(self, graph_conv, mlp):
"""Constructor
Args:
graph_conv: graph convolution network to obtain molecule feature
representation
mlp: multi layer perceptron, used as final connected layer
"""
super(GraphConvPredictor, self).__init__()
with self.init_scope():
self.graph_conv = graph_conv
self.mlp = mlp
def __call__(self, atoms, adjs):
x = self.graph_conv(atoms, adjs)
x = self.mlp(x)
return x
def predict(self, atoms, adjs):
with chainer.no_backprop_mode(), chainer.using_config('train', False):
x = self.__call__(atoms, adjs)
return F.sigmoid(x)
|
[
"import chainer\nfrom chainer import functions as F\nimport chainer.links as L\nimport sys\nimport os\n\nfrom chainer_chemistry.models import GGNN\nfrom chainer_chemistry.models import NFP\nfrom chainer_chemistry.models import SchNet\nfrom chainer_chemistry.models import WeaveNet\n\nsys.path.append(os.path.dirname(__file__))\nfrom models.nfp_drop import NFPDrop\nfrom models.ggnn_drop import GGNNDrop\n\n\nclass MLPDrop(chainer.Chain):\n \"\"\"Basic implementation for MLP with dropout\"\"\"\n # def __init__(self, hidden_dim, out_dim, n_layers=2, activation=F.relu):\n def __init__(self, out_dim, hidden_dim, n_layers=1, activation=F.relu,\n dropout_ratio=0.25):\n super(MLPDrop, self).__init__()\n if n_layers <= 0:\n raise ValueError('n_layers must be positive integer, but set {}'\n .format(n_layers))\n layers = [L.Linear(None, hidden_dim) for i in range(n_layers - 1)]\n with self.init_scope():\n self.layers = chainer.ChainList(*layers)\n self.l_out = L.Linear(None, out_dim)\n self.activation = activation\n self.dropout_ratio = dropout_ratio\n\n def __call__(self, x):\n h = F.dropout(x, ratio=self.dropout_ratio)\n for l in self.layers:\n h = F.dropout(self.activation(l(h)), ratio=self.dropout_ratio)\n h = self.l_out(h)\n return h\n\n\ndef build_predictor(method, n_unit, conv_layers, class_num,\n dropout_ratio=0.25, n_layers=1):\n print('dropout_ratio, n_layers', dropout_ratio, n_layers)\n mlp_class = MLPDrop\n if method == 'nfp':\n print('Use NFP predictor...')\n predictor = GraphConvPredictor(\n NFP(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers),\n mlp_class(out_dim=class_num, hidden_dim=n_unit, dropout_ratio=dropout_ratio,\n n_layers=n_layers))\n elif method == 'nfpdrop':\n print('Use NFPDrop predictor...')\n predictor = GraphConvPredictor(\n NFPDrop(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers,\n dropout_ratio=dropout_ratio),\n mlp_class(out_dim=class_num, hidden_dim=n_unit,\n dropout_ratio=dropout_ratio,\n n_layers=n_layers))\n elif method == 'ggnn':\n print('Use GGNN predictor...')\n predictor = GraphConvPredictor(\n GGNN(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers),\n mlp_class(out_dim=class_num, hidden_dim=n_unit,\n dropout_ratio=dropout_ratio, n_layers=n_layers))\n elif method == 'ggnndrop':\n print('Use GGNNDrop predictor...')\n predictor = GraphConvPredictor(\n GGNNDrop(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers,\n dropout_ratio=dropout_ratio),\n mlp_class(out_dim=class_num, hidden_dim=n_unit,\n dropout_ratio=dropout_ratio, n_layers=n_layers))\n elif method == 'schnet':\n print('Use SchNet predictor...')\n predictor = SchNet(out_dim=class_num, hidden_dim=n_unit,\n n_layers=conv_layers, readout_hidden_dim=n_unit)\n elif method == 'weavenet':\n print('Use WeaveNet predictor...')\n n_atom = 20\n n_sub_layer = 1\n weave_channels = [50] * conv_layers\n predictor = GraphConvPredictor(\n WeaveNet(weave_channels=weave_channels, hidden_dim=n_unit,\n n_sub_layer=n_sub_layer, n_atom=n_atom),\n mlp_class(out_dim=class_num, hidden_dim=n_unit,\n dropout_ratio=dropout_ratio, n_layers=n_layers))\n else:\n raise ValueError('[ERROR] Invalid predictor: method={}'.format(method))\n return predictor\n\n\nclass GraphConvPredictor(chainer.Chain):\n \"\"\"Wrapper class that combines a graph convolution and MLP.\"\"\"\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n\n 
super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"import chainer\nfrom chainer import functions as F\nimport chainer.links as L\nimport sys\nimport os\nfrom chainer_chemistry.models import GGNN\nfrom chainer_chemistry.models import NFP\nfrom chainer_chemistry.models import SchNet\nfrom chainer_chemistry.models import WeaveNet\nsys.path.append(os.path.dirname(__file__))\nfrom models.nfp_drop import NFPDrop\nfrom models.ggnn_drop import GGNNDrop\n\n\nclass MLPDrop(chainer.Chain):\n \"\"\"Basic implementation for MLP with dropout\"\"\"\n\n def __init__(self, out_dim, hidden_dim, n_layers=1, activation=F.relu,\n dropout_ratio=0.25):\n super(MLPDrop, self).__init__()\n if n_layers <= 0:\n raise ValueError('n_layers must be positive integer, but set {}'\n .format(n_layers))\n layers = [L.Linear(None, hidden_dim) for i in range(n_layers - 1)]\n with self.init_scope():\n self.layers = chainer.ChainList(*layers)\n self.l_out = L.Linear(None, out_dim)\n self.activation = activation\n self.dropout_ratio = dropout_ratio\n\n def __call__(self, x):\n h = F.dropout(x, ratio=self.dropout_ratio)\n for l in self.layers:\n h = F.dropout(self.activation(l(h)), ratio=self.dropout_ratio)\n h = self.l_out(h)\n return h\n\n\ndef build_predictor(method, n_unit, conv_layers, class_num, dropout_ratio=\n 0.25, n_layers=1):\n print('dropout_ratio, n_layers', dropout_ratio, n_layers)\n mlp_class = MLPDrop\n if method == 'nfp':\n print('Use NFP predictor...')\n predictor = GraphConvPredictor(NFP(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers), mlp_class(out_dim=class_num,\n hidden_dim=n_unit, dropout_ratio=dropout_ratio, n_layers=n_layers))\n elif method == 'nfpdrop':\n print('Use NFPDrop predictor...')\n predictor = GraphConvPredictor(NFPDrop(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers, dropout_ratio=dropout_ratio),\n mlp_class(out_dim=class_num, hidden_dim=n_unit, dropout_ratio=\n dropout_ratio, n_layers=n_layers))\n elif method == 'ggnn':\n print('Use GGNN predictor...')\n predictor = GraphConvPredictor(GGNN(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers), mlp_class(out_dim=class_num,\n hidden_dim=n_unit, dropout_ratio=dropout_ratio, n_layers=n_layers))\n elif method == 'ggnndrop':\n print('Use GGNNDrop predictor...')\n predictor = GraphConvPredictor(GGNNDrop(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers, dropout_ratio=dropout_ratio),\n mlp_class(out_dim=class_num, hidden_dim=n_unit, dropout_ratio=\n dropout_ratio, n_layers=n_layers))\n elif method == 'schnet':\n print('Use SchNet predictor...')\n predictor = SchNet(out_dim=class_num, hidden_dim=n_unit, n_layers=\n conv_layers, readout_hidden_dim=n_unit)\n elif method == 'weavenet':\n print('Use WeaveNet predictor...')\n n_atom = 20\n n_sub_layer = 1\n weave_channels = [50] * conv_layers\n predictor = GraphConvPredictor(WeaveNet(weave_channels=\n weave_channels, hidden_dim=n_unit, n_sub_layer=n_sub_layer,\n n_atom=n_atom), mlp_class(out_dim=class_num, hidden_dim=n_unit,\n dropout_ratio=dropout_ratio, n_layers=n_layers))\n else:\n raise ValueError('[ERROR] Invalid predictor: method={}'.format(method))\n return predictor\n\n\nclass GraphConvPredictor(chainer.Chain):\n \"\"\"Wrapper class that combines a graph convolution and MLP.\"\"\"\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n 
self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\nsys.path.append(os.path.dirname(__file__))\n<import token>\n\n\nclass MLPDrop(chainer.Chain):\n \"\"\"Basic implementation for MLP with dropout\"\"\"\n\n def __init__(self, out_dim, hidden_dim, n_layers=1, activation=F.relu,\n dropout_ratio=0.25):\n super(MLPDrop, self).__init__()\n if n_layers <= 0:\n raise ValueError('n_layers must be positive integer, but set {}'\n .format(n_layers))\n layers = [L.Linear(None, hidden_dim) for i in range(n_layers - 1)]\n with self.init_scope():\n self.layers = chainer.ChainList(*layers)\n self.l_out = L.Linear(None, out_dim)\n self.activation = activation\n self.dropout_ratio = dropout_ratio\n\n def __call__(self, x):\n h = F.dropout(x, ratio=self.dropout_ratio)\n for l in self.layers:\n h = F.dropout(self.activation(l(h)), ratio=self.dropout_ratio)\n h = self.l_out(h)\n return h\n\n\ndef build_predictor(method, n_unit, conv_layers, class_num, dropout_ratio=\n 0.25, n_layers=1):\n print('dropout_ratio, n_layers', dropout_ratio, n_layers)\n mlp_class = MLPDrop\n if method == 'nfp':\n print('Use NFP predictor...')\n predictor = GraphConvPredictor(NFP(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers), mlp_class(out_dim=class_num,\n hidden_dim=n_unit, dropout_ratio=dropout_ratio, n_layers=n_layers))\n elif method == 'nfpdrop':\n print('Use NFPDrop predictor...')\n predictor = GraphConvPredictor(NFPDrop(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers, dropout_ratio=dropout_ratio),\n mlp_class(out_dim=class_num, hidden_dim=n_unit, dropout_ratio=\n dropout_ratio, n_layers=n_layers))\n elif method == 'ggnn':\n print('Use GGNN predictor...')\n predictor = GraphConvPredictor(GGNN(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers), mlp_class(out_dim=class_num,\n hidden_dim=n_unit, dropout_ratio=dropout_ratio, n_layers=n_layers))\n elif method == 'ggnndrop':\n print('Use GGNNDrop predictor...')\n predictor = GraphConvPredictor(GGNNDrop(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers, dropout_ratio=dropout_ratio),\n mlp_class(out_dim=class_num, hidden_dim=n_unit, dropout_ratio=\n dropout_ratio, n_layers=n_layers))\n elif method == 'schnet':\n print('Use SchNet predictor...')\n predictor = SchNet(out_dim=class_num, hidden_dim=n_unit, n_layers=\n conv_layers, readout_hidden_dim=n_unit)\n elif method == 'weavenet':\n print('Use WeaveNet predictor...')\n n_atom = 20\n n_sub_layer = 1\n weave_channels = [50] * conv_layers\n predictor = GraphConvPredictor(WeaveNet(weave_channels=\n weave_channels, hidden_dim=n_unit, n_sub_layer=n_sub_layer,\n n_atom=n_atom), mlp_class(out_dim=class_num, hidden_dim=n_unit,\n dropout_ratio=dropout_ratio, n_layers=n_layers))\n else:\n raise ValueError('[ERROR] Invalid predictor: method={}'.format(method))\n return predictor\n\n\nclass GraphConvPredictor(chainer.Chain):\n \"\"\"Wrapper class that combines a graph convolution and MLP.\"\"\"\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\n<code token>\n<import token>\n\n\nclass MLPDrop(chainer.Chain):\n \"\"\"Basic implementation for MLP with dropout\"\"\"\n\n def __init__(self, out_dim, hidden_dim, n_layers=1, activation=F.relu,\n dropout_ratio=0.25):\n super(MLPDrop, self).__init__()\n if n_layers <= 0:\n raise ValueError('n_layers must be positive integer, but set {}'\n .format(n_layers))\n layers = [L.Linear(None, hidden_dim) for i in range(n_layers - 1)]\n with self.init_scope():\n self.layers = chainer.ChainList(*layers)\n self.l_out = L.Linear(None, out_dim)\n self.activation = activation\n self.dropout_ratio = dropout_ratio\n\n def __call__(self, x):\n h = F.dropout(x, ratio=self.dropout_ratio)\n for l in self.layers:\n h = F.dropout(self.activation(l(h)), ratio=self.dropout_ratio)\n h = self.l_out(h)\n return h\n\n\ndef build_predictor(method, n_unit, conv_layers, class_num, dropout_ratio=\n 0.25, n_layers=1):\n print('dropout_ratio, n_layers', dropout_ratio, n_layers)\n mlp_class = MLPDrop\n if method == 'nfp':\n print('Use NFP predictor...')\n predictor = GraphConvPredictor(NFP(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers), mlp_class(out_dim=class_num,\n hidden_dim=n_unit, dropout_ratio=dropout_ratio, n_layers=n_layers))\n elif method == 'nfpdrop':\n print('Use NFPDrop predictor...')\n predictor = GraphConvPredictor(NFPDrop(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers, dropout_ratio=dropout_ratio),\n mlp_class(out_dim=class_num, hidden_dim=n_unit, dropout_ratio=\n dropout_ratio, n_layers=n_layers))\n elif method == 'ggnn':\n print('Use GGNN predictor...')\n predictor = GraphConvPredictor(GGNN(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers), mlp_class(out_dim=class_num,\n hidden_dim=n_unit, dropout_ratio=dropout_ratio, n_layers=n_layers))\n elif method == 'ggnndrop':\n print('Use GGNNDrop predictor...')\n predictor = GraphConvPredictor(GGNNDrop(out_dim=n_unit, hidden_dim=\n n_unit, n_layers=conv_layers, dropout_ratio=dropout_ratio),\n mlp_class(out_dim=class_num, hidden_dim=n_unit, dropout_ratio=\n dropout_ratio, n_layers=n_layers))\n elif method == 'schnet':\n print('Use SchNet predictor...')\n predictor = SchNet(out_dim=class_num, hidden_dim=n_unit, n_layers=\n conv_layers, readout_hidden_dim=n_unit)\n elif method == 'weavenet':\n print('Use WeaveNet predictor...')\n n_atom = 20\n n_sub_layer = 1\n weave_channels = [50] * conv_layers\n predictor = GraphConvPredictor(WeaveNet(weave_channels=\n weave_channels, hidden_dim=n_unit, n_sub_layer=n_sub_layer,\n n_atom=n_atom), mlp_class(out_dim=class_num, hidden_dim=n_unit,\n dropout_ratio=dropout_ratio, n_layers=n_layers))\n else:\n raise ValueError('[ERROR] Invalid predictor: method={}'.format(method))\n return predictor\n\n\nclass GraphConvPredictor(chainer.Chain):\n \"\"\"Wrapper class that combines a graph convolution and MLP.\"\"\"\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\n<code token>\n<import token>\n\n\nclass MLPDrop(chainer.Chain):\n \"\"\"Basic implementation for MLP with dropout\"\"\"\n\n def __init__(self, out_dim, hidden_dim, n_layers=1, activation=F.relu,\n dropout_ratio=0.25):\n super(MLPDrop, self).__init__()\n if n_layers <= 0:\n raise ValueError('n_layers must be positive integer, but set {}'\n .format(n_layers))\n layers = [L.Linear(None, hidden_dim) for i in range(n_layers - 1)]\n with self.init_scope():\n self.layers = chainer.ChainList(*layers)\n self.l_out = L.Linear(None, out_dim)\n self.activation = activation\n self.dropout_ratio = dropout_ratio\n\n def __call__(self, x):\n h = F.dropout(x, ratio=self.dropout_ratio)\n for l in self.layers:\n h = F.dropout(self.activation(l(h)), ratio=self.dropout_ratio)\n h = self.l_out(h)\n return h\n\n\n<function token>\n\n\nclass GraphConvPredictor(chainer.Chain):\n \"\"\"Wrapper class that combines a graph convolution and MLP.\"\"\"\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\n<code token>\n<import token>\n\n\nclass MLPDrop(chainer.Chain):\n <docstring token>\n\n def __init__(self, out_dim, hidden_dim, n_layers=1, activation=F.relu,\n dropout_ratio=0.25):\n super(MLPDrop, self).__init__()\n if n_layers <= 0:\n raise ValueError('n_layers must be positive integer, but set {}'\n .format(n_layers))\n layers = [L.Linear(None, hidden_dim) for i in range(n_layers - 1)]\n with self.init_scope():\n self.layers = chainer.ChainList(*layers)\n self.l_out = L.Linear(None, out_dim)\n self.activation = activation\n self.dropout_ratio = dropout_ratio\n\n def __call__(self, x):\n h = F.dropout(x, ratio=self.dropout_ratio)\n for l in self.layers:\n h = F.dropout(self.activation(l(h)), ratio=self.dropout_ratio)\n h = self.l_out(h)\n return h\n\n\n<function token>\n\n\nclass GraphConvPredictor(chainer.Chain):\n \"\"\"Wrapper class that combines a graph convolution and MLP.\"\"\"\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\n<code token>\n<import token>\n\n\nclass MLPDrop(chainer.Chain):\n <docstring token>\n\n def __init__(self, out_dim, hidden_dim, n_layers=1, activation=F.relu,\n dropout_ratio=0.25):\n super(MLPDrop, self).__init__()\n if n_layers <= 0:\n raise ValueError('n_layers must be positive integer, but set {}'\n .format(n_layers))\n layers = [L.Linear(None, hidden_dim) for i in range(n_layers - 1)]\n with self.init_scope():\n self.layers = chainer.ChainList(*layers)\n self.l_out = L.Linear(None, out_dim)\n self.activation = activation\n self.dropout_ratio = dropout_ratio\n <function token>\n\n\n<function token>\n\n\nclass GraphConvPredictor(chainer.Chain):\n \"\"\"Wrapper class that combines a graph convolution and MLP.\"\"\"\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\n<code token>\n<import token>\n\n\nclass MLPDrop(chainer.Chain):\n <docstring token>\n <function token>\n <function token>\n\n\n<function token>\n\n\nclass GraphConvPredictor(chainer.Chain):\n \"\"\"Wrapper class that combines a graph convolution and MLP.\"\"\"\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\n<code token>\n<import token>\n<class token>\n<function token>\n\n\nclass GraphConvPredictor(chainer.Chain):\n \"\"\"Wrapper class that combines a graph convolution and MLP.\"\"\"\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\n<code token>\n<import token>\n<class token>\n<function token>\n\n\nclass GraphConvPredictor(chainer.Chain):\n <docstring token>\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n\n def __call__(self, atoms, adjs):\n x = self.graph_conv(atoms, adjs)\n x = self.mlp(x)\n return x\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\n<code token>\n<import token>\n<class token>\n<function token>\n\n\nclass GraphConvPredictor(chainer.Chain):\n <docstring token>\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n <function token>\n\n def predict(self, atoms, adjs):\n with chainer.no_backprop_mode(), chainer.using_config('train', False):\n x = self.__call__(atoms, adjs)\n return F.sigmoid(x)\n",
"<import token>\n<code token>\n<import token>\n<class token>\n<function token>\n\n\nclass GraphConvPredictor(chainer.Chain):\n <docstring token>\n\n def __init__(self, graph_conv, mlp):\n \"\"\"Constructor\n\n Args:\n graph_conv: graph convolution network to obtain molecule feature\n representation\n mlp: multi layer perceptron, used as final connected layer\n \"\"\"\n super(GraphConvPredictor, self).__init__()\n with self.init_scope():\n self.graph_conv = graph_conv\n self.mlp = mlp\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<import token>\n<class token>\n<function token>\n\n\nclass GraphConvPredictor(chainer.Chain):\n <docstring token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<import token>\n<class token>\n<function token>\n<class token>\n"
] | false |
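
For orientation, a minimal sketch (not part of the record above) of the dropout-MLP and inference pattern that the chainer snippets in this record strip down step by step; TinyMLPDrop, the batch shape, and the output dimension are illustrative assumptions, not the record's MLPDrop or GraphConvPredictor.

import numpy as np
import chainer
import chainer.functions as F
import chainer.links as L


class TinyMLPDrop(chainer.Chain):
    # condensed stand-in following the same pattern as the record's MLPDrop:
    # one hidden linear layer with dropout, then an output linear layer
    def __init__(self, out_dim, hidden_dim, dropout_ratio=0.25):
        super(TinyMLPDrop, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, hidden_dim)
            self.l_out = L.Linear(None, out_dim)
        self.dropout_ratio = dropout_ratio

    def __call__(self, x):
        h = F.dropout(F.relu(self.l1(x)), ratio=self.dropout_ratio)
        return self.l_out(h)


x = np.random.rand(4, 8).astype(np.float32)  # 4 samples, 8 features (made up)
model = TinyMLPDrop(out_dim=2, hidden_dim=16)
# same inference pattern as GraphConvPredictor.predict: no backprop, dropout off
with chainer.no_backprop_mode(), chainer.using_config('train', False):
    y = F.sigmoid(model(x))
print(y.shape)  # (4, 2)
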
98,866 |
2478dca0e46ad00c9bfd8fb184149e3957b56c46
|
import math  # import the math module for later use
x = float(input("Write x "))  # read the variables from the keyboard
y = float(input("Write y "))
R = ((math.e) ** (2 * x) + (math.sin(y))) / (math.log1p(3.8 * x + y))  # write out the mathematical expression (remembering to use the math module)
print("R=", R)  # print the result
|
[
"import math # імпортуємо модуль математики для подальшого використання\r\n\r\nx = float(input(\"Write x \")) #дозволяємо ввести змінні з клавіатури\r\ny = float(input(\"Write y \"))\r\nR = ((math.e) ** (2 * x) + (math.sin(y))) / (math.log1p(3.8 * x + y)) #записуэмо математичний вираз(не забуваючи про модуль)\r\nprint(\"R=\",R) #виводимо результат\r\n",
"import math\nx = float(input('Write x '))\ny = float(input('Write y '))\nR = (math.e ** (2 * x) + math.sin(y)) / math.log1p(3.8 * x + y)\nprint('R=', R)\n",
"<import token>\nx = float(input('Write x '))\ny = float(input('Write y '))\nR = (math.e ** (2 * x) + math.sin(y)) / math.log1p(3.8 * x + y)\nprint('R=', R)\n",
"<import token>\n<assignment token>\nprint('R=', R)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
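
As a quick sanity check of the formula in the record above, the expression can be evaluated directly for sample inputs; the values below are arbitrary, not taken from the record.

import math

x, y = 1.0, 2.0
R = (math.e ** (2 * x) + math.sin(y)) / math.log1p(3.8 * x + y)
print(R)  # (e**2 + sin 2) / ln(1 + 5.8), roughly 8.30 / 1.92, about 4.33
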
98,867 |
4a31ecc130214cfcb60e331e514812eaeccec1e6
|
import pickle
import pandas as pd
from pandas import DataFrame, Series
import pymongo
with open('./varweathertweets') as p1:
tweets = pickle.load(p1)
connection = pymongo.MongoClient("mongodb://localhost")
db = connection.tweets
mongtweets = db.tweets
mongtweets.drop()
for item in tweets:
mongtweets.insert_one(item)
|
[
"import pickle\nimport pandas as pd\nfrom pandas import DataFrame, Series\nimport pymongo\n\nwith open('./varweathertweets') as p1:\n tweets = pickle.load(p1)\n\nconnection = pymongo.MongoClient(\"mongodb://localhost\")\ndb = connection.tweets\nmongtweets = db.tweets\nmongtweets.drop()\nfor item in tweets:\n mongtweets.insert_one(item)",
"import pickle\nimport pandas as pd\nfrom pandas import DataFrame, Series\nimport pymongo\nwith open('./varweathertweets') as p1:\n tweets = pickle.load(p1)\nconnection = pymongo.MongoClient('mongodb://localhost')\ndb = connection.tweets\nmongtweets = db.tweets\nmongtweets.drop()\nfor item in tweets:\n mongtweets.insert_one(item)\n",
"<import token>\nwith open('./varweathertweets') as p1:\n tweets = pickle.load(p1)\nconnection = pymongo.MongoClient('mongodb://localhost')\ndb = connection.tweets\nmongtweets = db.tweets\nmongtweets.drop()\nfor item in tweets:\n mongtweets.insert_one(item)\n",
"<import token>\nwith open('./varweathertweets') as p1:\n tweets = pickle.load(p1)\n<assignment token>\nmongtweets.drop()\nfor item in tweets:\n mongtweets.insert_one(item)\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
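
A hedged companion sketch for the record above: reading the documents back out of the same tweets.tweets collection. The connection string and collection names mirror the record; a running local MongoDB is assumed, and everything else is illustrative.

import pymongo

connection = pymongo.MongoClient('mongodb://localhost')
collection = connection.tweets.tweets
print(collection.count_documents({}))   # how many tweets were inserted
for doc in collection.find().limit(3):  # peek at a few documents
    print(doc['_id'])

For bulk loading, collection.insert_many(tweets) is the usual counterpart of the per-item insert_one loop in the record.
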
98,868 |
9231e41c3780cbc8fde1e8e08d8094e09c28d4de
|
import flask
import uuid
import os
import socket
import logging
app = flask.Flask(__name__)
@app.route('/ids', methods=['GET'])
def get_uuids():
node_name = os.getenv('NODE_NAME', socket.gethostname())
generated_uuid = uuid.uuid1()
app.logger.info('Node: [%s] UUID: [%s]', node_name, generated_uuid)
rsp = flask.jsonify(uuid=generated_uuid, node=node_name)
rsp.status_code = 200
rsp.headers['Content-Type'] = 'application/json'
return rsp
|
[
"import flask\nimport uuid\nimport os\nimport socket\nimport logging\n\n\napp = flask.Flask(__name__)\n\n\[email protected]('/ids', methods=['GET'])\ndef get_uuids():\n node_name = os.getenv('NODE_NAME', socket.gethostname())\n generated_uuid = uuid.uuid1()\n app.logger.info('Node: [%s] UUID: [%s]', node_name, generated_uuid)\n rsp = flask.jsonify(uuid=generated_uuid, node=node_name)\n rsp.status_code = 200\n rsp.headers['Content-Type'] = 'application/json'\n return rsp\n",
"import flask\nimport uuid\nimport os\nimport socket\nimport logging\napp = flask.Flask(__name__)\n\n\[email protected]('/ids', methods=['GET'])\ndef get_uuids():\n node_name = os.getenv('NODE_NAME', socket.gethostname())\n generated_uuid = uuid.uuid1()\n app.logger.info('Node: [%s] UUID: [%s]', node_name, generated_uuid)\n rsp = flask.jsonify(uuid=generated_uuid, node=node_name)\n rsp.status_code = 200\n rsp.headers['Content-Type'] = 'application/json'\n return rsp\n",
"<import token>\napp = flask.Flask(__name__)\n\n\[email protected]('/ids', methods=['GET'])\ndef get_uuids():\n node_name = os.getenv('NODE_NAME', socket.gethostname())\n generated_uuid = uuid.uuid1()\n app.logger.info('Node: [%s] UUID: [%s]', node_name, generated_uuid)\n rsp = flask.jsonify(uuid=generated_uuid, node=node_name)\n rsp.status_code = 200\n rsp.headers['Content-Type'] = 'application/json'\n return rsp\n",
"<import token>\n<assignment token>\n\n\[email protected]('/ids', methods=['GET'])\ndef get_uuids():\n node_name = os.getenv('NODE_NAME', socket.gethostname())\n generated_uuid = uuid.uuid1()\n app.logger.info('Node: [%s] UUID: [%s]', node_name, generated_uuid)\n rsp = flask.jsonify(uuid=generated_uuid, node=node_name)\n rsp.status_code = 200\n rsp.headers['Content-Type'] = 'application/json'\n return rsp\n",
"<import token>\n<assignment token>\n<function token>\n"
] | false |
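
Two building blocks of the record above can be checked in isolation: the NODE_NAME environment-variable fallback and the choice of uuid1, which embeds a timestamp and a host-derived node id. A small, self-contained sketch; the printed values are machine-dependent.

import os
import socket
import uuid

node_name = os.getenv('NODE_NAME', socket.gethostname())  # same fallback as the record
print(node_name)
print(str(uuid.uuid1()))  # time-based, node-derived UUID, as the endpoint returns
print(str(uuid.uuid4()))  # random UUID, the usual alternative when host identity should not leak
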
98,869 |
a7fd8f701ddecfa8c2355e759b29a5166a222f12
|
__author__ = "gongwei"
import time
tt = time.time()
m = tt/3600/24/365
print(m)
print(1970+int(m))
print(time.localtime()[4])
print(time.timezone/3600)
print(time.daylight)
print(time.clock())
print(time.localtime(29999433234))
|
[
"__author__ = \"gongwei\"\n\n\nimport time\n\ntt = time.time()\n\nm = tt/3600/24/365\n\nprint(m)\n\nprint(1970+int(m))\n\n\nprint(time.localtime()[4])\n\nprint(time.timezone/3600)\nprint(time.daylight)\nprint(time.clock())\nprint(time.localtime(29999433234))",
"__author__ = 'gongwei'\nimport time\ntt = time.time()\nm = tt / 3600 / 24 / 365\nprint(m)\nprint(1970 + int(m))\nprint(time.localtime()[4])\nprint(time.timezone / 3600)\nprint(time.daylight)\nprint(time.clock())\nprint(time.localtime(29999433234))\n",
"__author__ = 'gongwei'\n<import token>\ntt = time.time()\nm = tt / 3600 / 24 / 365\nprint(m)\nprint(1970 + int(m))\nprint(time.localtime()[4])\nprint(time.timezone / 3600)\nprint(time.daylight)\nprint(time.clock())\nprint(time.localtime(29999433234))\n",
"<assignment token>\n<import token>\n<assignment token>\nprint(m)\nprint(1970 + int(m))\nprint(time.localtime()[4])\nprint(time.timezone / 3600)\nprint(time.daylight)\nprint(time.clock())\nprint(time.localtime(29999433234))\n",
"<assignment token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
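
One portability note on the record above: time.clock() was deprecated in Python 3.3 and removed in Python 3.8, so on current interpreters that call raises AttributeError. time.perf_counter() (or time.process_time() for CPU time only) is the replacement; a minimal sketch:

import time

years_since_epoch = time.time() / 3600 / 24 / 365  # rough, ignores leap days, as in the record
print(1970 + int(years_since_epoch))
start = time.perf_counter()  # replacement for the removed time.clock()
_ = sum(range(1_000_000))
print(time.perf_counter() - start)
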
98,870 |
fc15fa5e8b0a4ee792fd95388fc7fbf54f1d87da
|
import cv2
import sys
sys.path.append("game/")
import HighSpeedRacingGame as game
from BrainDQN_Nature import BrainDQN
import numpy as np
import matplotlib.pyplot as plt
import time
imgDim = [80*1,80*1]
# preprocess raw image to 80*80 gray image
def preprocess(observation):
observation = cv2.cvtColor(cv2.resize(observation, (imgDim[0], imgDim[1])), cv2.COLOR_BGR2GRAY)
ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)
return np.reshape(observation,(imgDim[0],imgDim[1],1))
def HighSpeedRacing():
# Step 1: init BrainDQN
actions = 5
brain = BrainDQN(actions, imgDim)
# Step 2: init Flappy Bird Game
flappyBird = game.GameState()
# Step 3: play game
# Step 3.1: obtain init state
action0 = np.array([0,1,0,0,0]) # do nothing
observation0, reward0, terminal = flappyBird.frame_step(action0)
print(observation0)
# print('observation0 1:',observation0)
# observation0 = cv2.cvtColor(cv2.resize(observation0, (imgDim[0],imgDim[1])), cv2.COLOR_BGR2GRAY)
# ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)
    brain.setInitState(observation0,action0) # copy observation0 four times into BrainDQN's self.currentState attribute
# isUseExpertData = False
## isUseExpertData = True
# if(isUseExpertData == True):
# filename = "./expertData/observation"
# actInd = 0
# observation0 = np.load(filename + str(actInd) + ".npy")
# plt.imshow(observation0)
# # # Step 3.2: run the game
# # while 1!= 0:
# for _ in range(1):
# actInd = 0
# for actInd in range(1,2073):
# actInd += 1
# action = np.load(filename + "action" + str(actInd) + ".npy")
# reward = np.load(filename + "reward" + str(actInd) + ".npy")
# terminal = np.load(filename + "terminal" + str(actInd) + ".npy")
# nextObservation = np.load(filename + str(actInd) + ".npy")
# plt.imshow(nextObservation)
# nextObservation = preprocess(nextObservation)
# brain.setPerception(nextObservation,action,reward,terminal)
loss=[]
plt.figure()
ind = 0
# Step 3.2: run the game
while 1!= 0:
# time.sleep(0.1)
action= brain.getAction()
loss.append(brain.loss_temp)
ind += 1
if ind%500==499:
plt.plot(loss)
plt.show()
nextObservation,reward,terminal = flappyBird.frame_step(action)
# nextObservation = preprocess(nextObservation)
brain.setPerception(nextObservation,action,reward,terminal)
def main():
HighSpeedRacing()
if __name__ == '__main__':
main()
|
[
"import cv2\r\nimport sys\r\nsys.path.append(\"game/\")\r\nimport HighSpeedRacingGame as game\r\nfrom BrainDQN_Nature import BrainDQN\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimgDim = [80*1,80*1]\r\n# preprocess raw image to 80*80 gray image\r\ndef preprocess(observation):\r\n observation = cv2.cvtColor(cv2.resize(observation, (imgDim[0], imgDim[1])), cv2.COLOR_BGR2GRAY)\r\n ret, observation = cv2.threshold(observation,1,255,cv2.THRESH_BINARY)\r\n return np.reshape(observation,(imgDim[0],imgDim[1],1))\r\n\r\ndef HighSpeedRacing():\r\n # Step 1: init BrainDQN\r\n actions = 5\r\n brain = BrainDQN(actions, imgDim)\r\n # Step 2: init Flappy Bird Game\r\n flappyBird = game.GameState()\r\n # Step 3: play game\r\n # Step 3.1: obtain init state\r\n action0 = np.array([0,1,0,0,0]) # do nothing\r\n observation0, reward0, terminal = flappyBird.frame_step(action0)\r\n print(observation0)\r\n# print('observation0 1:',observation0)\r\n# observation0 = cv2.cvtColor(cv2.resize(observation0, (imgDim[0],imgDim[1])), cv2.COLOR_BGR2GRAY)\r\n# ret, observation0 = cv2.threshold(observation0,1,255,cv2.THRESH_BINARY)\r\n brain.setInitState(observation0,action0) #将observation0复制4份放进BrainDQN的属性self.currentState中\r\n\r\n# isUseExpertData = False\r\n## isUseExpertData = True\r\n# if(isUseExpertData == True):\r\n# filename = \"./expertData/observation\"\r\n# actInd = 0\r\n# observation0 = np.load(filename + str(actInd) + \".npy\")\r\n# plt.imshow(observation0)\r\n# # # Step 3.2: run the game\r\n# # while 1!= 0:\r\n# for _ in range(1):\r\n# actInd = 0\r\n# for actInd in range(1,2073):\r\n# actInd += 1\r\n# action = np.load(filename + \"action\" + str(actInd) + \".npy\")\r\n# reward = np.load(filename + \"reward\" + str(actInd) + \".npy\")\r\n# terminal = np.load(filename + \"terminal\" + str(actInd) + \".npy\")\r\n# nextObservation = np.load(filename + str(actInd) + \".npy\")\r\n# plt.imshow(nextObservation)\r\n# nextObservation = preprocess(nextObservation)\r\n# brain.setPerception(nextObservation,action,reward,terminal)\r\n loss=[]\r\n plt.figure()\r\n ind = 0\r\n # Step 3.2: run the game\r\n while 1!= 0:\r\n# time.sleep(0.1)\r\n action= brain.getAction()\r\n loss.append(brain.loss_temp)\r\n ind += 1\r\n if ind%500==499:\r\n plt.plot(loss)\r\n plt.show()\r\n nextObservation,reward,terminal = flappyBird.frame_step(action)\r\n# nextObservation = preprocess(nextObservation)\r\n brain.setPerception(nextObservation,action,reward,terminal)\r\n\r\ndef main():\r\n HighSpeedRacing()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n",
"import cv2\nimport sys\nsys.path.append('game/')\nimport HighSpeedRacingGame as game\nfrom BrainDQN_Nature import BrainDQN\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimgDim = [80 * 1, 80 * 1]\n\n\ndef preprocess(observation):\n observation = cv2.cvtColor(cv2.resize(observation, (imgDim[0], imgDim[1\n ])), cv2.COLOR_BGR2GRAY)\n ret, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)\n return np.reshape(observation, (imgDim[0], imgDim[1], 1))\n\n\ndef HighSpeedRacing():\n actions = 5\n brain = BrainDQN(actions, imgDim)\n flappyBird = game.GameState()\n action0 = np.array([0, 1, 0, 0, 0])\n observation0, reward0, terminal = flappyBird.frame_step(action0)\n print(observation0)\n brain.setInitState(observation0, action0)\n loss = []\n plt.figure()\n ind = 0\n while 1 != 0:\n action = brain.getAction()\n loss.append(brain.loss_temp)\n ind += 1\n if ind % 500 == 499:\n plt.plot(loss)\n plt.show()\n nextObservation, reward, terminal = flappyBird.frame_step(action)\n brain.setPerception(nextObservation, action, reward, terminal)\n\n\ndef main():\n HighSpeedRacing()\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\nsys.path.append('game/')\n<import token>\nimgDim = [80 * 1, 80 * 1]\n\n\ndef preprocess(observation):\n observation = cv2.cvtColor(cv2.resize(observation, (imgDim[0], imgDim[1\n ])), cv2.COLOR_BGR2GRAY)\n ret, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)\n return np.reshape(observation, (imgDim[0], imgDim[1], 1))\n\n\ndef HighSpeedRacing():\n actions = 5\n brain = BrainDQN(actions, imgDim)\n flappyBird = game.GameState()\n action0 = np.array([0, 1, 0, 0, 0])\n observation0, reward0, terminal = flappyBird.frame_step(action0)\n print(observation0)\n brain.setInitState(observation0, action0)\n loss = []\n plt.figure()\n ind = 0\n while 1 != 0:\n action = brain.getAction()\n loss.append(brain.loss_temp)\n ind += 1\n if ind % 500 == 499:\n plt.plot(loss)\n plt.show()\n nextObservation, reward, terminal = flappyBird.frame_step(action)\n brain.setPerception(nextObservation, action, reward, terminal)\n\n\ndef main():\n HighSpeedRacing()\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\nsys.path.append('game/')\n<import token>\n<assignment token>\n\n\ndef preprocess(observation):\n observation = cv2.cvtColor(cv2.resize(observation, (imgDim[0], imgDim[1\n ])), cv2.COLOR_BGR2GRAY)\n ret, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)\n return np.reshape(observation, (imgDim[0], imgDim[1], 1))\n\n\ndef HighSpeedRacing():\n actions = 5\n brain = BrainDQN(actions, imgDim)\n flappyBird = game.GameState()\n action0 = np.array([0, 1, 0, 0, 0])\n observation0, reward0, terminal = flappyBird.frame_step(action0)\n print(observation0)\n brain.setInitState(observation0, action0)\n loss = []\n plt.figure()\n ind = 0\n while 1 != 0:\n action = brain.getAction()\n loss.append(brain.loss_temp)\n ind += 1\n if ind % 500 == 499:\n plt.plot(loss)\n plt.show()\n nextObservation, reward, terminal = flappyBird.frame_step(action)\n brain.setPerception(nextObservation, action, reward, terminal)\n\n\ndef main():\n HighSpeedRacing()\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef preprocess(observation):\n observation = cv2.cvtColor(cv2.resize(observation, (imgDim[0], imgDim[1\n ])), cv2.COLOR_BGR2GRAY)\n ret, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)\n return np.reshape(observation, (imgDim[0], imgDim[1], 1))\n\n\ndef HighSpeedRacing():\n actions = 5\n brain = BrainDQN(actions, imgDim)\n flappyBird = game.GameState()\n action0 = np.array([0, 1, 0, 0, 0])\n observation0, reward0, terminal = flappyBird.frame_step(action0)\n print(observation0)\n brain.setInitState(observation0, action0)\n loss = []\n plt.figure()\n ind = 0\n while 1 != 0:\n action = brain.getAction()\n loss.append(brain.loss_temp)\n ind += 1\n if ind % 500 == 499:\n plt.plot(loss)\n plt.show()\n nextObservation, reward, terminal = flappyBird.frame_step(action)\n brain.setPerception(nextObservation, action, reward, terminal)\n\n\ndef main():\n HighSpeedRacing()\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef preprocess(observation):\n observation = cv2.cvtColor(cv2.resize(observation, (imgDim[0], imgDim[1\n ])), cv2.COLOR_BGR2GRAY)\n ret, observation = cv2.threshold(observation, 1, 255, cv2.THRESH_BINARY)\n return np.reshape(observation, (imgDim[0], imgDim[1], 1))\n\n\ndef HighSpeedRacing():\n actions = 5\n brain = BrainDQN(actions, imgDim)\n flappyBird = game.GameState()\n action0 = np.array([0, 1, 0, 0, 0])\n observation0, reward0, terminal = flappyBird.frame_step(action0)\n print(observation0)\n brain.setInitState(observation0, action0)\n loss = []\n plt.figure()\n ind = 0\n while 1 != 0:\n action = brain.getAction()\n loss.append(brain.loss_temp)\n ind += 1\n if ind % 500 == 499:\n plt.plot(loss)\n plt.show()\n nextObservation, reward, terminal = flappyBird.frame_step(action)\n brain.setPerception(nextObservation, action, reward, terminal)\n\n\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef HighSpeedRacing():\n actions = 5\n brain = BrainDQN(actions, imgDim)\n flappyBird = game.GameState()\n action0 = np.array([0, 1, 0, 0, 0])\n observation0, reward0, terminal = flappyBird.frame_step(action0)\n print(observation0)\n brain.setInitState(observation0, action0)\n loss = []\n plt.figure()\n ind = 0\n while 1 != 0:\n action = brain.getAction()\n loss.append(brain.loss_temp)\n ind += 1\n if ind % 500 == 499:\n plt.plot(loss)\n plt.show()\n nextObservation, reward, terminal = flappyBird.frame_step(action)\n brain.setPerception(nextObservation, action, reward, terminal)\n\n\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
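
A self-contained check of the preprocessing idea in the record above, run on a synthetic frame rather than the game screen; the 80x80 target size mirrors imgDim and the input resolution is made up.

import cv2
import numpy as np

frame = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)       # fake BGR game frame
gray = cv2.cvtColor(cv2.resize(frame, (80, 80)), cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)         # same 1/255 binarisation
state = np.reshape(binary, (80, 80, 1))
print(state.shape, state.dtype)                                     # (80, 80, 1) uint8
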
98,871 |
bee688771fbc171686012c49720ddb62bbb65711
|
import sys
sys.stdin = open('14501_퇴사.txt')
def check_schedule():
global max_cost
day_list = [0] * N
cost = 0
for i in range(N):
if A[i] == 1:
start_day = schedule[i][2]
for j in range(schedule[i][0]):
if start_day + j < N and day_list[start_day + j] == 0:
day_list[start_day + j] += 1
else:
return
cost += schedule[i][1]
if max_cost < cost:
max_cost = cost
def PowerSet(N, m):
if N == m:
check_schedule()
else:
A[m] = 1
PowerSet(N, m + 1)
A[m] = 0
PowerSet(N, m + 1)
N = int(input())
schedule = [list(map(int, input().split())) for _ in range(N)]
day = 0
for i in range(len(schedule)):
schedule[i].append(day)
day += 1
A = [0] * N
max_cost = 0
PowerSet(N, 0)
print(max_cost)
|
[
"import sys\nsys.stdin = open('14501_퇴사.txt')\n\ndef check_schedule():\n global max_cost\n day_list = [0] * N\n cost = 0\n for i in range(N):\n if A[i] == 1:\n start_day = schedule[i][2]\n for j in range(schedule[i][0]):\n if start_day + j < N and day_list[start_day + j] == 0:\n day_list[start_day + j] += 1\n else:\n return\n cost += schedule[i][1]\n if max_cost < cost:\n max_cost = cost\n\n\n\ndef PowerSet(N, m):\n if N == m:\n check_schedule()\n\n else:\n A[m] = 1\n PowerSet(N, m + 1)\n A[m] = 0\n PowerSet(N, m + 1)\n\nN = int(input())\n\nschedule = [list(map(int, input().split())) for _ in range(N)]\n\nday = 0\nfor i in range(len(schedule)):\n schedule[i].append(day)\n day += 1\n\nA = [0] * N\nmax_cost = 0\nPowerSet(N, 0)\nprint(max_cost)",
"import sys\nsys.stdin = open('14501_퇴사.txt')\n\n\ndef check_schedule():\n global max_cost\n day_list = [0] * N\n cost = 0\n for i in range(N):\n if A[i] == 1:\n start_day = schedule[i][2]\n for j in range(schedule[i][0]):\n if start_day + j < N and day_list[start_day + j] == 0:\n day_list[start_day + j] += 1\n else:\n return\n cost += schedule[i][1]\n if max_cost < cost:\n max_cost = cost\n\n\ndef PowerSet(N, m):\n if N == m:\n check_schedule()\n else:\n A[m] = 1\n PowerSet(N, m + 1)\n A[m] = 0\n PowerSet(N, m + 1)\n\n\nN = int(input())\nschedule = [list(map(int, input().split())) for _ in range(N)]\nday = 0\nfor i in range(len(schedule)):\n schedule[i].append(day)\n day += 1\nA = [0] * N\nmax_cost = 0\nPowerSet(N, 0)\nprint(max_cost)\n",
"<import token>\nsys.stdin = open('14501_퇴사.txt')\n\n\ndef check_schedule():\n global max_cost\n day_list = [0] * N\n cost = 0\n for i in range(N):\n if A[i] == 1:\n start_day = schedule[i][2]\n for j in range(schedule[i][0]):\n if start_day + j < N and day_list[start_day + j] == 0:\n day_list[start_day + j] += 1\n else:\n return\n cost += schedule[i][1]\n if max_cost < cost:\n max_cost = cost\n\n\ndef PowerSet(N, m):\n if N == m:\n check_schedule()\n else:\n A[m] = 1\n PowerSet(N, m + 1)\n A[m] = 0\n PowerSet(N, m + 1)\n\n\nN = int(input())\nschedule = [list(map(int, input().split())) for _ in range(N)]\nday = 0\nfor i in range(len(schedule)):\n schedule[i].append(day)\n day += 1\nA = [0] * N\nmax_cost = 0\nPowerSet(N, 0)\nprint(max_cost)\n",
"<import token>\n<assignment token>\n\n\ndef check_schedule():\n global max_cost\n day_list = [0] * N\n cost = 0\n for i in range(N):\n if A[i] == 1:\n start_day = schedule[i][2]\n for j in range(schedule[i][0]):\n if start_day + j < N and day_list[start_day + j] == 0:\n day_list[start_day + j] += 1\n else:\n return\n cost += schedule[i][1]\n if max_cost < cost:\n max_cost = cost\n\n\ndef PowerSet(N, m):\n if N == m:\n check_schedule()\n else:\n A[m] = 1\n PowerSet(N, m + 1)\n A[m] = 0\n PowerSet(N, m + 1)\n\n\n<assignment token>\nfor i in range(len(schedule)):\n schedule[i].append(day)\n day += 1\n<assignment token>\nPowerSet(N, 0)\nprint(max_cost)\n",
"<import token>\n<assignment token>\n\n\ndef check_schedule():\n global max_cost\n day_list = [0] * N\n cost = 0\n for i in range(N):\n if A[i] == 1:\n start_day = schedule[i][2]\n for j in range(schedule[i][0]):\n if start_day + j < N and day_list[start_day + j] == 0:\n day_list[start_day + j] += 1\n else:\n return\n cost += schedule[i][1]\n if max_cost < cost:\n max_cost = cost\n\n\ndef PowerSet(N, m):\n if N == m:\n check_schedule()\n else:\n A[m] = 1\n PowerSet(N, m + 1)\n A[m] = 0\n PowerSet(N, m + 1)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef PowerSet(N, m):\n if N == m:\n check_schedule()\n else:\n A[m] = 1\n PowerSet(N, m + 1)\n A[m] = 0\n PowerSet(N, m + 1)\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
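
The record above enumerates every subset of consultations with PowerSet, which is exponential in N; the same problem (Baekjoon 14501) is normally solved with a backwards dynamic program in linear time. A hedged sketch over hard-coded sample data; the (T, P) pairs below are illustrative rather than read from the input file.

schedule = [(3, 10), (5, 20), (1, 10), (1, 20), (2, 15), (4, 40), (2, 200)]  # (T, P) pairs
N = len(schedule)
dp = [0] * (N + 1)                      # dp[i] = best pay earnable from day i to retirement
for i in range(N - 1, -1, -1):
    t, p = schedule[i]
    dp[i] = dp[i + 1]                   # option 1: skip day i
    if i + t <= N:                      # option 2: take the consultation if it finishes in time
        dp[i] = max(dp[i], p + dp[i + t])
print(dp[0])                            # 45 for this schedule
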
98,872 |
fe2547274aa427d399751001da40702ffd90bc68
|
from exports.bindings import Exports
from imports.bindings import add_imports_to_linker, Imports
from typing import Callable
import imports.bindings as i
import sys
import wasmtime
class MyImports(Imports):
def roundtrip_u8(self, x: int) -> int:
raise Exception('unreachable')
def roundtrip_s8(self, x: int) -> int:
raise Exception('unreachable')
def roundtrip_u16(self, x: int) -> int:
raise Exception('unreachable')
def roundtrip_s16(self, x: int) -> int:
raise Exception('unreachable')
def roundtrip_bool(self, x: bool) -> bool:
raise Exception('unreachable')
def roundtrip_char(self, x: str) -> str:
raise Exception('unreachable')
def roundtrip_enum(self, x: i.E) -> i.E:
raise Exception('unreachable')
def get_internal(self, x: i.HostState) -> int:
raise Exception('unreachable')
def run(wasm_file: str) -> None:
store = wasmtime.Store()
module = wasmtime.Module.from_file(store.engine, wasm_file)
linker = wasmtime.Linker(store.engine)
linker.define_wasi()
wasi = wasmtime.WasiConfig()
wasi.inherit_stdout()
wasi.inherit_stderr()
store.set_wasi(wasi)
imports = MyImports()
add_imports_to_linker(linker, store, imports)
wasm = Exports(store, linker, module)
def assert_throws(f: Callable, msg: str) -> None:
try:
f()
raise RuntimeError('expected exception')
except TypeError as e:
actual = str(e)
except OverflowError as e:
actual = str(e)
except ValueError as e:
actual = str(e)
except IndexError as e:
actual = str(e)
if not msg in actual:
print(actual)
assert(msg in actual)
assert_throws(lambda: wasm.invalid_bool(store), 'invalid variant discriminant for bool')
assert_throws(lambda: wasm.invalid_u8(store), 'must be between')
assert_throws(lambda: wasm.invalid_s8(store), 'must be between')
assert_throws(lambda: wasm.invalid_u16(store), 'must be between')
assert_throws(lambda: wasm.invalid_s16(store), 'must be between')
assert_throws(lambda: wasm.invalid_char(store), 'not a valid char')
assert_throws(lambda: wasm.invalid_enum(store), 'not a valid E')
assert_throws(lambda: wasm.invalid_handle(store), 'handle index not valid')
assert_throws(lambda: wasm.invalid_handle_close(store), 'handle index not valid')
if __name__ == '__main__':
run(sys.argv[1])
|
[
"from exports.bindings import Exports\nfrom imports.bindings import add_imports_to_linker, Imports\nfrom typing import Callable\nimport imports.bindings as i\nimport sys\nimport wasmtime\n\nclass MyImports(Imports):\n def roundtrip_u8(self, x: int) -> int:\n raise Exception('unreachable')\n\n def roundtrip_s8(self, x: int) -> int:\n raise Exception('unreachable')\n\n def roundtrip_u16(self, x: int) -> int:\n raise Exception('unreachable')\n\n def roundtrip_s16(self, x: int) -> int:\n raise Exception('unreachable')\n\n def roundtrip_bool(self, x: bool) -> bool:\n raise Exception('unreachable')\n\n def roundtrip_char(self, x: str) -> str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) -> i.E:\n raise Exception('unreachable')\n\n def get_internal(self, x: i.HostState) -> int:\n raise Exception('unreachable')\n\ndef run(wasm_file: str) -> None:\n store = wasmtime.Store()\n module = wasmtime.Module.from_file(store.engine, wasm_file)\n linker = wasmtime.Linker(store.engine)\n linker.define_wasi()\n wasi = wasmtime.WasiConfig()\n wasi.inherit_stdout()\n wasi.inherit_stderr()\n store.set_wasi(wasi)\n\n imports = MyImports()\n add_imports_to_linker(linker, store, imports)\n wasm = Exports(store, linker, module)\n\n def assert_throws(f: Callable, msg: str) -> None:\n try:\n f()\n raise RuntimeError('expected exception')\n except TypeError as e:\n actual = str(e)\n except OverflowError as e:\n actual = str(e)\n except ValueError as e:\n actual = str(e)\n except IndexError as e:\n actual = str(e)\n if not msg in actual:\n print(actual)\n assert(msg in actual)\n\n assert_throws(lambda: wasm.invalid_bool(store), 'invalid variant discriminant for bool')\n assert_throws(lambda: wasm.invalid_u8(store), 'must be between')\n assert_throws(lambda: wasm.invalid_s8(store), 'must be between')\n assert_throws(lambda: wasm.invalid_u16(store), 'must be between')\n assert_throws(lambda: wasm.invalid_s16(store), 'must be between')\n assert_throws(lambda: wasm.invalid_char(store), 'not a valid char')\n assert_throws(lambda: wasm.invalid_enum(store), 'not a valid E')\n assert_throws(lambda: wasm.invalid_handle(store), 'handle index not valid')\n assert_throws(lambda: wasm.invalid_handle_close(store), 'handle index not valid')\n\nif __name__ == '__main__':\n run(sys.argv[1])\n",
"from exports.bindings import Exports\nfrom imports.bindings import add_imports_to_linker, Imports\nfrom typing import Callable\nimport imports.bindings as i\nimport sys\nimport wasmtime\n\n\nclass MyImports(Imports):\n\n def roundtrip_u8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s16(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_bool(self, x: bool) ->bool:\n raise Exception('unreachable')\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) ->i.E:\n raise Exception('unreachable')\n\n def get_internal(self, x: i.HostState) ->int:\n raise Exception('unreachable')\n\n\ndef run(wasm_file: str) ->None:\n store = wasmtime.Store()\n module = wasmtime.Module.from_file(store.engine, wasm_file)\n linker = wasmtime.Linker(store.engine)\n linker.define_wasi()\n wasi = wasmtime.WasiConfig()\n wasi.inherit_stdout()\n wasi.inherit_stderr()\n store.set_wasi(wasi)\n imports = MyImports()\n add_imports_to_linker(linker, store, imports)\n wasm = Exports(store, linker, module)\n\n def assert_throws(f: Callable, msg: str) ->None:\n try:\n f()\n raise RuntimeError('expected exception')\n except TypeError as e:\n actual = str(e)\n except OverflowError as e:\n actual = str(e)\n except ValueError as e:\n actual = str(e)\n except IndexError as e:\n actual = str(e)\n if not msg in actual:\n print(actual)\n assert msg in actual\n assert_throws(lambda : wasm.invalid_bool(store),\n 'invalid variant discriminant for bool')\n assert_throws(lambda : wasm.invalid_u8(store), 'must be between')\n assert_throws(lambda : wasm.invalid_s8(store), 'must be between')\n assert_throws(lambda : wasm.invalid_u16(store), 'must be between')\n assert_throws(lambda : wasm.invalid_s16(store), 'must be between')\n assert_throws(lambda : wasm.invalid_char(store), 'not a valid char')\n assert_throws(lambda : wasm.invalid_enum(store), 'not a valid E')\n assert_throws(lambda : wasm.invalid_handle(store), 'handle index not valid'\n )\n assert_throws(lambda : wasm.invalid_handle_close(store),\n 'handle index not valid')\n\n\nif __name__ == '__main__':\n run(sys.argv[1])\n",
"<import token>\n\n\nclass MyImports(Imports):\n\n def roundtrip_u8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s16(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_bool(self, x: bool) ->bool:\n raise Exception('unreachable')\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) ->i.E:\n raise Exception('unreachable')\n\n def get_internal(self, x: i.HostState) ->int:\n raise Exception('unreachable')\n\n\ndef run(wasm_file: str) ->None:\n store = wasmtime.Store()\n module = wasmtime.Module.from_file(store.engine, wasm_file)\n linker = wasmtime.Linker(store.engine)\n linker.define_wasi()\n wasi = wasmtime.WasiConfig()\n wasi.inherit_stdout()\n wasi.inherit_stderr()\n store.set_wasi(wasi)\n imports = MyImports()\n add_imports_to_linker(linker, store, imports)\n wasm = Exports(store, linker, module)\n\n def assert_throws(f: Callable, msg: str) ->None:\n try:\n f()\n raise RuntimeError('expected exception')\n except TypeError as e:\n actual = str(e)\n except OverflowError as e:\n actual = str(e)\n except ValueError as e:\n actual = str(e)\n except IndexError as e:\n actual = str(e)\n if not msg in actual:\n print(actual)\n assert msg in actual\n assert_throws(lambda : wasm.invalid_bool(store),\n 'invalid variant discriminant for bool')\n assert_throws(lambda : wasm.invalid_u8(store), 'must be between')\n assert_throws(lambda : wasm.invalid_s8(store), 'must be between')\n assert_throws(lambda : wasm.invalid_u16(store), 'must be between')\n assert_throws(lambda : wasm.invalid_s16(store), 'must be between')\n assert_throws(lambda : wasm.invalid_char(store), 'not a valid char')\n assert_throws(lambda : wasm.invalid_enum(store), 'not a valid E')\n assert_throws(lambda : wasm.invalid_handle(store), 'handle index not valid'\n )\n assert_throws(lambda : wasm.invalid_handle_close(store),\n 'handle index not valid')\n\n\nif __name__ == '__main__':\n run(sys.argv[1])\n",
"<import token>\n\n\nclass MyImports(Imports):\n\n def roundtrip_u8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s16(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_bool(self, x: bool) ->bool:\n raise Exception('unreachable')\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) ->i.E:\n raise Exception('unreachable')\n\n def get_internal(self, x: i.HostState) ->int:\n raise Exception('unreachable')\n\n\ndef run(wasm_file: str) ->None:\n store = wasmtime.Store()\n module = wasmtime.Module.from_file(store.engine, wasm_file)\n linker = wasmtime.Linker(store.engine)\n linker.define_wasi()\n wasi = wasmtime.WasiConfig()\n wasi.inherit_stdout()\n wasi.inherit_stderr()\n store.set_wasi(wasi)\n imports = MyImports()\n add_imports_to_linker(linker, store, imports)\n wasm = Exports(store, linker, module)\n\n def assert_throws(f: Callable, msg: str) ->None:\n try:\n f()\n raise RuntimeError('expected exception')\n except TypeError as e:\n actual = str(e)\n except OverflowError as e:\n actual = str(e)\n except ValueError as e:\n actual = str(e)\n except IndexError as e:\n actual = str(e)\n if not msg in actual:\n print(actual)\n assert msg in actual\n assert_throws(lambda : wasm.invalid_bool(store),\n 'invalid variant discriminant for bool')\n assert_throws(lambda : wasm.invalid_u8(store), 'must be between')\n assert_throws(lambda : wasm.invalid_s8(store), 'must be between')\n assert_throws(lambda : wasm.invalid_u16(store), 'must be between')\n assert_throws(lambda : wasm.invalid_s16(store), 'must be between')\n assert_throws(lambda : wasm.invalid_char(store), 'not a valid char')\n assert_throws(lambda : wasm.invalid_enum(store), 'not a valid E')\n assert_throws(lambda : wasm.invalid_handle(store), 'handle index not valid'\n )\n assert_throws(lambda : wasm.invalid_handle_close(store),\n 'handle index not valid')\n\n\n<code token>\n",
"<import token>\n\n\nclass MyImports(Imports):\n\n def roundtrip_u8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s16(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_bool(self, x: bool) ->bool:\n raise Exception('unreachable')\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) ->i.E:\n raise Exception('unreachable')\n\n def get_internal(self, x: i.HostState) ->int:\n raise Exception('unreachable')\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass MyImports(Imports):\n\n def roundtrip_u8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n <function token>\n\n def roundtrip_bool(self, x: bool) ->bool:\n raise Exception('unreachable')\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) ->i.E:\n raise Exception('unreachable')\n\n def get_internal(self, x: i.HostState) ->int:\n raise Exception('unreachable')\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass MyImports(Imports):\n\n def roundtrip_u8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_s8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n <function token>\n <function token>\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) ->i.E:\n raise Exception('unreachable')\n\n def get_internal(self, x: i.HostState) ->int:\n raise Exception('unreachable')\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass MyImports(Imports):\n <function token>\n\n def roundtrip_s8(self, x: int) ->int:\n raise Exception('unreachable')\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n <function token>\n <function token>\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) ->i.E:\n raise Exception('unreachable')\n\n def get_internal(self, x: i.HostState) ->int:\n raise Exception('unreachable')\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass MyImports(Imports):\n <function token>\n <function token>\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n <function token>\n <function token>\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) ->i.E:\n raise Exception('unreachable')\n\n def get_internal(self, x: i.HostState) ->int:\n raise Exception('unreachable')\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass MyImports(Imports):\n <function token>\n <function token>\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n <function token>\n <function token>\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n\n def roundtrip_enum(self, x: i.E) ->i.E:\n raise Exception('unreachable')\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass MyImports(Imports):\n <function token>\n <function token>\n\n def roundtrip_u16(self, x: int) ->int:\n raise Exception('unreachable')\n <function token>\n <function token>\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass MyImports(Imports):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def roundtrip_char(self, x: str) ->str:\n raise Exception('unreachable')\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass MyImports(Imports):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<function token>\n<code token>\n"
] | false |
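
The record above hand-rolls an assert_throws helper with one except clause per exception type; the same "expect an exception whose message contains X" pattern can be written more compactly. A hedged, library-free sketch; the example callables are arbitrary.

def assert_throws(f, msg):
    # condensed version of the helper in the record: one except clause, explicit message check
    try:
        f()
    except (TypeError, OverflowError, ValueError, IndexError) as e:
        assert msg in str(e), f'expected {msg!r} in {str(e)!r}'
        return
    raise RuntimeError('expected exception')


assert_throws(lambda: int('not a number'), 'invalid literal')
assert_throws(lambda: [1, 2, 3][10], 'out of range')
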
98,873 |
fa11e463bdde30550c5a9da6189fa0efba459811
|
# Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wfa_cardinality_estimation_evaluation_framework.evaluations.tests.report_generator."""
import os
import re
from absl.testing import absltest
import numpy as np
import pandas as pd
from wfa_cardinality_estimation_evaluation_framework.estimators import exact_set
from wfa_cardinality_estimation_evaluation_framework.evaluations import analyzer
from wfa_cardinality_estimation_evaluation_framework.evaluations import configs
from wfa_cardinality_estimation_evaluation_framework.evaluations import evaluator
from wfa_cardinality_estimation_evaluation_framework.evaluations import report_generator
from wfa_cardinality_estimation_evaluation_framework.evaluations.data import evaluation_configs
from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator
from wfa_cardinality_estimation_evaluation_framework.simulations import simulator
class ReportGeneratorTest(absltest.TestCase):
def setUp(self):
super(ReportGeneratorTest, self).setUp()
exact_set_lossless = simulator.SketchEstimatorConfig(
name='exact_set-infty-infty-lossless',
sketch_factory=exact_set.ExactSet.get_sketch_factory(),
estimator=exact_set.LosslessEstimator(),
sketch_noiser=None,
estimate_noiser=None)
exact_set_less_one = simulator.SketchEstimatorConfig(
name='exact_set-infty-infty-less_one',
sketch_factory=exact_set.ExactSet.get_sketch_factory(),
estimator=exact_set.LessOneEstimator(),
sketch_noiser=exact_set.AddRandomElementsNoiser(
num_random_elements=0, random_state=np.random.RandomState()),
estimate_noiser=None)
self.sketch_estimator_config_list = (exact_set_lossless, exact_set_less_one)
self.evaluation_config = configs.EvaluationConfig(
name='test_evaluation',
num_runs=2,
scenario_config_list=[
configs.ScenarioConfig(
name='ind1',
set_generator_factory=(
set_generator.IndependentSetGenerator
.get_generator_factory_with_num_and_size(
universe_size=10, num_sets=5, set_size=1))),
configs.ScenarioConfig(
name='ind2',
set_generator_factory=(
set_generator.IndependentSetGenerator
.get_generator_factory_with_num_and_size(
universe_size=10, num_sets=5, set_size=1))),
])
self.evaluation_run_name = 'test_run'
def _run_evaluation_and_simulation(out_dir):
self.evaluator = evaluator.Evaluator(
evaluation_config=self.evaluation_config,
sketch_estimator_config_list=self.sketch_estimator_config_list,
run_name=self.evaluation_run_name,
out_dir=out_dir)
self.evaluator()
self.analyzer = analyzer.CardinalityEstimatorEvaluationAnalyzer(
out_dir=out_dir,
evaluation_directory=out_dir,
evaluation_run_name=self.evaluation_run_name,
evaluation_name=self.evaluation_config.name,
estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])
self.analyzer()
self.run_evaluation_and_simulation = _run_evaluation_and_simulation
def test_parse_sketch_estimator_name(self):
sketch_estimator_name = 'vector_of_counts-4096-ln3-sequential'
parsed_name = report_generator.ReportGenerator.parse_sketch_estimator_name(
sketch_estimator_name)
expected = {
evaluation_configs.SKETCH: 'vector_of_counts',
evaluation_configs.SKETCH_CONFIG: '4096',
evaluation_configs.EPSILON: 'ln3',
evaluation_configs.ESTIMATOR: 'sequential'
}
self.assertEqual(parsed_name, expected)
def test_add_parsed_sketch_estimator_name_cols(self):
df = pd.DataFrame({
'sketch_estimator': ['vector_of_counts-4096-ln3-sequential',
'bloom_filter-1e6-infty-union_estimator']})
result = (
report_generator.ReportGenerator
.add_parsed_sketch_estimator_name_cols(df, 'sketch_estimator'))
expected = pd.DataFrame({
'sketch_estimator': ['vector_of_counts-4096-ln3-sequential',
'bloom_filter-1e6-infty-union_estimator'],
evaluation_configs.SKETCH: ['vector_of_counts', 'bloom_filter'],
evaluation_configs.SKETCH_CONFIG: ['4096', '1e6'],
evaluation_configs.EPSILON: ['ln3', 'infty'],
evaluation_configs.ESTIMATOR: ['sequential', 'union_estimator']
})
try:
pd.testing.assert_frame_equal(result, expected)
except AssertionError:
self.fail('Parsed sketch_estimator_name is not added correctly to df.')
def test_widen_num_estimable_sets_df(self):
out_dir = self.create_tempdir('test_widen_num_estimable_sets_df')
self.run_evaluation_and_simulation(out_dir)
analysis_results = analyzer.get_analysis_results(
analysis_out_dir=out_dir,
evaluation_run_name=self.evaluation_run_name,
evaluation_name=self.evaluation_config.name)
num_estimable_sets_stats_df = (
report_generator.ReportGenerator.widen_num_estimable_sets_df(
analysis_results[report_generator.KEY_NUM_ESTIMABLE_SETS_STATS_DF]))
# Test values are in correct format.
regex = re.compile(
r'\d+<br>relative_error: mean=(((-)?\d+\.\d+)|(nan)), '
r'std=(((-)?\d+\.\d+)|(nan))')
for s in np.ndarray.flatten(num_estimable_sets_stats_df.values):
      self.assertRegex(s, regex, f'value {s} is not in the correct format.')
# Test the columns are correct.
regex = r'(\d+)\%\/(\d+)'
for col in num_estimable_sets_stats_df.columns.values:
      self.assertRegex(
          col[0], regex, f'column {col[0]} is not in the correct format.')
def test_generate_boxplot_html(self):
out_dir = self.create_tempdir('test_generate_boxplot_html')
self.run_evaluation_and_simulation(out_dir)
analysis_results = analyzer.get_analysis_results(
analysis_out_dir=out_dir,
evaluation_run_name=self.evaluation_run_name,
evaluation_name=self.evaluation_config.name)
# Generate boxplot html.
description_to_file_dir = analysis_results[
report_generator.KEY_DESCRIPTION_TO_FILE_DIR]
sketch_estimator_list = [i.name for i in self.sketch_estimator_config_list]
scenario_list = [
conf.name for conf in self.evaluation_config.scenario_config_list]
plot_html = report_generator.ReportGenerator.generate_boxplot_html(
description_to_file_dir=description_to_file_dir,
sketch_estimator_list=sketch_estimator_list,
scenario_list=scenario_list,
out_dir=out_dir)
# Read the table from html.
plot_html = ' '.join(plot_html.split('\n'))
regex = r'<table(.+?)</table>'
for h in re.finditer(regex, plot_html):
tab = pd.read_html(h.group(0), header=[0, 1])[0]
      self.assertGreater(tab.shape[0], 0,
                         'The html table is empty.')
def test_generate_and_save_html_report(self):
analysis_out_dir = self.create_tempdir('analysis_dir')
report_out_dir = self.create_tempdir('test_report_dir')
self.run_evaluation_and_simulation(analysis_out_dir)
new_report = report_generator.ReportGenerator(
out_dir=report_out_dir,
analysis_out_dir=analysis_out_dir,
evaluation_run_name=self.evaluation_run_name,
evaluation_name=self.evaluation_config.name)
report_url = new_report('new_report')
self.assertTrue(os.path.exists(report_url))
if __name__ == '__main__':
absltest.main()
|
[
"# Copyright 2020 The Private Cardinality Estimation Framework Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for wfa_cardinality_estimation_evaluation_framework.evaluations.tests.report_generator.\"\"\"\nimport os\nimport re\n\nfrom absl.testing import absltest\n\nimport numpy as np\nimport pandas as pd\n\nfrom wfa_cardinality_estimation_evaluation_framework.estimators import exact_set\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations import analyzer\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations import configs\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations import evaluator\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations import report_generator\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations.data import evaluation_configs\nfrom wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\nfrom wfa_cardinality_estimation_evaluation_framework.simulations import simulator\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n\n def setUp(self):\n super(ReportGeneratorTest, self).setUp()\n exact_set_lossless = simulator.SketchEstimatorConfig(\n name='exact_set-infty-infty-lossless',\n sketch_factory=exact_set.ExactSet.get_sketch_factory(),\n estimator=exact_set.LosslessEstimator(),\n sketch_noiser=None,\n estimate_noiser=None)\n exact_set_less_one = simulator.SketchEstimatorConfig(\n name='exact_set-infty-infty-less_one',\n sketch_factory=exact_set.ExactSet.get_sketch_factory(),\n estimator=exact_set.LessOneEstimator(),\n sketch_noiser=exact_set.AddRandomElementsNoiser(\n num_random_elements=0, random_state=np.random.RandomState()),\n estimate_noiser=None)\n self.sketch_estimator_config_list = (exact_set_lossless, exact_set_less_one)\n\n self.evaluation_config = configs.EvaluationConfig(\n name='test_evaluation',\n num_runs=2,\n scenario_config_list=[\n configs.ScenarioConfig(\n name='ind1',\n set_generator_factory=(\n set_generator.IndependentSetGenerator\n .get_generator_factory_with_num_and_size(\n universe_size=10, num_sets=5, set_size=1))),\n configs.ScenarioConfig(\n name='ind2',\n set_generator_factory=(\n set_generator.IndependentSetGenerator\n .get_generator_factory_with_num_and_size(\n universe_size=10, num_sets=5, set_size=1))),\n ])\n\n self.evaluation_run_name = 'test_run'\n\n def _run_evaluation_and_simulation(out_dir):\n self.evaluator = evaluator.Evaluator(\n evaluation_config=self.evaluation_config,\n sketch_estimator_config_list=self.sketch_estimator_config_list,\n run_name=self.evaluation_run_name,\n out_dir=out_dir)\n self.evaluator()\n\n self.analyzer = analyzer.CardinalityEstimatorEvaluationAnalyzer(\n out_dir=out_dir,\n evaluation_directory=out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name,\n estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])\n self.analyzer()\n\n self.run_evaluation_and_simulation = _run_evaluation_and_simulation\n\n def test_parse_sketch_estimator_name(self):\n 
sketch_estimator_name = 'vector_of_counts-4096-ln3-sequential'\n parsed_name = report_generator.ReportGenerator.parse_sketch_estimator_name(\n sketch_estimator_name)\n expected = {\n evaluation_configs.SKETCH: 'vector_of_counts',\n evaluation_configs.SKETCH_CONFIG: '4096',\n evaluation_configs.EPSILON: 'ln3',\n evaluation_configs.ESTIMATOR: 'sequential'\n }\n self.assertEqual(parsed_name, expected)\n\n def test_add_parsed_sketch_estimator_name_cols(self):\n df = pd.DataFrame({\n 'sketch_estimator': ['vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator']})\n result = (\n report_generator.ReportGenerator\n .add_parsed_sketch_estimator_name_cols(df, 'sketch_estimator'))\n expected = pd.DataFrame({\n 'sketch_estimator': ['vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator'],\n evaluation_configs.SKETCH: ['vector_of_counts', 'bloom_filter'],\n evaluation_configs.SKETCH_CONFIG: ['4096', '1e6'],\n evaluation_configs.EPSILON: ['ln3', 'infty'],\n evaluation_configs.ESTIMATOR: ['sequential', 'union_estimator']\n })\n try:\n pd.testing.assert_frame_equal(result, expected)\n except AssertionError:\n self.fail('Parsed sketch_estimator_name is not added correctly to df.')\n\n def test_widen_num_estimable_sets_df(self):\n out_dir = self.create_tempdir('test_widen_num_estimable_sets_df')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(\n analysis_out_dir=out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n num_estimable_sets_stats_df = (\n report_generator.ReportGenerator.widen_num_estimable_sets_df(\n analysis_results[report_generator.KEY_NUM_ESTIMABLE_SETS_STATS_DF]))\n\n # Test values are in correct format.\n regex = re.compile(\n r'\\d+<br>relative_error: mean=(((-)?\\d+\\.\\d+)|(nan)), '\n r'std=(((-)?\\d+\\.\\d+)|(nan))')\n for s in np.ndarray.flatten(num_estimable_sets_stats_df.values):\n self.assertRegex(s, regex, f'value {s} not is not in correct format.')\n\n # Test the columns are correct.\n regex = r'(\\d+)\\%\\/(\\d+)'\n for col in num_estimable_sets_stats_df.columns.values:\n self.assertRegex(\n col[0], regex, f'column {col[0]} not is not in correct format.')\n\n def test_generate_boxplot_html(self):\n out_dir = self.create_tempdir('test_generate_boxplot_html')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(\n analysis_out_dir=out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n # Generate boxplot html.\n description_to_file_dir = analysis_results[\n report_generator.KEY_DESCRIPTION_TO_FILE_DIR]\n sketch_estimator_list = [i.name for i in self.sketch_estimator_config_list]\n scenario_list = [\n conf.name for conf in self.evaluation_config.scenario_config_list]\n plot_html = report_generator.ReportGenerator.generate_boxplot_html(\n description_to_file_dir=description_to_file_dir,\n sketch_estimator_list=sketch_estimator_list,\n scenario_list=scenario_list,\n out_dir=out_dir)\n # Read the table from html.\n plot_html = ' '.join(plot_html.split('\\n'))\n regex = r'<table(.+?)</table>'\n for h in re.finditer(regex, plot_html):\n tab = pd.read_html(h.group(0), header=[0, 1])[0]\n self.assertGreater(tab.shape[0], 0,\n 'The html table is empty table.')\n\n def test_generate_and_save_html_report(self):\n analysis_out_dir = self.create_tempdir('analysis_dir')\n report_out_dir = self.create_tempdir('test_report_dir')\n 
self.run_evaluation_and_simulation(analysis_out_dir)\n new_report = report_generator.ReportGenerator(\n out_dir=report_out_dir,\n analysis_out_dir=analysis_out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n report_url = new_report('new_report')\n self.assertTrue(os.path.exists(report_url))\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"<docstring token>\nimport os\nimport re\nfrom absl.testing import absltest\nimport numpy as np\nimport pandas as pd\nfrom wfa_cardinality_estimation_evaluation_framework.estimators import exact_set\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations import analyzer\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations import configs\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations import evaluator\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations import report_generator\nfrom wfa_cardinality_estimation_evaluation_framework.evaluations.data import evaluation_configs\nfrom wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\nfrom wfa_cardinality_estimation_evaluation_framework.simulations import simulator\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n\n def setUp(self):\n super(ReportGeneratorTest, self).setUp()\n exact_set_lossless = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-lossless', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LosslessEstimator(), sketch_noiser=None, estimate_noiser=None)\n exact_set_less_one = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-less_one', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LessOneEstimator(), sketch_noiser=exact_set.\n AddRandomElementsNoiser(num_random_elements=0, random_state=np.\n random.RandomState()), estimate_noiser=None)\n self.sketch_estimator_config_list = (exact_set_lossless,\n exact_set_less_one)\n self.evaluation_config = configs.EvaluationConfig(name=\n 'test_evaluation', num_runs=2, scenario_config_list=[configs.\n ScenarioConfig(name='ind1', set_generator_factory=set_generator\n .IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1)), configs.ScenarioConfig(name='ind2',\n set_generator_factory=set_generator.IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1))])\n self.evaluation_run_name = 'test_run'\n\n def _run_evaluation_and_simulation(out_dir):\n self.evaluator = evaluator.Evaluator(evaluation_config=self.\n evaluation_config, sketch_estimator_config_list=self.\n sketch_estimator_config_list, run_name=self.\n evaluation_run_name, out_dir=out_dir)\n self.evaluator()\n self.analyzer = analyzer.CardinalityEstimatorEvaluationAnalyzer(\n out_dir=out_dir, evaluation_directory=out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name,\n estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])\n self.analyzer()\n self.run_evaluation_and_simulation = _run_evaluation_and_simulation\n\n def test_parse_sketch_estimator_name(self):\n sketch_estimator_name = 'vector_of_counts-4096-ln3-sequential'\n parsed_name = (report_generator.ReportGenerator.\n parse_sketch_estimator_name(sketch_estimator_name))\n expected = {evaluation_configs.SKETCH: 'vector_of_counts',\n evaluation_configs.SKETCH_CONFIG: '4096', evaluation_configs.\n EPSILON: 'ln3', evaluation_configs.ESTIMATOR: 'sequential'}\n self.assertEqual(parsed_name, expected)\n\n def test_add_parsed_sketch_estimator_name_cols(self):\n df = pd.DataFrame({'sketch_estimator': [\n 'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator']})\n result = (report_generator.ReportGenerator.\n add_parsed_sketch_estimator_name_cols(df, 'sketch_estimator'))\n expected = pd.DataFrame({'sketch_estimator': [\n 
'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator'], evaluation_configs.\n SKETCH: ['vector_of_counts', 'bloom_filter'],\n evaluation_configs.SKETCH_CONFIG: ['4096', '1e6'],\n evaluation_configs.EPSILON: ['ln3', 'infty'],\n evaluation_configs.ESTIMATOR: ['sequential', 'union_estimator']})\n try:\n pd.testing.assert_frame_equal(result, expected)\n except AssertionError:\n self.fail(\n 'Parsed sketch_estimator_name is not added correctly to df.')\n\n def test_widen_num_estimable_sets_df(self):\n out_dir = self.create_tempdir('test_widen_num_estimable_sets_df')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n num_estimable_sets_stats_df = (report_generator.ReportGenerator.\n widen_num_estimable_sets_df(analysis_results[report_generator.\n KEY_NUM_ESTIMABLE_SETS_STATS_DF]))\n regex = re.compile(\n '\\\\d+<br>relative_error: mean=(((-)?\\\\d+\\\\.\\\\d+)|(nan)), std=(((-)?\\\\d+\\\\.\\\\d+)|(nan))'\n )\n for s in np.ndarray.flatten(num_estimable_sets_stats_df.values):\n self.assertRegex(s, regex,\n f'value {s} not is not in correct format.')\n regex = '(\\\\d+)\\\\%\\\\/(\\\\d+)'\n for col in num_estimable_sets_stats_df.columns.values:\n self.assertRegex(col[0], regex,\n f'column {col[0]} not is not in correct format.')\n\n def test_generate_boxplot_html(self):\n out_dir = self.create_tempdir('test_generate_boxplot_html')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n description_to_file_dir = analysis_results[report_generator.\n KEY_DESCRIPTION_TO_FILE_DIR]\n sketch_estimator_list = [i.name for i in self.\n sketch_estimator_config_list]\n scenario_list = [conf.name for conf in self.evaluation_config.\n scenario_config_list]\n plot_html = report_generator.ReportGenerator.generate_boxplot_html(\n description_to_file_dir=description_to_file_dir,\n sketch_estimator_list=sketch_estimator_list, scenario_list=\n scenario_list, out_dir=out_dir)\n plot_html = ' '.join(plot_html.split('\\n'))\n regex = '<table(.+?)</table>'\n for h in re.finditer(regex, plot_html):\n tab = pd.read_html(h.group(0), header=[0, 1])[0]\n self.assertGreater(tab.shape[0], 0,\n 'The html table is empty table.')\n\n def test_generate_and_save_html_report(self):\n analysis_out_dir = self.create_tempdir('analysis_dir')\n report_out_dir = self.create_tempdir('test_report_dir')\n self.run_evaluation_and_simulation(analysis_out_dir)\n new_report = report_generator.ReportGenerator(out_dir=\n report_out_dir, analysis_out_dir=analysis_out_dir,\n evaluation_run_name=self.evaluation_run_name, evaluation_name=\n self.evaluation_config.name)\n report_url = new_report('new_report')\n self.assertTrue(os.path.exists(report_url))\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"<docstring token>\n<import token>\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n\n def setUp(self):\n super(ReportGeneratorTest, self).setUp()\n exact_set_lossless = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-lossless', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LosslessEstimator(), sketch_noiser=None, estimate_noiser=None)\n exact_set_less_one = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-less_one', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LessOneEstimator(), sketch_noiser=exact_set.\n AddRandomElementsNoiser(num_random_elements=0, random_state=np.\n random.RandomState()), estimate_noiser=None)\n self.sketch_estimator_config_list = (exact_set_lossless,\n exact_set_less_one)\n self.evaluation_config = configs.EvaluationConfig(name=\n 'test_evaluation', num_runs=2, scenario_config_list=[configs.\n ScenarioConfig(name='ind1', set_generator_factory=set_generator\n .IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1)), configs.ScenarioConfig(name='ind2',\n set_generator_factory=set_generator.IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1))])\n self.evaluation_run_name = 'test_run'\n\n def _run_evaluation_and_simulation(out_dir):\n self.evaluator = evaluator.Evaluator(evaluation_config=self.\n evaluation_config, sketch_estimator_config_list=self.\n sketch_estimator_config_list, run_name=self.\n evaluation_run_name, out_dir=out_dir)\n self.evaluator()\n self.analyzer = analyzer.CardinalityEstimatorEvaluationAnalyzer(\n out_dir=out_dir, evaluation_directory=out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name,\n estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])\n self.analyzer()\n self.run_evaluation_and_simulation = _run_evaluation_and_simulation\n\n def test_parse_sketch_estimator_name(self):\n sketch_estimator_name = 'vector_of_counts-4096-ln3-sequential'\n parsed_name = (report_generator.ReportGenerator.\n parse_sketch_estimator_name(sketch_estimator_name))\n expected = {evaluation_configs.SKETCH: 'vector_of_counts',\n evaluation_configs.SKETCH_CONFIG: '4096', evaluation_configs.\n EPSILON: 'ln3', evaluation_configs.ESTIMATOR: 'sequential'}\n self.assertEqual(parsed_name, expected)\n\n def test_add_parsed_sketch_estimator_name_cols(self):\n df = pd.DataFrame({'sketch_estimator': [\n 'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator']})\n result = (report_generator.ReportGenerator.\n add_parsed_sketch_estimator_name_cols(df, 'sketch_estimator'))\n expected = pd.DataFrame({'sketch_estimator': [\n 'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator'], evaluation_configs.\n SKETCH: ['vector_of_counts', 'bloom_filter'],\n evaluation_configs.SKETCH_CONFIG: ['4096', '1e6'],\n evaluation_configs.EPSILON: ['ln3', 'infty'],\n evaluation_configs.ESTIMATOR: ['sequential', 'union_estimator']})\n try:\n pd.testing.assert_frame_equal(result, expected)\n except AssertionError:\n self.fail(\n 'Parsed sketch_estimator_name is not added correctly to df.')\n\n def test_widen_num_estimable_sets_df(self):\n out_dir = self.create_tempdir('test_widen_num_estimable_sets_df')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n 
evaluation_name=self.evaluation_config.name)\n num_estimable_sets_stats_df = (report_generator.ReportGenerator.\n widen_num_estimable_sets_df(analysis_results[report_generator.\n KEY_NUM_ESTIMABLE_SETS_STATS_DF]))\n regex = re.compile(\n '\\\\d+<br>relative_error: mean=(((-)?\\\\d+\\\\.\\\\d+)|(nan)), std=(((-)?\\\\d+\\\\.\\\\d+)|(nan))'\n )\n for s in np.ndarray.flatten(num_estimable_sets_stats_df.values):\n self.assertRegex(s, regex,\n f'value {s} not is not in correct format.')\n regex = '(\\\\d+)\\\\%\\\\/(\\\\d+)'\n for col in num_estimable_sets_stats_df.columns.values:\n self.assertRegex(col[0], regex,\n f'column {col[0]} not is not in correct format.')\n\n def test_generate_boxplot_html(self):\n out_dir = self.create_tempdir('test_generate_boxplot_html')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n description_to_file_dir = analysis_results[report_generator.\n KEY_DESCRIPTION_TO_FILE_DIR]\n sketch_estimator_list = [i.name for i in self.\n sketch_estimator_config_list]\n scenario_list = [conf.name for conf in self.evaluation_config.\n scenario_config_list]\n plot_html = report_generator.ReportGenerator.generate_boxplot_html(\n description_to_file_dir=description_to_file_dir,\n sketch_estimator_list=sketch_estimator_list, scenario_list=\n scenario_list, out_dir=out_dir)\n plot_html = ' '.join(plot_html.split('\\n'))\n regex = '<table(.+?)</table>'\n for h in re.finditer(regex, plot_html):\n tab = pd.read_html(h.group(0), header=[0, 1])[0]\n self.assertGreater(tab.shape[0], 0,\n 'The html table is empty table.')\n\n def test_generate_and_save_html_report(self):\n analysis_out_dir = self.create_tempdir('analysis_dir')\n report_out_dir = self.create_tempdir('test_report_dir')\n self.run_evaluation_and_simulation(analysis_out_dir)\n new_report = report_generator.ReportGenerator(out_dir=\n report_out_dir, analysis_out_dir=analysis_out_dir,\n evaluation_run_name=self.evaluation_run_name, evaluation_name=\n self.evaluation_config.name)\n report_url = new_report('new_report')\n self.assertTrue(os.path.exists(report_url))\n\n\nif __name__ == '__main__':\n absltest.main()\n",
"<docstring token>\n<import token>\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n\n def setUp(self):\n super(ReportGeneratorTest, self).setUp()\n exact_set_lossless = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-lossless', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LosslessEstimator(), sketch_noiser=None, estimate_noiser=None)\n exact_set_less_one = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-less_one', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LessOneEstimator(), sketch_noiser=exact_set.\n AddRandomElementsNoiser(num_random_elements=0, random_state=np.\n random.RandomState()), estimate_noiser=None)\n self.sketch_estimator_config_list = (exact_set_lossless,\n exact_set_less_one)\n self.evaluation_config = configs.EvaluationConfig(name=\n 'test_evaluation', num_runs=2, scenario_config_list=[configs.\n ScenarioConfig(name='ind1', set_generator_factory=set_generator\n .IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1)), configs.ScenarioConfig(name='ind2',\n set_generator_factory=set_generator.IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1))])\n self.evaluation_run_name = 'test_run'\n\n def _run_evaluation_and_simulation(out_dir):\n self.evaluator = evaluator.Evaluator(evaluation_config=self.\n evaluation_config, sketch_estimator_config_list=self.\n sketch_estimator_config_list, run_name=self.\n evaluation_run_name, out_dir=out_dir)\n self.evaluator()\n self.analyzer = analyzer.CardinalityEstimatorEvaluationAnalyzer(\n out_dir=out_dir, evaluation_directory=out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name,\n estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])\n self.analyzer()\n self.run_evaluation_and_simulation = _run_evaluation_and_simulation\n\n def test_parse_sketch_estimator_name(self):\n sketch_estimator_name = 'vector_of_counts-4096-ln3-sequential'\n parsed_name = (report_generator.ReportGenerator.\n parse_sketch_estimator_name(sketch_estimator_name))\n expected = {evaluation_configs.SKETCH: 'vector_of_counts',\n evaluation_configs.SKETCH_CONFIG: '4096', evaluation_configs.\n EPSILON: 'ln3', evaluation_configs.ESTIMATOR: 'sequential'}\n self.assertEqual(parsed_name, expected)\n\n def test_add_parsed_sketch_estimator_name_cols(self):\n df = pd.DataFrame({'sketch_estimator': [\n 'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator']})\n result = (report_generator.ReportGenerator.\n add_parsed_sketch_estimator_name_cols(df, 'sketch_estimator'))\n expected = pd.DataFrame({'sketch_estimator': [\n 'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator'], evaluation_configs.\n SKETCH: ['vector_of_counts', 'bloom_filter'],\n evaluation_configs.SKETCH_CONFIG: ['4096', '1e6'],\n evaluation_configs.EPSILON: ['ln3', 'infty'],\n evaluation_configs.ESTIMATOR: ['sequential', 'union_estimator']})\n try:\n pd.testing.assert_frame_equal(result, expected)\n except AssertionError:\n self.fail(\n 'Parsed sketch_estimator_name is not added correctly to df.')\n\n def test_widen_num_estimable_sets_df(self):\n out_dir = self.create_tempdir('test_widen_num_estimable_sets_df')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n 
evaluation_name=self.evaluation_config.name)\n num_estimable_sets_stats_df = (report_generator.ReportGenerator.\n widen_num_estimable_sets_df(analysis_results[report_generator.\n KEY_NUM_ESTIMABLE_SETS_STATS_DF]))\n regex = re.compile(\n '\\\\d+<br>relative_error: mean=(((-)?\\\\d+\\\\.\\\\d+)|(nan)), std=(((-)?\\\\d+\\\\.\\\\d+)|(nan))'\n )\n for s in np.ndarray.flatten(num_estimable_sets_stats_df.values):\n self.assertRegex(s, regex,\n f'value {s} not is not in correct format.')\n regex = '(\\\\d+)\\\\%\\\\/(\\\\d+)'\n for col in num_estimable_sets_stats_df.columns.values:\n self.assertRegex(col[0], regex,\n f'column {col[0]} not is not in correct format.')\n\n def test_generate_boxplot_html(self):\n out_dir = self.create_tempdir('test_generate_boxplot_html')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n description_to_file_dir = analysis_results[report_generator.\n KEY_DESCRIPTION_TO_FILE_DIR]\n sketch_estimator_list = [i.name for i in self.\n sketch_estimator_config_list]\n scenario_list = [conf.name for conf in self.evaluation_config.\n scenario_config_list]\n plot_html = report_generator.ReportGenerator.generate_boxplot_html(\n description_to_file_dir=description_to_file_dir,\n sketch_estimator_list=sketch_estimator_list, scenario_list=\n scenario_list, out_dir=out_dir)\n plot_html = ' '.join(plot_html.split('\\n'))\n regex = '<table(.+?)</table>'\n for h in re.finditer(regex, plot_html):\n tab = pd.read_html(h.group(0), header=[0, 1])[0]\n self.assertGreater(tab.shape[0], 0,\n 'The html table is empty table.')\n\n def test_generate_and_save_html_report(self):\n analysis_out_dir = self.create_tempdir('analysis_dir')\n report_out_dir = self.create_tempdir('test_report_dir')\n self.run_evaluation_and_simulation(analysis_out_dir)\n new_report = report_generator.ReportGenerator(out_dir=\n report_out_dir, analysis_out_dir=analysis_out_dir,\n evaluation_run_name=self.evaluation_run_name, evaluation_name=\n self.evaluation_config.name)\n report_url = new_report('new_report')\n self.assertTrue(os.path.exists(report_url))\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n\n def setUp(self):\n super(ReportGeneratorTest, self).setUp()\n exact_set_lossless = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-lossless', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LosslessEstimator(), sketch_noiser=None, estimate_noiser=None)\n exact_set_less_one = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-less_one', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LessOneEstimator(), sketch_noiser=exact_set.\n AddRandomElementsNoiser(num_random_elements=0, random_state=np.\n random.RandomState()), estimate_noiser=None)\n self.sketch_estimator_config_list = (exact_set_lossless,\n exact_set_less_one)\n self.evaluation_config = configs.EvaluationConfig(name=\n 'test_evaluation', num_runs=2, scenario_config_list=[configs.\n ScenarioConfig(name='ind1', set_generator_factory=set_generator\n .IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1)), configs.ScenarioConfig(name='ind2',\n set_generator_factory=set_generator.IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1))])\n self.evaluation_run_name = 'test_run'\n\n def _run_evaluation_and_simulation(out_dir):\n self.evaluator = evaluator.Evaluator(evaluation_config=self.\n evaluation_config, sketch_estimator_config_list=self.\n sketch_estimator_config_list, run_name=self.\n evaluation_run_name, out_dir=out_dir)\n self.evaluator()\n self.analyzer = analyzer.CardinalityEstimatorEvaluationAnalyzer(\n out_dir=out_dir, evaluation_directory=out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name,\n estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])\n self.analyzer()\n self.run_evaluation_and_simulation = _run_evaluation_and_simulation\n\n def test_parse_sketch_estimator_name(self):\n sketch_estimator_name = 'vector_of_counts-4096-ln3-sequential'\n parsed_name = (report_generator.ReportGenerator.\n parse_sketch_estimator_name(sketch_estimator_name))\n expected = {evaluation_configs.SKETCH: 'vector_of_counts',\n evaluation_configs.SKETCH_CONFIG: '4096', evaluation_configs.\n EPSILON: 'ln3', evaluation_configs.ESTIMATOR: 'sequential'}\n self.assertEqual(parsed_name, expected)\n\n def test_add_parsed_sketch_estimator_name_cols(self):\n df = pd.DataFrame({'sketch_estimator': [\n 'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator']})\n result = (report_generator.ReportGenerator.\n add_parsed_sketch_estimator_name_cols(df, 'sketch_estimator'))\n expected = pd.DataFrame({'sketch_estimator': [\n 'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator'], evaluation_configs.\n SKETCH: ['vector_of_counts', 'bloom_filter'],\n evaluation_configs.SKETCH_CONFIG: ['4096', '1e6'],\n evaluation_configs.EPSILON: ['ln3', 'infty'],\n evaluation_configs.ESTIMATOR: ['sequential', 'union_estimator']})\n try:\n pd.testing.assert_frame_equal(result, expected)\n except AssertionError:\n self.fail(\n 'Parsed sketch_estimator_name is not added correctly to df.')\n <function token>\n\n def test_generate_boxplot_html(self):\n out_dir = self.create_tempdir('test_generate_boxplot_html')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n 
evaluation_name=self.evaluation_config.name)\n description_to_file_dir = analysis_results[report_generator.\n KEY_DESCRIPTION_TO_FILE_DIR]\n sketch_estimator_list = [i.name for i in self.\n sketch_estimator_config_list]\n scenario_list = [conf.name for conf in self.evaluation_config.\n scenario_config_list]\n plot_html = report_generator.ReportGenerator.generate_boxplot_html(\n description_to_file_dir=description_to_file_dir,\n sketch_estimator_list=sketch_estimator_list, scenario_list=\n scenario_list, out_dir=out_dir)\n plot_html = ' '.join(plot_html.split('\\n'))\n regex = '<table(.+?)</table>'\n for h in re.finditer(regex, plot_html):\n tab = pd.read_html(h.group(0), header=[0, 1])[0]\n self.assertGreater(tab.shape[0], 0,\n 'The html table is empty table.')\n\n def test_generate_and_save_html_report(self):\n analysis_out_dir = self.create_tempdir('analysis_dir')\n report_out_dir = self.create_tempdir('test_report_dir')\n self.run_evaluation_and_simulation(analysis_out_dir)\n new_report = report_generator.ReportGenerator(out_dir=\n report_out_dir, analysis_out_dir=analysis_out_dir,\n evaluation_run_name=self.evaluation_run_name, evaluation_name=\n self.evaluation_config.name)\n report_url = new_report('new_report')\n self.assertTrue(os.path.exists(report_url))\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n\n def setUp(self):\n super(ReportGeneratorTest, self).setUp()\n exact_set_lossless = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-lossless', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LosslessEstimator(), sketch_noiser=None, estimate_noiser=None)\n exact_set_less_one = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-less_one', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LessOneEstimator(), sketch_noiser=exact_set.\n AddRandomElementsNoiser(num_random_elements=0, random_state=np.\n random.RandomState()), estimate_noiser=None)\n self.sketch_estimator_config_list = (exact_set_lossless,\n exact_set_less_one)\n self.evaluation_config = configs.EvaluationConfig(name=\n 'test_evaluation', num_runs=2, scenario_config_list=[configs.\n ScenarioConfig(name='ind1', set_generator_factory=set_generator\n .IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1)), configs.ScenarioConfig(name='ind2',\n set_generator_factory=set_generator.IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1))])\n self.evaluation_run_name = 'test_run'\n\n def _run_evaluation_and_simulation(out_dir):\n self.evaluator = evaluator.Evaluator(evaluation_config=self.\n evaluation_config, sketch_estimator_config_list=self.\n sketch_estimator_config_list, run_name=self.\n evaluation_run_name, out_dir=out_dir)\n self.evaluator()\n self.analyzer = analyzer.CardinalityEstimatorEvaluationAnalyzer(\n out_dir=out_dir, evaluation_directory=out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name,\n estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])\n self.analyzer()\n self.run_evaluation_and_simulation = _run_evaluation_and_simulation\n <function token>\n\n def test_add_parsed_sketch_estimator_name_cols(self):\n df = pd.DataFrame({'sketch_estimator': [\n 'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator']})\n result = (report_generator.ReportGenerator.\n add_parsed_sketch_estimator_name_cols(df, 'sketch_estimator'))\n expected = pd.DataFrame({'sketch_estimator': [\n 'vector_of_counts-4096-ln3-sequential',\n 'bloom_filter-1e6-infty-union_estimator'], evaluation_configs.\n SKETCH: ['vector_of_counts', 'bloom_filter'],\n evaluation_configs.SKETCH_CONFIG: ['4096', '1e6'],\n evaluation_configs.EPSILON: ['ln3', 'infty'],\n evaluation_configs.ESTIMATOR: ['sequential', 'union_estimator']})\n try:\n pd.testing.assert_frame_equal(result, expected)\n except AssertionError:\n self.fail(\n 'Parsed sketch_estimator_name is not added correctly to df.')\n <function token>\n\n def test_generate_boxplot_html(self):\n out_dir = self.create_tempdir('test_generate_boxplot_html')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n description_to_file_dir = analysis_results[report_generator.\n KEY_DESCRIPTION_TO_FILE_DIR]\n sketch_estimator_list = [i.name for i in self.\n sketch_estimator_config_list]\n scenario_list = [conf.name for conf in self.evaluation_config.\n scenario_config_list]\n plot_html = report_generator.ReportGenerator.generate_boxplot_html(\n description_to_file_dir=description_to_file_dir,\n 
sketch_estimator_list=sketch_estimator_list, scenario_list=\n scenario_list, out_dir=out_dir)\n plot_html = ' '.join(plot_html.split('\\n'))\n regex = '<table(.+?)</table>'\n for h in re.finditer(regex, plot_html):\n tab = pd.read_html(h.group(0), header=[0, 1])[0]\n self.assertGreater(tab.shape[0], 0,\n 'The html table is empty table.')\n\n def test_generate_and_save_html_report(self):\n analysis_out_dir = self.create_tempdir('analysis_dir')\n report_out_dir = self.create_tempdir('test_report_dir')\n self.run_evaluation_and_simulation(analysis_out_dir)\n new_report = report_generator.ReportGenerator(out_dir=\n report_out_dir, analysis_out_dir=analysis_out_dir,\n evaluation_run_name=self.evaluation_run_name, evaluation_name=\n self.evaluation_config.name)\n report_url = new_report('new_report')\n self.assertTrue(os.path.exists(report_url))\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n\n def setUp(self):\n super(ReportGeneratorTest, self).setUp()\n exact_set_lossless = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-lossless', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LosslessEstimator(), sketch_noiser=None, estimate_noiser=None)\n exact_set_less_one = simulator.SketchEstimatorConfig(name=\n 'exact_set-infty-infty-less_one', sketch_factory=exact_set.\n ExactSet.get_sketch_factory(), estimator=exact_set.\n LessOneEstimator(), sketch_noiser=exact_set.\n AddRandomElementsNoiser(num_random_elements=0, random_state=np.\n random.RandomState()), estimate_noiser=None)\n self.sketch_estimator_config_list = (exact_set_lossless,\n exact_set_less_one)\n self.evaluation_config = configs.EvaluationConfig(name=\n 'test_evaluation', num_runs=2, scenario_config_list=[configs.\n ScenarioConfig(name='ind1', set_generator_factory=set_generator\n .IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1)), configs.ScenarioConfig(name='ind2',\n set_generator_factory=set_generator.IndependentSetGenerator.\n get_generator_factory_with_num_and_size(universe_size=10,\n num_sets=5, set_size=1))])\n self.evaluation_run_name = 'test_run'\n\n def _run_evaluation_and_simulation(out_dir):\n self.evaluator = evaluator.Evaluator(evaluation_config=self.\n evaluation_config, sketch_estimator_config_list=self.\n sketch_estimator_config_list, run_name=self.\n evaluation_run_name, out_dir=out_dir)\n self.evaluator()\n self.analyzer = analyzer.CardinalityEstimatorEvaluationAnalyzer(\n out_dir=out_dir, evaluation_directory=out_dir,\n evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name,\n estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])\n self.analyzer()\n self.run_evaluation_and_simulation = _run_evaluation_and_simulation\n <function token>\n <function token>\n <function token>\n\n def test_generate_boxplot_html(self):\n out_dir = self.create_tempdir('test_generate_boxplot_html')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n description_to_file_dir = analysis_results[report_generator.\n KEY_DESCRIPTION_TO_FILE_DIR]\n sketch_estimator_list = [i.name for i in self.\n sketch_estimator_config_list]\n scenario_list = [conf.name for conf in self.evaluation_config.\n scenario_config_list]\n plot_html = report_generator.ReportGenerator.generate_boxplot_html(\n description_to_file_dir=description_to_file_dir,\n sketch_estimator_list=sketch_estimator_list, scenario_list=\n scenario_list, out_dir=out_dir)\n plot_html = ' '.join(plot_html.split('\\n'))\n regex = '<table(.+?)</table>'\n for h in re.finditer(regex, plot_html):\n tab = pd.read_html(h.group(0), header=[0, 1])[0]\n self.assertGreater(tab.shape[0], 0,\n 'The html table is empty table.')\n\n def test_generate_and_save_html_report(self):\n analysis_out_dir = self.create_tempdir('analysis_dir')\n report_out_dir = self.create_tempdir('test_report_dir')\n self.run_evaluation_and_simulation(analysis_out_dir)\n new_report = report_generator.ReportGenerator(out_dir=\n report_out_dir, analysis_out_dir=analysis_out_dir,\n evaluation_run_name=self.evaluation_run_name, evaluation_name=\n self.evaluation_config.name)\n report_url = new_report('new_report')\n 
self.assertTrue(os.path.exists(report_url))\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_generate_boxplot_html(self):\n out_dir = self.create_tempdir('test_generate_boxplot_html')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n description_to_file_dir = analysis_results[report_generator.\n KEY_DESCRIPTION_TO_FILE_DIR]\n sketch_estimator_list = [i.name for i in self.\n sketch_estimator_config_list]\n scenario_list = [conf.name for conf in self.evaluation_config.\n scenario_config_list]\n plot_html = report_generator.ReportGenerator.generate_boxplot_html(\n description_to_file_dir=description_to_file_dir,\n sketch_estimator_list=sketch_estimator_list, scenario_list=\n scenario_list, out_dir=out_dir)\n plot_html = ' '.join(plot_html.split('\\n'))\n regex = '<table(.+?)</table>'\n for h in re.finditer(regex, plot_html):\n tab = pd.read_html(h.group(0), header=[0, 1])[0]\n self.assertGreater(tab.shape[0], 0,\n 'The html table is empty table.')\n\n def test_generate_and_save_html_report(self):\n analysis_out_dir = self.create_tempdir('analysis_dir')\n report_out_dir = self.create_tempdir('test_report_dir')\n self.run_evaluation_and_simulation(analysis_out_dir)\n new_report = report_generator.ReportGenerator(out_dir=\n report_out_dir, analysis_out_dir=analysis_out_dir,\n evaluation_run_name=self.evaluation_run_name, evaluation_name=\n self.evaluation_config.name)\n report_url = new_report('new_report')\n self.assertTrue(os.path.exists(report_url))\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_generate_boxplot_html(self):\n out_dir = self.create_tempdir('test_generate_boxplot_html')\n self.run_evaluation_and_simulation(out_dir)\n analysis_results = analyzer.get_analysis_results(analysis_out_dir=\n out_dir, evaluation_run_name=self.evaluation_run_name,\n evaluation_name=self.evaluation_config.name)\n description_to_file_dir = analysis_results[report_generator.\n KEY_DESCRIPTION_TO_FILE_DIR]\n sketch_estimator_list = [i.name for i in self.\n sketch_estimator_config_list]\n scenario_list = [conf.name for conf in self.evaluation_config.\n scenario_config_list]\n plot_html = report_generator.ReportGenerator.generate_boxplot_html(\n description_to_file_dir=description_to_file_dir,\n sketch_estimator_list=sketch_estimator_list, scenario_list=\n scenario_list, out_dir=out_dir)\n plot_html = ' '.join(plot_html.split('\\n'))\n regex = '<table(.+?)</table>'\n for h in re.finditer(regex, plot_html):\n tab = pd.read_html(h.group(0), header=[0, 1])[0]\n self.assertGreater(tab.shape[0], 0,\n 'The html table is empty table.')\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ReportGeneratorTest(absltest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n<code token>\n"
] | false |
98,874 |
b33388d33fb195d40d167d30bc048df9fb4aba70
|
#Britni Canale
#SoftDev1 pd 6
#K26 -- Getting More REST
#2018-11-15
from flask import Flask, render_template, session, request, url_for, redirect, flash
from urllib.request import urlopen, Request
import json, requests
app = Flask(__name__)
@app.route("/")
def hello():
dog = "https://dog.ceo/api/breeds/image/random"
req = urlopen(dog)
dogdict = json.loads(req.read())
dogpic = dogdict["message"]
print(dogpic)
    # dogpic looks like https://images.dog.ceo/breeds/<breed>/<file>; the fixed prefix
    # "https://images.dog.ceo/breeds/" is 30 characters, so this slice pulls out <breed>
    breed = dogpic[30:dogpic.rindex("/")]
print(breed)
#url = 'http://api.repo.nypl.org/api/v1/items/search?q=cats&publicDomainOnly=true'
dogfacts = 'http://api.repo.nypl.org/api/v1/items/search?q='+breed
auth = 'Token token=ekujifnuvmrzwzuk'
call = requests.get(dogfacts, headers={'Authorization': auth})
#r = requests.get(dogfacts)
#logger.info(type(r))
#request = Request(dogfacts, values.encode("utf-8"))
#openrequest = urlopen(request)
#readrequest = openrequest.read()
#print(r.text)
#factsdict = json.loads(r.text)
#print(factsdict)
return render_template("index.html", ttl = "DOGS AND STUFF", DOG = dogpic, breed = breed)
if __name__ == "__main__":
app.debug = True
app.run()
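
# --- Editor's note (illustrative sketch, not part of the original submission) ---
# The hard-coded slice above is brittle; a hedged alternative is to split the dog.ceo
# image URL on "/" instead. The helper name below is hypothetical and is not wired
# into the route; it only shows the idea.
def breed_from_url(url):
    """Return the breed segment of a dog.ceo image URL, e.g. 'hound-afghan'."""
    parts = url.split("/")
    # e.g. ['https:', '', 'images.dog.ceo', 'breeds', 'hound-afghan', 'n02088094_1003.jpg']
    if len(parts) > 5 and parts[3] == "breeds":
        return parts[4]
    return ""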
|
[
"#Britni Canale\n#SoftDev1 pd 6\n#K26 -- Getting More REST\n#2018-11-15\n\n\nfrom flask import Flask, render_template, session, request, url_for, redirect, flash\nfrom urllib.request import urlopen, Request\nimport json, requests\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef hello():\n dog = \"https://dog.ceo/api/breeds/image/random\"\n req = urlopen(dog)\n dogdict = json.loads(req.read())\n dogpic = dogdict[\"message\"]\n print(dogpic)\n breed = dogpic[30:dogpic.rindex(\"/\")]\n print(breed)\n\n #url = 'http://api.repo.nypl.org/api/v1/items/search?q=cats&publicDomainOnly=true'\n\n\n\n dogfacts = 'http://api.repo.nypl.org/api/v1/items/search?q='+breed\n auth = 'Token token=ekujifnuvmrzwzuk'\n call = requests.get(dogfacts, headers={'Authorization': auth})\n\n #r = requests.get(dogfacts)\n #logger.info(type(r))\n #request = Request(dogfacts, values.encode(\"utf-8\"))\n #openrequest = urlopen(request)\n #readrequest = openrequest.read()\n #print(r.text)\n #factsdict = json.loads(r.text)\n #print(factsdict)\n return render_template(\"index.html\", ttl = \"DOGS AND STUFF\", DOG = dogpic, breed = breed)\n\nif __name__ == \"__main__\":\n app.debug = True\n app.run()\n",
"from flask import Flask, render_template, session, request, url_for, redirect, flash\nfrom urllib.request import urlopen, Request\nimport json, requests\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello():\n dog = 'https://dog.ceo/api/breeds/image/random'\n req = urlopen(dog)\n dogdict = json.loads(req.read())\n dogpic = dogdict['message']\n print(dogpic)\n breed = dogpic[30:dogpic.rindex('/')]\n print(breed)\n dogfacts = 'http://api.repo.nypl.org/api/v1/items/search?q=' + breed\n auth = 'Token token=ekujifnuvmrzwzuk'\n call = requests.get(dogfacts, headers={'Authorization': auth})\n return render_template('index.html', ttl='DOGS AND STUFF', DOG=dogpic,\n breed=breed)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n",
"<import token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef hello():\n dog = 'https://dog.ceo/api/breeds/image/random'\n req = urlopen(dog)\n dogdict = json.loads(req.read())\n dogpic = dogdict['message']\n print(dogpic)\n breed = dogpic[30:dogpic.rindex('/')]\n print(breed)\n dogfacts = 'http://api.repo.nypl.org/api/v1/items/search?q=' + breed\n auth = 'Token token=ekujifnuvmrzwzuk'\n call = requests.get(dogfacts, headers={'Authorization': auth})\n return render_template('index.html', ttl='DOGS AND STUFF', DOG=dogpic,\n breed=breed)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef hello():\n dog = 'https://dog.ceo/api/breeds/image/random'\n req = urlopen(dog)\n dogdict = json.loads(req.read())\n dogpic = dogdict['message']\n print(dogpic)\n breed = dogpic[30:dogpic.rindex('/')]\n print(breed)\n dogfacts = 'http://api.repo.nypl.org/api/v1/items/search?q=' + breed\n auth = 'Token token=ekujifnuvmrzwzuk'\n call = requests.get(dogfacts, headers={'Authorization': auth})\n return render_template('index.html', ttl='DOGS AND STUFF', DOG=dogpic,\n breed=breed)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n",
"<import token>\n<assignment token>\n\n\[email protected]('/')\ndef hello():\n dog = 'https://dog.ceo/api/breeds/image/random'\n req = urlopen(dog)\n dogdict = json.loads(req.read())\n dogpic = dogdict['message']\n print(dogpic)\n breed = dogpic[30:dogpic.rindex('/')]\n print(breed)\n dogfacts = 'http://api.repo.nypl.org/api/v1/items/search?q=' + breed\n auth = 'Token token=ekujifnuvmrzwzuk'\n call = requests.get(dogfacts, headers={'Authorization': auth})\n return render_template('index.html', ttl='DOGS AND STUFF', DOG=dogpic,\n breed=breed)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
98,875 |
5e8aa2fd85aef1a9e0fa939b80ba66f4065a90a2
|
#FluidChannel.py
"""
Class implementation file for the Python class FluidChannel
Depends on vtkHelper module for geometry visualization functionality
"""
import math
import argparse
import numpy as np
from vtkHelper import saveStructuredPointsVTK_ascii as writeVTK
import scipy.io
class EmptyChannel:
"""
a channel with nothing in it
"""
def __init__(self,Lo):
"""
constructor
"""
self.Lo = Lo
def get_Lo(self):
"""
        return the characteristic obstacle length Lo for the channel
"""
return self.Lo
def get_obstList(self,X,Y,Z):
"""
for an empty channel - no obstacles
"""
return []
class SphereObstruction(EmptyChannel):
"""
a channel with a sphere obstruction
"""
def __init__(self,r,x_c,y_c,z_c):
"""
just need to define the radius and position of the center of the obstacle.
it is up to the caller to verify that the object will fit within the intended
channel. If it does not fit, the obstacle will effectively be
truncated at the channel boundaries
"""
self.r = r
self.x_c = x_c
self.y_c = y_c
self.z_c = z_c
def get_Lo(self):
return self.r*2.
def get_obstList(self,X,Y,Z):
"""
        return a list of all indices within the boundary of the sphere
"""
x = np.array(X); y = np.array(Y); z = np.array(Z);
dist = (x - self.x_c)**2 + (y - self.y_c)**2 + (z - self.z_c)**2
return list(np.where(dist < self.r**2))
class EllipticalScourPit(EmptyChannel):
"""
a channel with an elliptical scour pit with prescribed properties
corresponds to case 3 of Bryan's geometry_desc.m
"""
def __init__(self,x_c,z_c,cyl_rad):
"""
constructor giving the x and z coordinates of the scour pit along with
the radius of the cylindrical piling
"""
self.x_c = x_c
self.z_c = z_c
self.cyl_rad = cyl_rad
def get_Lo(self):
return self.cyl_rad*2.
def get_obstList(self,X,Y,Z):
"""
return a list of all indices of lattice points within the boundaries of the
scour pit obstacle
"""
ellip_a = 2.*2.*self.cyl_rad
ellip_b = 2.*self.cyl_rad
ellip_c = 8.*self.cyl_rad
ellip_x = self.x_c
ellip_z = self.z_c + self.cyl_rad
ellip_y = ellip_b
floor_part = np.array(np.where(Y < ellip_b)).flatten()
dist = (X - self.x_c)**2 + (Z - self.z_c)**2;
cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())
scour_pit = np.array(np.where( (X - ellip_x)**2/(ellip_a**2) +
(Y - ellip_y)**2/(ellip_b**2) +
(Z - ellip_z)**2/(ellip_c**2) <= 1.)).flatten()
# remove the scour pit from the floor
obst_list = np.setxor1d(floor_part[:],
np.intersect1d(floor_part[:],scour_pit[:]))
# then add the cylinder
obst_list = np.union1d(obst_list[:],cyl_part[:])
return list(obst_list[:])
def fluid_properties(fluid_str):
"""
Return the physical density and kinematic viscosity for the prescribed
fluid.
"""
fluid_lib = {'water':(1000., 1.0e-6),
'glycol':(965.3,6.216e-4),
'glycerin':(1260,1.18e-3)}
if fluid_str in fluid_lib.keys():
return fluid_lib[fluid_str]
else:
        print('valid fluids are:')
        for keys in fluid_lib:
            print(" '%s' " % keys)
raise KeyError('invalid fluid specified')
class FluidChannel:
def __init__(self,Lx_p=1.,
Ly_p=1.,
Lz_p=6.,
fluid='water',
obst=EmptyChannel(1.),
N_divs = 5,
wallList=['left','right','top','bottom']):
"""
class constructor
"""
self.Lx_p = Lx_p
self.Ly_p = Ly_p
self.Lz_p = Lz_p
self.N_divs = N_divs
self.fluid = fluid
self.obst = obst
# generate the geometry
Lo = obst.get_Lo()
self.Ny = math.ceil((N_divs-1)*(Ly_p/Lo))+1
self.Nx = math.ceil((N_divs-1)*(Lx_p/Lo))+1
self.Nz = math.ceil((N_divs-1)*(Lz_p/Lo))+1
self.nnodes = self.Nx*self.Ny*self.Nz
print "Creating channel with %g lattice points." % self.nnodes
x = np.linspace(0.,Lx_p,self.Nx).astype(np.float32);
y = np.linspace(0.,Ly_p,self.Ny).astype(np.float32);
z = np.linspace(0.,Lz_p,self.Nz).astype(np.float32);
Y,Z,X = np.meshgrid(y,z,x);
self.x = np.reshape(X,self.nnodes)
self.y = np.reshape(Y,self.nnodes)
self.z = np.reshape(Z,self.nnodes)
# get fluid properties from the included fluid library
self.rho_p, self.nu_p = fluid_properties(fluid)
# identify inlet and outlet nodes -
# require the user to set solid boundaries separately
self.inlet_list = np.where(self.z==0)
self.outlet_list = np.where(self.z==Lz_p)
print "Getting obstacle list"
# get obstacle list
self.obst_list = self.obst.get_obstList(self.x[:],self.y[:],self.z[:])
print "Generating channel solid boundaries"
# set channel walls
self.set_channel_walls(wallList)
# now eliminate overlap between node lists
self.inlet_list = np.setxor1d(self.inlet_list[:],
np.intersect1d(self.inlet_list[:],self.solid_list[:]))
self.inlet_list = np.setxor1d(self.inlet_list[:],
np.intersect1d(self.inlet_list[:],self.obst_list[:]))
self.outlet_list = np.setxor1d(self.outlet_list[:],
np.intersect1d(self.outlet_list[:],self.solid_list[:]))
self.outlet_list = np.setxor1d(self.outlet_list[:],
np.intersect1d(self.outlet_list[:],self.obst_list[:]))
self.obst_list = np.setxor1d(self.obst_list[:],
np.intersect1d(self.obst_list[:],self.solid_list[:]))
def write_mat_file(self):
"""
generate the mat file to interface with genInput.py. Needs to save
Lx_p, Ly_p, Lz_p, Lo, Ny_divs, rho_p, nu_p, snl, inl and onl.
note that the snl and obst_list need to be combined into one list
"""
mat_dict = {}
mat_dict['Lx_p'] = self.Lx_p
mat_dict['Ly_p'] = self.Ly_p
mat_dict['Lz_p'] = self.Lz_p
mat_dict['Lo'] = self.obst.get_Lo()
mat_dict['Ny_divs'] = self.N_divs
mat_dict['rho_p'] = self.rho_p
mat_dict['nu_p'] = self.nu_p
mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))
mat_dict['inl'] = list(self.inlet_list[:])
mat_dict['onl'] = list(self.outlet_list[:])
scipy.io.savemat('geometry_description',mat_dict)
def write_bc_vtk(self):
"""
write node lists to properly formatted VTK files
"""
print "Creating boundary condition arrays"
obst_array = np.zeros(self.nnodes)
obst_array[list(self.obst_list)] = 100.
#print type(self.inlet_list)
inlet_array = np.zeros(self.nnodes)
inlet_array[list(self.inlet_list)] = 200.
outlet_array = np.zeros(self.nnodes)
outlet_array[list(self.outlet_list)] = 300.
solid_array = np.zeros(self.nnodes)
solid_array[list(self.solid_list)] = 500.
dims = [int(self.Nx), int(self.Ny), int(self.Nz)]
origin = [0., 0., 0.]
dx = self.x[1] - self.x[0]
spacing = [dx, dx, dx] #uniform lattice
print "Writing boundary conditions to VTK files"
writeVTK(inlet_array,'inlet','inlet.vtk',dims,origin,spacing)
writeVTK(outlet_array,'outlet','outlet.vtk',dims,origin,spacing)
writeVTK(obst_array,'obst','obst.vtk',dims,origin,spacing)
writeVTK(solid_array,'solid','solid.vtk',dims,origin,spacing)
# must have geometry set first
def set_channel_walls(self,walls=['left','right','top','bottom']):
"""
set up to 4 walls as solid walls for the simulation
"""
solid_list_a = np.empty(0).flatten()
solid_list_b = np.empty(0).flatten()
solid_list_c = np.empty(0).flatten()
solid_list_d = np.empty(0).flatten()
for w in walls:
if w=='right':
solid_list_a = np.array(np.where((self.x==0.))).flatten()
elif w=='left':
solid_list_b = np.array(np.where((self.x == self.Lx_p))).flatten()
elif w=='top':
solid_list_d = np.array(np.where((self.y == self.Ly_p))).flatten()
elif w=='bottom':
solid_list_c = np.array(np.where((self.y == 0.))).flatten()
solid_list = np.array(np.union1d(solid_list_a,solid_list_b));
solid_list = np.array(np.union1d(solid_list,solid_list_c))
self.solid_list = np.array(np.union1d(solid_list,solid_list_d))
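
# --- Editor's note (illustrative sketch, not part of the original file) ---
# A minimal, hedged usage example of the classes above; the sizes below are made up.
# Kept as comments so the module can still be imported without side effects.
#
#   obst = SphereObstruction(r=0.1, x_c=0.5, y_c=0.5, z_c=1.0)
#   chan = FluidChannel(Lx_p=1., Ly_p=1., Lz_p=6., fluid='water',
#                       obst=obst, N_divs=7)
#   chan.write_mat_file()   # writes geometry_description.mat for genInput.py
#   chan.write_bc_vtk()     # writes inlet.vtk, outlet.vtk, obst.vtk, solid.vtk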
|
[
"#FluidChannel.py\n\"\"\"\nClass implementation file for the Python class FluidChannel\nDepends on vtkHelper module for geometry visualization functionality\n\n\"\"\"\nimport math\nimport argparse\nimport numpy as np\nfrom vtkHelper import saveStructuredPointsVTK_ascii as writeVTK\nimport scipy.io\n\nclass EmptyChannel: \n \"\"\"\n a channel with nothing in it\n \"\"\"\n def __init__(self,Lo):\n \"\"\"\n constructor\n \"\"\"\n self.Lo = Lo\n\n def get_Lo(self):\n \"\"\"\n set Lo if need be ?\n \"\"\"\n return self.Lo\n\n def get_obstList(self,X,Y,Z):\n \"\"\"\n for an empty channel - no obstacles \n \"\"\"\n return []\n\nclass SphereObstruction(EmptyChannel):\n \"\"\"\n a channel with a sphere obstruction\n \"\"\"\n\n def __init__(self,r,x_c,y_c,z_c):\n \"\"\"\n just need to define the radius and position of the center of the obstacle.\n it is up to the caller to verify that the object will fit within the intended\n channel. If it does not fit, the obstacle will effectively be\n truncated at the channel boundaries\n \n \"\"\"\n self.r = r\n self.x_c = x_c\n self.y_c = y_c\n self.z_c = z_c\n \n def get_Lo(self):\n return self.r*2.\n\n def get_obstList(self,X,Y,Z):\n \"\"\"\n return a list of all indices all indices within boundary of sphere \n \"\"\"\n\n x = np.array(X); y = np.array(Y); z = np.array(Z);\n dist = (x - self.x_c)**2 + (y - self.y_c)**2 + (z - self.z_c)**2\n \n return list(np.where(dist < self.r**2))\n \nclass EllipticalScourPit(EmptyChannel):\n \"\"\"\n a channel with an elliptical scour pit with prescribed properties\n corresponds to case 3 of Bryan's geometry_desc.m\n \"\"\"\n\n def __init__(self,x_c,z_c,cyl_rad):\n \"\"\"\n constructor giving the x and z coordinates of the scour pit along with\n the radius of the cylindrical piling\n \"\"\"\n self.x_c = x_c\n self.z_c = z_c\n self.cyl_rad = cyl_rad\n\n def get_Lo(self):\n return self.cyl_rad*2.\n\n def get_obstList(self,X,Y,Z):\n \"\"\"\n return a list of all indices of lattice points within the boundaries of the\n scour pit obstacle\n\n \"\"\"\n \n ellip_a = 2.*2.*self.cyl_rad\n ellip_b = 2.*self.cyl_rad\n ellip_c = 8.*self.cyl_rad\n ellip_x = self.x_c\n ellip_z = self.z_c + self.cyl_rad\n ellip_y = ellip_b \n\n floor_part = np.array(np.where(Y < ellip_b)).flatten()\n\n dist = (X - self.x_c)**2 + (Z - self.z_c)**2;\n cyl_part = list(np.array(np.where( dist < self.cyl_rad**2)).flatten())\n\n scour_pit = np.array(np.where( (X - ellip_x)**2/(ellip_a**2) + \n (Y - ellip_y)**2/(ellip_b**2) +\n (Z - ellip_z)**2/(ellip_c**2) <= 1.)).flatten()\n\n # remove the scour pit from the floor\n obst_list = np.setxor1d(floor_part[:], \n np.intersect1d(floor_part[:],scour_pit[:]))\n\n\n # then add the cylinder\n obst_list = np.union1d(obst_list[:],cyl_part[:])\n \n return list(obst_list[:])\n\n\n\ndef fluid_properties(fluid_str): \n \"\"\"\n Return the physical density and kinematic viscosity for the prescribed\n fluid.\n \n \"\"\"\n fluid_lib = {'water':(1000., 1.0e-6), \n 'glycol':(965.3,6.216e-4),\n 'glycerin':(1260,1.18e-3)}\n if fluid_str in fluid_lib.keys():\n return fluid_lib[fluid_str]\n else:\n print 'valid fluids are:'\n for keys in fluid_lib:\n print \" '%s' \" % keys\n raise KeyError('invalid fluid specified')\n\nclass FluidChannel:\n def __init__(self,Lx_p=1.,\n Ly_p=1.,\n Lz_p=6.,\n fluid='water', \n obst=EmptyChannel(1.),\n N_divs = 5,\n wallList=['left','right','top','bottom']):\n \"\"\"\n class constructor\n\n \"\"\"\n self.Lx_p = Lx_p\n self.Ly_p = Ly_p\n self.Lz_p = Lz_p\n self.N_divs = N_divs\n self.fluid = fluid\n 
self.obst = obst\n\n # generate the geometry\n\n Lo = obst.get_Lo()\n\n self.Ny = math.ceil((N_divs-1)*(Ly_p/Lo))+1\n self.Nx = math.ceil((N_divs-1)*(Lx_p/Lo))+1\n self.Nz = math.ceil((N_divs-1)*(Lz_p/Lo))+1\n self.nnodes = self.Nx*self.Ny*self.Nz\n print \"Creating channel with %g lattice points.\" % self.nnodes\n x = np.linspace(0.,Lx_p,self.Nx).astype(np.float32);\n y = np.linspace(0.,Ly_p,self.Ny).astype(np.float32);\n z = np.linspace(0.,Lz_p,self.Nz).astype(np.float32);\n \n Y,Z,X = np.meshgrid(y,z,x);\n \n self.x = np.reshape(X,self.nnodes)\n self.y = np.reshape(Y,self.nnodes)\n self.z = np.reshape(Z,self.nnodes)\n\n # get fluid properties from the included fluid library\n self.rho_p, self.nu_p = fluid_properties(fluid)\n\n # identify inlet and outlet nodes - \n # require the user to set solid boundaries separately\n self.inlet_list = np.where(self.z==0)\n self.outlet_list = np.where(self.z==Lz_p)\n \n print \"Getting obstacle list\"\n # get obstacle list\n self.obst_list = self.obst.get_obstList(self.x[:],self.y[:],self.z[:])\n \n\n print \"Generating channel solid boundaries\"\n # set channel walls\n self.set_channel_walls(wallList)\n\n # now eliminate overlap between node lists\n\n self.inlet_list = np.setxor1d(self.inlet_list[:],\n np.intersect1d(self.inlet_list[:],self.solid_list[:]))\n self.inlet_list = np.setxor1d(self.inlet_list[:],\n np.intersect1d(self.inlet_list[:],self.obst_list[:]))\n \n self.outlet_list = np.setxor1d(self.outlet_list[:],\n np.intersect1d(self.outlet_list[:],self.solid_list[:]))\n self.outlet_list = np.setxor1d(self.outlet_list[:],\n np.intersect1d(self.outlet_list[:],self.obst_list[:]))\n\n self.obst_list = np.setxor1d(self.obst_list[:],\n np.intersect1d(self.obst_list[:],self.solid_list[:]))\n \n def write_mat_file(self):\n \"\"\"\n generate the mat file to interface with genInput.py. 
Needs to save\n Lx_p, Ly_p, Lz_p, Lo, Ny_divs, rho_p, nu_p, snl, inl and onl.\n\n note that the snl and obst_list need to be combined into one list \n\n \"\"\"\n mat_dict = {}\n mat_dict['Lx_p'] = self.Lx_p\n mat_dict['Ly_p'] = self.Ly_p\n mat_dict['Lz_p'] = self.Lz_p\n mat_dict['Lo'] = self.obst.get_Lo()\n mat_dict['Ny_divs'] = self.N_divs\n mat_dict['rho_p'] = self.rho_p\n mat_dict['nu_p'] = self.nu_p\n mat_dict['snl'] = list(np.union1d(self.obst_list[:],self.solid_list[:]))\n mat_dict['inl'] = list(self.inlet_list[:])\n mat_dict['onl'] = list(self.outlet_list[:])\n\n scipy.io.savemat('geometry_description',mat_dict)\n\n\n \n def write_bc_vtk(self):\n \"\"\"\n write node lists to properly formatted VTK files\n \"\"\"\n print \"Creating boundary condition arrays\"\n obst_array = np.zeros(self.nnodes)\n obst_array[list(self.obst_list)] = 100.\n\n #print type(self.inlet_list)\n inlet_array = np.zeros(self.nnodes)\n inlet_array[list(self.inlet_list)] = 200.\n\n outlet_array = np.zeros(self.nnodes)\n outlet_array[list(self.outlet_list)] = 300.\n\n solid_array = np.zeros(self.nnodes)\n solid_array[list(self.solid_list)] = 500.\n \n dims = [int(self.Nx), int(self.Ny), int(self.Nz)]\n origin = [0., 0., 0.]\n dx = self.x[1] - self.x[0]\n spacing = [dx, dx, dx] #uniform lattice\n \n print \"Writing boundary conditions to VTK files\"\n writeVTK(inlet_array,'inlet','inlet.vtk',dims,origin,spacing)\n writeVTK(outlet_array,'outlet','outlet.vtk',dims,origin,spacing)\n writeVTK(obst_array,'obst','obst.vtk',dims,origin,spacing)\n writeVTK(solid_array,'solid','solid.vtk',dims,origin,spacing)\n\n\n # must have geometry set first\n def set_channel_walls(self,walls=['left','right','top','bottom']): \n \"\"\"\n set up to 4 walls as solid walls for the simulation\n \"\"\"\n solid_list_a = np.empty(0).flatten()\n solid_list_b = np.empty(0).flatten()\n solid_list_c = np.empty(0).flatten()\n solid_list_d = np.empty(0).flatten()\n\n for w in walls:\n if w=='right':\n solid_list_a = np.array(np.where((self.x==0.))).flatten()\n elif w=='left':\n solid_list_b = np.array(np.where((self.x == self.Lx_p))).flatten()\n elif w=='top':\n solid_list_d = np.array(np.where((self.y == self.Ly_p))).flatten()\n elif w=='bottom':\n solid_list_c = np.array(np.where((self.y == 0.))).flatten()\n\n solid_list = np.array(np.union1d(solid_list_a,solid_list_b)); \n solid_list = np.array(np.union1d(solid_list,solid_list_c))\n self.solid_list = np.array(np.union1d(solid_list,solid_list_d))\n\n\n \n\n\n\n\n\n\n\n"
] | true |
98,876 |
a5f960b897ef6e484fedd8827c619be3b0f90522
|
from tensorflow.keras import models , optimizers , losses ,activations , callbacks
from tensorflow.keras.layers import *
import tensorflow.keras.backend as K
from PIL import Image
import tensorflow as tf
import time
import os
import numpy as np


class Recognizer (object) :

    def __init__( self ):

        #input_shape = ( 10080, 1 ) # For steps
        input_shape = ( 17280, 8 ) # 1-day of 5-sec frequency values (heart rate)
        kernel_size_1 = ( 32 )
        kernel_size_2 = ( 32 )
        pool_size_1 = ( 2 )
        pool_size_2 = ( 2 )
        strides = 1

        seq_conv_model = [
            Conv1D(32, kernel_size=kernel_size_1 , strides=strides , activation=self.leaky_relu),
            Conv1D(32, kernel_size=kernel_size_1, strides=strides, activation=self.leaky_relu),
            MaxPooling1D(pool_size=pool_size_1, strides=strides),
            Conv1D(64, kernel_size=kernel_size_2 , strides=strides , activation=self.leaky_relu),
            Conv1D(64, kernel_size=kernel_size_2 , strides=strides , activation=self.leaky_relu),
            MaxPooling1D(pool_size=pool_size_2 , strides=strides),
            Flatten(),
            Dense( 64 , activation=activations.sigmoid ),
        ]

        seq_model = tf.keras.Sequential( seq_conv_model )

        input_x1 = Input( shape=input_shape )
        input_x2 = Input( shape=input_shape )

        output_x1 = seq_model( input_x1 )
        output_x2 = seq_model( input_x2 )
        seq_model.summary()
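        # both inputs run through the same weight-sharing convolutional encoder (Siamese setup);
        # the element-wise absolute difference of the two embeddings below is the distance
        # that the final sigmoid unit turns into a similarity score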
        distance_euclid = Lambda( lambda tensors : K.abs( tensors[0] - tensors[1] ))( [output_x1 , output_x2] )
        outputs = Dense( 1 , activation=activations.sigmoid) ( distance_euclid )
        self.__model = models.Model( [ input_x1 , input_x2 ] , outputs )
        self.__model.compile( loss=losses.binary_crossentropy , optimizer=optimizers.Adam(lr=0.0001), metrics=['accuracy'])

    def leaky_relu(self, x):
        return tf.nn.leaky_relu(x, alpha=0.01)

    def fit(self, X, Y , hyperparameters ):
        initial_time = time.time()
        history = self.__model.fit( X , Y ,
                                    batch_size=hyperparameters[ 'batch_size' ] ,
                                    epochs=hyperparameters[ 'epochs' ] ,
                                    callbacks=hyperparameters[ 'callbacks'],
                                    validation_data=hyperparameters[ 'val_data' ]
                                    )
        final_time = time.time()
        eta = ( final_time - initial_time )
        time_unit = 'seconds'
        if eta >= 60 :
            eta = eta / 60
            time_unit = 'minutes'
        self.__model.summary( )
        print( 'Elapsed time acquired for {} epoch(s) -> {} {}'.format( hyperparameters[ 'epochs' ] , eta , time_unit ) )
        return history

    def evaluate(self , test_X , test_Y ) :
        return self.__model.evaluate(test_X, test_Y)

    def predict(self, X ):
        predictions = self.__model.predict( X )
        return predictions

    def summary(self):
        self.__model.summary()

    def save_model(self , file_path ):
        self.__model.save(file_path )

    def load_model(self , file_path ):
        self.__model = models.load_model(file_path)
|
[
"from tensorflow.keras import models , optimizers , losses ,activations , callbacks\nfrom tensorflow.keras.layers import *\nimport tensorflow.keras.backend as K\nfrom PIL import Image\nimport tensorflow as tf\nimport time\nimport os\nimport numpy as np\n\n\nclass Recognizer (object) :\n\n\tdef __init__( self ):\n\n\t\t#input_shape = ( 10080, 1 ) # For steps\n\t\tinput_shape = ( 17280, 8 ) # 1-day of 5-sec frequency values (heart rate)\n\t\tkernel_size_1 = ( 32 )\n\t\tkernel_size_2 = ( 32 )\n\t\tpool_size_1 = ( 2 )\n\t\tpool_size_2 = ( 2 )\n\t\tstrides = 1\n\n\t\tseq_conv_model = [\n\n\t\t\tConv1D(32, kernel_size=kernel_size_1 , strides=strides , activation=self.leaky_relu),\n\t\t\tConv1D(32, kernel_size=kernel_size_1, strides=strides, activation=self.leaky_relu),\n\t\t\tMaxPooling1D(pool_size=pool_size_1, strides=strides),\n\n\t\t\tConv1D(64, kernel_size=kernel_size_2 , strides=strides , activation=self.leaky_relu),\n\t\t\tConv1D(64, kernel_size=kernel_size_2 , strides=strides , activation=self.leaky_relu),\n\t\t\tMaxPooling1D(pool_size=pool_size_2 , strides=strides),\n\n\t\t\tFlatten(),\n\n\t\t\tDense( 64 , activation=activations.sigmoid ),\n\n\t\t]\n\n\t\tseq_model = tf.keras.Sequential( seq_conv_model )\n\n\t\tinput_x1 = Input( shape=input_shape )\n\t\tinput_x2 = Input( shape=input_shape )\n\n\t\toutput_x1 = seq_model( input_x1 )\n\t\toutput_x2 = seq_model( input_x2 )\n\t\tseq_model.summary()\n\n\t\tdistance_euclid = Lambda( lambda tensors : K.abs( tensors[0] - tensors[1] ))( [output_x1 , output_x2] )\n\t\toutputs = Dense( 1 , activation=activations.sigmoid) ( distance_euclid )\n\t\tself.__model = models.Model( [ input_x1 , input_x2 ] , outputs )\n\n\t\tself.__model.compile( loss=losses.binary_crossentropy , optimizer=optimizers.Adam(lr=0.0001), metrics=['accuracy'])\n\n\tdef leaky_relu(self, x):\n\t\treturn tf.nn.leaky_relu(x, alpha=0.01)\n\n\tdef fit(self, X, Y , hyperparameters ):\n\t\tinitial_time = time.time()\n\t\thistory = self.__model.fit( X , Y ,\n\t\t\t\t\t\t batch_size=hyperparameters[ 'batch_size' ] ,\n\t\t\t\t\t\t epochs=hyperparameters[ 'epochs' ] ,\n\t\t\t\t\t\t callbacks=hyperparameters[ 'callbacks'],\n\t\t\t\t\t\t validation_data=hyperparameters[ 'val_data' ]\n\t\t\t\t\t\t )\n\t\tfinal_time = time.time()\n\t\teta = ( final_time - initial_time )\n\t\ttime_unit = 'seconds'\n\t\tif eta >= 60 :\n\t\t\teta = eta / 60\n\t\t\ttime_unit = 'minutes'\n\t\tself.__model.summary( )\n\t\tprint( 'Elapsed time acquired for {} epoch(s) -> {} {}'.format( hyperparameters[ 'epochs' ] , eta , time_unit ) )\n\t\treturn history\n\n\n\tdef evaluate(self , test_X , test_Y ) :\n\t\treturn self.__model.evaluate(test_X, test_Y)\n\n\n\tdef predict(self, X ):\n\t\tpredictions = self.__model.predict( X )\n\t\treturn predictions\n\n\n\tdef summary(self):\n\t\tself.__model.summary()\n\n\n\tdef save_model(self , file_path ):\n\t\tself.__model.save(file_path )\n\n\n\tdef load_model(self , file_path ):\n\t\tself.__model = models.load_model(file_path)\n",
"from tensorflow.keras import models, optimizers, losses, activations, callbacks\nfrom tensorflow.keras.layers import *\nimport tensorflow.keras.backend as K\nfrom PIL import Image\nimport tensorflow as tf\nimport time\nimport os\nimport numpy as np\n\n\nclass Recognizer(object):\n\n def __init__(self):\n input_shape = 17280, 8\n kernel_size_1 = 32\n kernel_size_2 = 32\n pool_size_1 = 2\n pool_size_2 = 2\n strides = 1\n seq_conv_model = [Conv1D(32, kernel_size=kernel_size_1, strides=\n strides, activation=self.leaky_relu), Conv1D(32, kernel_size=\n kernel_size_1, strides=strides, activation=self.leaky_relu),\n MaxPooling1D(pool_size=pool_size_1, strides=strides), Conv1D(64,\n kernel_size=kernel_size_2, strides=strides, activation=self.\n leaky_relu), Conv1D(64, kernel_size=kernel_size_2, strides=\n strides, activation=self.leaky_relu), MaxPooling1D(pool_size=\n pool_size_2, strides=strides), Flatten(), Dense(64, activation=\n activations.sigmoid)]\n seq_model = tf.keras.Sequential(seq_conv_model)\n input_x1 = Input(shape=input_shape)\n input_x2 = Input(shape=input_shape)\n output_x1 = seq_model(input_x1)\n output_x2 = seq_model(input_x2)\n seq_model.summary()\n distance_euclid = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1])\n )([output_x1, output_x2])\n outputs = Dense(1, activation=activations.sigmoid)(distance_euclid)\n self.__model = models.Model([input_x1, input_x2], outputs)\n self.__model.compile(loss=losses.binary_crossentropy, optimizer=\n optimizers.Adam(lr=0.0001), metrics=['accuracy'])\n\n def leaky_relu(self, x):\n return tf.nn.leaky_relu(x, alpha=0.01)\n\n def fit(self, X, Y, hyperparameters):\n initial_time = time.time()\n history = self.__model.fit(X, Y, batch_size=hyperparameters[\n 'batch_size'], epochs=hyperparameters['epochs'], callbacks=\n hyperparameters['callbacks'], validation_data=hyperparameters[\n 'val_data'])\n final_time = time.time()\n eta = final_time - initial_time\n time_unit = 'seconds'\n if eta >= 60:\n eta = eta / 60\n time_unit = 'minutes'\n self.__model.summary()\n print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(\n hyperparameters['epochs'], eta, time_unit))\n return history\n\n def evaluate(self, test_X, test_Y):\n return self.__model.evaluate(test_X, test_Y)\n\n def predict(self, X):\n predictions = self.__model.predict(X)\n return predictions\n\n def summary(self):\n self.__model.summary()\n\n def save_model(self, file_path):\n self.__model.save(file_path)\n\n def load_model(self, file_path):\n self.__model = models.load_model(file_path)\n",
"<import token>\n\n\nclass Recognizer(object):\n\n def __init__(self):\n input_shape = 17280, 8\n kernel_size_1 = 32\n kernel_size_2 = 32\n pool_size_1 = 2\n pool_size_2 = 2\n strides = 1\n seq_conv_model = [Conv1D(32, kernel_size=kernel_size_1, strides=\n strides, activation=self.leaky_relu), Conv1D(32, kernel_size=\n kernel_size_1, strides=strides, activation=self.leaky_relu),\n MaxPooling1D(pool_size=pool_size_1, strides=strides), Conv1D(64,\n kernel_size=kernel_size_2, strides=strides, activation=self.\n leaky_relu), Conv1D(64, kernel_size=kernel_size_2, strides=\n strides, activation=self.leaky_relu), MaxPooling1D(pool_size=\n pool_size_2, strides=strides), Flatten(), Dense(64, activation=\n activations.sigmoid)]\n seq_model = tf.keras.Sequential(seq_conv_model)\n input_x1 = Input(shape=input_shape)\n input_x2 = Input(shape=input_shape)\n output_x1 = seq_model(input_x1)\n output_x2 = seq_model(input_x2)\n seq_model.summary()\n distance_euclid = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1])\n )([output_x1, output_x2])\n outputs = Dense(1, activation=activations.sigmoid)(distance_euclid)\n self.__model = models.Model([input_x1, input_x2], outputs)\n self.__model.compile(loss=losses.binary_crossentropy, optimizer=\n optimizers.Adam(lr=0.0001), metrics=['accuracy'])\n\n def leaky_relu(self, x):\n return tf.nn.leaky_relu(x, alpha=0.01)\n\n def fit(self, X, Y, hyperparameters):\n initial_time = time.time()\n history = self.__model.fit(X, Y, batch_size=hyperparameters[\n 'batch_size'], epochs=hyperparameters['epochs'], callbacks=\n hyperparameters['callbacks'], validation_data=hyperparameters[\n 'val_data'])\n final_time = time.time()\n eta = final_time - initial_time\n time_unit = 'seconds'\n if eta >= 60:\n eta = eta / 60\n time_unit = 'minutes'\n self.__model.summary()\n print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(\n hyperparameters['epochs'], eta, time_unit))\n return history\n\n def evaluate(self, test_X, test_Y):\n return self.__model.evaluate(test_X, test_Y)\n\n def predict(self, X):\n predictions = self.__model.predict(X)\n return predictions\n\n def summary(self):\n self.__model.summary()\n\n def save_model(self, file_path):\n self.__model.save(file_path)\n\n def load_model(self, file_path):\n self.__model = models.load_model(file_path)\n",
"<import token>\n\n\nclass Recognizer(object):\n\n def __init__(self):\n input_shape = 17280, 8\n kernel_size_1 = 32\n kernel_size_2 = 32\n pool_size_1 = 2\n pool_size_2 = 2\n strides = 1\n seq_conv_model = [Conv1D(32, kernel_size=kernel_size_1, strides=\n strides, activation=self.leaky_relu), Conv1D(32, kernel_size=\n kernel_size_1, strides=strides, activation=self.leaky_relu),\n MaxPooling1D(pool_size=pool_size_1, strides=strides), Conv1D(64,\n kernel_size=kernel_size_2, strides=strides, activation=self.\n leaky_relu), Conv1D(64, kernel_size=kernel_size_2, strides=\n strides, activation=self.leaky_relu), MaxPooling1D(pool_size=\n pool_size_2, strides=strides), Flatten(), Dense(64, activation=\n activations.sigmoid)]\n seq_model = tf.keras.Sequential(seq_conv_model)\n input_x1 = Input(shape=input_shape)\n input_x2 = Input(shape=input_shape)\n output_x1 = seq_model(input_x1)\n output_x2 = seq_model(input_x2)\n seq_model.summary()\n distance_euclid = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1])\n )([output_x1, output_x2])\n outputs = Dense(1, activation=activations.sigmoid)(distance_euclid)\n self.__model = models.Model([input_x1, input_x2], outputs)\n self.__model.compile(loss=losses.binary_crossentropy, optimizer=\n optimizers.Adam(lr=0.0001), metrics=['accuracy'])\n\n def leaky_relu(self, x):\n return tf.nn.leaky_relu(x, alpha=0.01)\n\n def fit(self, X, Y, hyperparameters):\n initial_time = time.time()\n history = self.__model.fit(X, Y, batch_size=hyperparameters[\n 'batch_size'], epochs=hyperparameters['epochs'], callbacks=\n hyperparameters['callbacks'], validation_data=hyperparameters[\n 'val_data'])\n final_time = time.time()\n eta = final_time - initial_time\n time_unit = 'seconds'\n if eta >= 60:\n eta = eta / 60\n time_unit = 'minutes'\n self.__model.summary()\n print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(\n hyperparameters['epochs'], eta, time_unit))\n return history\n\n def evaluate(self, test_X, test_Y):\n return self.__model.evaluate(test_X, test_Y)\n <function token>\n\n def summary(self):\n self.__model.summary()\n\n def save_model(self, file_path):\n self.__model.save(file_path)\n\n def load_model(self, file_path):\n self.__model = models.load_model(file_path)\n",
"<import token>\n\n\nclass Recognizer(object):\n\n def __init__(self):\n input_shape = 17280, 8\n kernel_size_1 = 32\n kernel_size_2 = 32\n pool_size_1 = 2\n pool_size_2 = 2\n strides = 1\n seq_conv_model = [Conv1D(32, kernel_size=kernel_size_1, strides=\n strides, activation=self.leaky_relu), Conv1D(32, kernel_size=\n kernel_size_1, strides=strides, activation=self.leaky_relu),\n MaxPooling1D(pool_size=pool_size_1, strides=strides), Conv1D(64,\n kernel_size=kernel_size_2, strides=strides, activation=self.\n leaky_relu), Conv1D(64, kernel_size=kernel_size_2, strides=\n strides, activation=self.leaky_relu), MaxPooling1D(pool_size=\n pool_size_2, strides=strides), Flatten(), Dense(64, activation=\n activations.sigmoid)]\n seq_model = tf.keras.Sequential(seq_conv_model)\n input_x1 = Input(shape=input_shape)\n input_x2 = Input(shape=input_shape)\n output_x1 = seq_model(input_x1)\n output_x2 = seq_model(input_x2)\n seq_model.summary()\n distance_euclid = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1])\n )([output_x1, output_x2])\n outputs = Dense(1, activation=activations.sigmoid)(distance_euclid)\n self.__model = models.Model([input_x1, input_x2], outputs)\n self.__model.compile(loss=losses.binary_crossentropy, optimizer=\n optimizers.Adam(lr=0.0001), metrics=['accuracy'])\n\n def leaky_relu(self, x):\n return tf.nn.leaky_relu(x, alpha=0.01)\n\n def fit(self, X, Y, hyperparameters):\n initial_time = time.time()\n history = self.__model.fit(X, Y, batch_size=hyperparameters[\n 'batch_size'], epochs=hyperparameters['epochs'], callbacks=\n hyperparameters['callbacks'], validation_data=hyperparameters[\n 'val_data'])\n final_time = time.time()\n eta = final_time - initial_time\n time_unit = 'seconds'\n if eta >= 60:\n eta = eta / 60\n time_unit = 'minutes'\n self.__model.summary()\n print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(\n hyperparameters['epochs'], eta, time_unit))\n return history\n <function token>\n <function token>\n\n def summary(self):\n self.__model.summary()\n\n def save_model(self, file_path):\n self.__model.save(file_path)\n\n def load_model(self, file_path):\n self.__model = models.load_model(file_path)\n",
"<import token>\n\n\nclass Recognizer(object):\n\n def __init__(self):\n input_shape = 17280, 8\n kernel_size_1 = 32\n kernel_size_2 = 32\n pool_size_1 = 2\n pool_size_2 = 2\n strides = 1\n seq_conv_model = [Conv1D(32, kernel_size=kernel_size_1, strides=\n strides, activation=self.leaky_relu), Conv1D(32, kernel_size=\n kernel_size_1, strides=strides, activation=self.leaky_relu),\n MaxPooling1D(pool_size=pool_size_1, strides=strides), Conv1D(64,\n kernel_size=kernel_size_2, strides=strides, activation=self.\n leaky_relu), Conv1D(64, kernel_size=kernel_size_2, strides=\n strides, activation=self.leaky_relu), MaxPooling1D(pool_size=\n pool_size_2, strides=strides), Flatten(), Dense(64, activation=\n activations.sigmoid)]\n seq_model = tf.keras.Sequential(seq_conv_model)\n input_x1 = Input(shape=input_shape)\n input_x2 = Input(shape=input_shape)\n output_x1 = seq_model(input_x1)\n output_x2 = seq_model(input_x2)\n seq_model.summary()\n distance_euclid = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1])\n )([output_x1, output_x2])\n outputs = Dense(1, activation=activations.sigmoid)(distance_euclid)\n self.__model = models.Model([input_x1, input_x2], outputs)\n self.__model.compile(loss=losses.binary_crossentropy, optimizer=\n optimizers.Adam(lr=0.0001), metrics=['accuracy'])\n\n def leaky_relu(self, x):\n return tf.nn.leaky_relu(x, alpha=0.01)\n\n def fit(self, X, Y, hyperparameters):\n initial_time = time.time()\n history = self.__model.fit(X, Y, batch_size=hyperparameters[\n 'batch_size'], epochs=hyperparameters['epochs'], callbacks=\n hyperparameters['callbacks'], validation_data=hyperparameters[\n 'val_data'])\n final_time = time.time()\n eta = final_time - initial_time\n time_unit = 'seconds'\n if eta >= 60:\n eta = eta / 60\n time_unit = 'minutes'\n self.__model.summary()\n print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(\n hyperparameters['epochs'], eta, time_unit))\n return history\n <function token>\n <function token>\n <function token>\n\n def save_model(self, file_path):\n self.__model.save(file_path)\n\n def load_model(self, file_path):\n self.__model = models.load_model(file_path)\n",
"<import token>\n\n\nclass Recognizer(object):\n <function token>\n\n def leaky_relu(self, x):\n return tf.nn.leaky_relu(x, alpha=0.01)\n\n def fit(self, X, Y, hyperparameters):\n initial_time = time.time()\n history = self.__model.fit(X, Y, batch_size=hyperparameters[\n 'batch_size'], epochs=hyperparameters['epochs'], callbacks=\n hyperparameters['callbacks'], validation_data=hyperparameters[\n 'val_data'])\n final_time = time.time()\n eta = final_time - initial_time\n time_unit = 'seconds'\n if eta >= 60:\n eta = eta / 60\n time_unit = 'minutes'\n self.__model.summary()\n print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(\n hyperparameters['epochs'], eta, time_unit))\n return history\n <function token>\n <function token>\n <function token>\n\n def save_model(self, file_path):\n self.__model.save(file_path)\n\n def load_model(self, file_path):\n self.__model = models.load_model(file_path)\n",
"<import token>\n\n\nclass Recognizer(object):\n <function token>\n\n def leaky_relu(self, x):\n return tf.nn.leaky_relu(x, alpha=0.01)\n\n def fit(self, X, Y, hyperparameters):\n initial_time = time.time()\n history = self.__model.fit(X, Y, batch_size=hyperparameters[\n 'batch_size'], epochs=hyperparameters['epochs'], callbacks=\n hyperparameters['callbacks'], validation_data=hyperparameters[\n 'val_data'])\n final_time = time.time()\n eta = final_time - initial_time\n time_unit = 'seconds'\n if eta >= 60:\n eta = eta / 60\n time_unit = 'minutes'\n self.__model.summary()\n print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(\n hyperparameters['epochs'], eta, time_unit))\n return history\n <function token>\n <function token>\n <function token>\n\n def save_model(self, file_path):\n self.__model.save(file_path)\n <function token>\n",
"<import token>\n\n\nclass Recognizer(object):\n <function token>\n\n def leaky_relu(self, x):\n return tf.nn.leaky_relu(x, alpha=0.01)\n\n def fit(self, X, Y, hyperparameters):\n initial_time = time.time()\n history = self.__model.fit(X, Y, batch_size=hyperparameters[\n 'batch_size'], epochs=hyperparameters['epochs'], callbacks=\n hyperparameters['callbacks'], validation_data=hyperparameters[\n 'val_data'])\n final_time = time.time()\n eta = final_time - initial_time\n time_unit = 'seconds'\n if eta >= 60:\n eta = eta / 60\n time_unit = 'minutes'\n self.__model.summary()\n print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(\n hyperparameters['epochs'], eta, time_unit))\n return history\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Recognizer(object):\n <function token>\n <function token>\n\n def fit(self, X, Y, hyperparameters):\n initial_time = time.time()\n history = self.__model.fit(X, Y, batch_size=hyperparameters[\n 'batch_size'], epochs=hyperparameters['epochs'], callbacks=\n hyperparameters['callbacks'], validation_data=hyperparameters[\n 'val_data'])\n final_time = time.time()\n eta = final_time - initial_time\n time_unit = 'seconds'\n if eta >= 60:\n eta = eta / 60\n time_unit = 'minutes'\n self.__model.summary()\n print('Elapsed time acquired for {} epoch(s) -> {} {}'.format(\n hyperparameters['epochs'], eta, time_unit))\n return history\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Recognizer(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
98,877 |
44e012978ef6575c0ccbc79c9458fb5b80da2796
|
""" Base components for the Veracity SDK.
"""
class ApiBase(object):
""" Base for API access classes. Provides connection/disconnection.
All web calls are async using aiohttp.
Arguments:
credential (veracity.Credential): Provides oauth access tokens for the
API (the user has to log in to retrieve these unless your client
application has permissions to use the service.)
subscription_key (str): Your application's API subscription key. Gets
sent in th Ocp-Apim-Subscription-Key header.
scope (str): A valid scope for a Veracity API. Only one permitted. See
`identity.ALLOWED_SCOPES` for options.
"""
def __init__(self, credential, subscription_key, scope):
self.credential = credential
self.subscription_key = subscription_key
# By default we ask for access permission the service and data fabric APIs.
self.scopes = [scope]
# Use this session for all HTTP requests. We also add authentication
# headers to all requests by default, so the child API services do not
# need to.
self._session = None
self._headers = {}
@property
def connected(self):
return self._session is not None
@property
def session(self):
if self._session is None:
raise RuntimeError("Must connect API before use.")
return self._session
@property
def default_headers(self):
return self._headers
    async def connect(self, reset=False, credential=None, key=None):
        """ Create a single HTTP session to call the API.
        Optionally reset the existing session or change the credentials.

        Args:
            reset (bool): Set True to force HTTP session to reconnect.
            credential (veracity.Credential): Provides oauth access tokens for the
                API (the user has to log in to retrieve these unless your client
                application has permissions to use the service.)
            key (str): Your application's API subscription key. Gets
                sent in the Ocp-Apim-Subscription-Key header.
        """
        # Use this session for all HTTP requests. We also add authentication
        # headers to all requests; which we attempt to set now.
        import aiohttp

        reset_headers = reset or (self._session is None)

        if credential is not None:
            self.credential = credential
            reset_headers = True

        if key is not None:
            self.subscription_key = key
            reset_headers = True

        if reset_headers:
            token = self.credential.get_token(self.scopes)
            if 'error' in token:
                raise RuntimeError(f'Failed to get token:\n{token}')
            assert 'access_token' in token, 'Token does not provide API access privileges for requested scopes.'
            actual_token = token['access_token']
            self._headers = {
                'Ocp-Apim-Subscription-Key': self.subscription_key,
                'Authorization': f'Bearer {actual_token}',
            }

        if reset:
            # This sets _session to None.
            await self.disconnect()

        if self._session is None:
            self._session = aiohttp.ClientSession(headers=self._headers)

        return self._session
    async def disconnect(self):
        """ Disconnects the HTTP session. Not essential but good practice.
        """
        from asyncio import shield
        if self._session is not None:
            await shield(self._session.connector.close())
            await shield(self._session.close())
            self._session = None
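
# Illustrative usage sketch only (hypothetical names): `MyServiceApi`, `my_credential`,
# the key and the scope string below are placeholders, not part of this module.
#
#   api = MyServiceApi(my_credential, subscription_key='<your key>', scope='<a valid scope>')
#   await api.connect()
#   ...  # call the service through api.session
#   await api.disconnect()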
|
[
"\"\"\" Base components for the Veracity SDK.\r\n\"\"\"\r\n\r\n\r\nclass ApiBase(object):\r\n \"\"\" Base for API access classes. Provides connection/disconnection.\r\n\r\n All web calls are async using aiohttp.\r\n\r\n Arguments:\r\n credential (veracity.Credential): Provides oauth access tokens for the\r\n API (the user has to log in to retrieve these unless your client\r\n application has permissions to use the service.)\r\n subscription_key (str): Your application's API subscription key. Gets\r\n sent in th Ocp-Apim-Subscription-Key header.\r\n scope (str): A valid scope for a Veracity API. Only one permitted. See\r\n `identity.ALLOWED_SCOPES` for options.\r\n \"\"\"\r\n\r\n def __init__(self, credential, subscription_key, scope):\r\n self.credential = credential\r\n self.subscription_key = subscription_key\r\n # By default we ask for access permission the service and data fabric APIs.\r\n self.scopes = [scope]\r\n # Use this session for all HTTP requests. We also add authentication\r\n # headers to all requests by default, so the child API services do not\r\n # need to.\r\n self._session = None\r\n self._headers = {}\r\n\r\n @property\r\n def connected(self):\r\n return self._session is not None\r\n\r\n @property\r\n def session(self):\r\n if self._session is None:\r\n raise RuntimeError(\"Must connect API before use.\")\r\n return self._session\r\n\r\n @property\r\n def default_headers(self):\r\n return self._headers\r\n\r\n async def connect(self, reset=False, credential=None, key=None):\r\n \"\"\" Create a single HTTP session to call the API.\r\n Optionally reset the existing session or change the credentials.\r\n\r\n Args:\r\n reset (bool): Set True to force HTTP session to reconnect.\r\n credential (veracity.Credential): Provides oauth access tokens for the\r\n API (the user has to log in to retrieve these unless your client\r\n application has permissions to use the service.)\r\n subscription_key (str): Your application's API subscription key. Gets\r\n sent in th Ocp-Apim-Subscription-Key header.\r\n \"\"\"\r\n # Use this session for all HTTP requests. We also add authentication\r\n # headers to all requests; which we attempt to set now.\r\n import aiohttp\r\n\r\n reset_headers = reset or (self._session is None)\r\n\r\n if credential is not None:\r\n self.credential = credential\r\n reset_headers = True\r\n\r\n if key is not None:\r\n self.subscription_key = key\r\n reset_headers = True\r\n\r\n if reset_headers:\r\n token = self.credential.get_token(self.scopes)\r\n if 'error' in token:\r\n raise RuntimeError(f'Failed to get token:\\n{token}')\r\n assert 'access_token' in token, 'Token does not provide API access privileges for requested scopes.'\r\n actual_token = token['access_token']\r\n self._headers = {\r\n 'Ocp-Apim-Subscription-Key': self.subscription_key,\r\n 'Authorization': f'Bearer {actual_token}',\r\n }\r\n\r\n if reset:\r\n # This sets _session to None.\r\n await self.disconnect()\r\n\r\n if self._session is None:\r\n self._session = aiohttp.ClientSession(headers=self._headers)\r\n\r\n return self._session\r\n\r\n async def disconnect(self):\r\n \"\"\" Disconnects the HTTP session. Not essential but good practice.\r\n \"\"\"\r\n from asyncio import shield\r\n if self._session is not None:\r\n await shield(self._session.connector.close())\r\n await shield(self._session.close())\r\n self._session = None\r\n",
"<docstring token>\n\n\nclass ApiBase(object):\n \"\"\" Base for API access classes. Provides connection/disconnection.\n\n All web calls are async using aiohttp.\n\n Arguments:\n credential (veracity.Credential): Provides oauth access tokens for the\n API (the user has to log in to retrieve these unless your client\n application has permissions to use the service.)\n subscription_key (str): Your application's API subscription key. Gets\n sent in th Ocp-Apim-Subscription-Key header.\n scope (str): A valid scope for a Veracity API. Only one permitted. See\n `identity.ALLOWED_SCOPES` for options.\n \"\"\"\n\n def __init__(self, credential, subscription_key, scope):\n self.credential = credential\n self.subscription_key = subscription_key\n self.scopes = [scope]\n self._session = None\n self._headers = {}\n\n @property\n def connected(self):\n return self._session is not None\n\n @property\n def session(self):\n if self._session is None:\n raise RuntimeError('Must connect API before use.')\n return self._session\n\n @property\n def default_headers(self):\n return self._headers\n\n async def connect(self, reset=False, credential=None, key=None):\n \"\"\" Create a single HTTP session to call the API.\n Optionally reset the existing session or change the credentials.\n\n Args:\n reset (bool): Set True to force HTTP session to reconnect.\n credential (veracity.Credential): Provides oauth access tokens for the\n API (the user has to log in to retrieve these unless your client\n application has permissions to use the service.)\n subscription_key (str): Your application's API subscription key. Gets\n sent in th Ocp-Apim-Subscription-Key header.\n \"\"\"\n import aiohttp\n reset_headers = reset or self._session is None\n if credential is not None:\n self.credential = credential\n reset_headers = True\n if key is not None:\n self.subscription_key = key\n reset_headers = True\n if reset_headers:\n token = self.credential.get_token(self.scopes)\n if 'error' in token:\n raise RuntimeError(f'Failed to get token:\\n{token}')\n assert 'access_token' in token, 'Token does not provide API access privileges for requested scopes.'\n actual_token = token['access_token']\n self._headers = {'Ocp-Apim-Subscription-Key': self.\n subscription_key, 'Authorization': f'Bearer {actual_token}'}\n if reset:\n await self.disconnect()\n if self._session is None:\n self._session = aiohttp.ClientSession(headers=self._headers)\n return self._session\n\n async def disconnect(self):\n \"\"\" Disconnects the HTTP session. Not essential but good practice.\n \"\"\"\n from asyncio import shield\n if self._session is not None:\n await shield(self._session.connector.close())\n await shield(self._session.close())\n self._session = None\n",
"<docstring token>\n\n\nclass ApiBase(object):\n <docstring token>\n\n def __init__(self, credential, subscription_key, scope):\n self.credential = credential\n self.subscription_key = subscription_key\n self.scopes = [scope]\n self._session = None\n self._headers = {}\n\n @property\n def connected(self):\n return self._session is not None\n\n @property\n def session(self):\n if self._session is None:\n raise RuntimeError('Must connect API before use.')\n return self._session\n\n @property\n def default_headers(self):\n return self._headers\n\n async def connect(self, reset=False, credential=None, key=None):\n \"\"\" Create a single HTTP session to call the API.\n Optionally reset the existing session or change the credentials.\n\n Args:\n reset (bool): Set True to force HTTP session to reconnect.\n credential (veracity.Credential): Provides oauth access tokens for the\n API (the user has to log in to retrieve these unless your client\n application has permissions to use the service.)\n subscription_key (str): Your application's API subscription key. Gets\n sent in th Ocp-Apim-Subscription-Key header.\n \"\"\"\n import aiohttp\n reset_headers = reset or self._session is None\n if credential is not None:\n self.credential = credential\n reset_headers = True\n if key is not None:\n self.subscription_key = key\n reset_headers = True\n if reset_headers:\n token = self.credential.get_token(self.scopes)\n if 'error' in token:\n raise RuntimeError(f'Failed to get token:\\n{token}')\n assert 'access_token' in token, 'Token does not provide API access privileges for requested scopes.'\n actual_token = token['access_token']\n self._headers = {'Ocp-Apim-Subscription-Key': self.\n subscription_key, 'Authorization': f'Bearer {actual_token}'}\n if reset:\n await self.disconnect()\n if self._session is None:\n self._session = aiohttp.ClientSession(headers=self._headers)\n return self._session\n\n async def disconnect(self):\n \"\"\" Disconnects the HTTP session. Not essential but good practice.\n \"\"\"\n from asyncio import shield\n if self._session is not None:\n await shield(self._session.connector.close())\n await shield(self._session.close())\n self._session = None\n",
"<docstring token>\n\n\nclass ApiBase(object):\n <docstring token>\n\n def __init__(self, credential, subscription_key, scope):\n self.credential = credential\n self.subscription_key = subscription_key\n self.scopes = [scope]\n self._session = None\n self._headers = {}\n <function token>\n\n @property\n def session(self):\n if self._session is None:\n raise RuntimeError('Must connect API before use.')\n return self._session\n\n @property\n def default_headers(self):\n return self._headers\n\n async def connect(self, reset=False, credential=None, key=None):\n \"\"\" Create a single HTTP session to call the API.\n Optionally reset the existing session or change the credentials.\n\n Args:\n reset (bool): Set True to force HTTP session to reconnect.\n credential (veracity.Credential): Provides oauth access tokens for the\n API (the user has to log in to retrieve these unless your client\n application has permissions to use the service.)\n subscription_key (str): Your application's API subscription key. Gets\n sent in th Ocp-Apim-Subscription-Key header.\n \"\"\"\n import aiohttp\n reset_headers = reset or self._session is None\n if credential is not None:\n self.credential = credential\n reset_headers = True\n if key is not None:\n self.subscription_key = key\n reset_headers = True\n if reset_headers:\n token = self.credential.get_token(self.scopes)\n if 'error' in token:\n raise RuntimeError(f'Failed to get token:\\n{token}')\n assert 'access_token' in token, 'Token does not provide API access privileges for requested scopes.'\n actual_token = token['access_token']\n self._headers = {'Ocp-Apim-Subscription-Key': self.\n subscription_key, 'Authorization': f'Bearer {actual_token}'}\n if reset:\n await self.disconnect()\n if self._session is None:\n self._session = aiohttp.ClientSession(headers=self._headers)\n return self._session\n\n async def disconnect(self):\n \"\"\" Disconnects the HTTP session. Not essential but good practice.\n \"\"\"\n from asyncio import shield\n if self._session is not None:\n await shield(self._session.connector.close())\n await shield(self._session.close())\n self._session = None\n",
"<docstring token>\n\n\nclass ApiBase(object):\n <docstring token>\n\n def __init__(self, credential, subscription_key, scope):\n self.credential = credential\n self.subscription_key = subscription_key\n self.scopes = [scope]\n self._session = None\n self._headers = {}\n <function token>\n <function token>\n\n @property\n def default_headers(self):\n return self._headers\n\n async def connect(self, reset=False, credential=None, key=None):\n \"\"\" Create a single HTTP session to call the API.\n Optionally reset the existing session or change the credentials.\n\n Args:\n reset (bool): Set True to force HTTP session to reconnect.\n credential (veracity.Credential): Provides oauth access tokens for the\n API (the user has to log in to retrieve these unless your client\n application has permissions to use the service.)\n subscription_key (str): Your application's API subscription key. Gets\n sent in th Ocp-Apim-Subscription-Key header.\n \"\"\"\n import aiohttp\n reset_headers = reset or self._session is None\n if credential is not None:\n self.credential = credential\n reset_headers = True\n if key is not None:\n self.subscription_key = key\n reset_headers = True\n if reset_headers:\n token = self.credential.get_token(self.scopes)\n if 'error' in token:\n raise RuntimeError(f'Failed to get token:\\n{token}')\n assert 'access_token' in token, 'Token does not provide API access privileges for requested scopes.'\n actual_token = token['access_token']\n self._headers = {'Ocp-Apim-Subscription-Key': self.\n subscription_key, 'Authorization': f'Bearer {actual_token}'}\n if reset:\n await self.disconnect()\n if self._session is None:\n self._session = aiohttp.ClientSession(headers=self._headers)\n return self._session\n\n async def disconnect(self):\n \"\"\" Disconnects the HTTP session. Not essential but good practice.\n \"\"\"\n from asyncio import shield\n if self._session is not None:\n await shield(self._session.connector.close())\n await shield(self._session.close())\n self._session = None\n",
"<docstring token>\n\n\nclass ApiBase(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n @property\n def default_headers(self):\n return self._headers\n\n async def connect(self, reset=False, credential=None, key=None):\n \"\"\" Create a single HTTP session to call the API.\n Optionally reset the existing session or change the credentials.\n\n Args:\n reset (bool): Set True to force HTTP session to reconnect.\n credential (veracity.Credential): Provides oauth access tokens for the\n API (the user has to log in to retrieve these unless your client\n application has permissions to use the service.)\n subscription_key (str): Your application's API subscription key. Gets\n sent in th Ocp-Apim-Subscription-Key header.\n \"\"\"\n import aiohttp\n reset_headers = reset or self._session is None\n if credential is not None:\n self.credential = credential\n reset_headers = True\n if key is not None:\n self.subscription_key = key\n reset_headers = True\n if reset_headers:\n token = self.credential.get_token(self.scopes)\n if 'error' in token:\n raise RuntimeError(f'Failed to get token:\\n{token}')\n assert 'access_token' in token, 'Token does not provide API access privileges for requested scopes.'\n actual_token = token['access_token']\n self._headers = {'Ocp-Apim-Subscription-Key': self.\n subscription_key, 'Authorization': f'Bearer {actual_token}'}\n if reset:\n await self.disconnect()\n if self._session is None:\n self._session = aiohttp.ClientSession(headers=self._headers)\n return self._session\n\n async def disconnect(self):\n \"\"\" Disconnects the HTTP session. Not essential but good practice.\n \"\"\"\n from asyncio import shield\n if self._session is not None:\n await shield(self._session.connector.close())\n await shield(self._session.close())\n self._session = None\n",
"<docstring token>\n\n\nclass ApiBase(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n async def connect(self, reset=False, credential=None, key=None):\n \"\"\" Create a single HTTP session to call the API.\n Optionally reset the existing session or change the credentials.\n\n Args:\n reset (bool): Set True to force HTTP session to reconnect.\n credential (veracity.Credential): Provides oauth access tokens for the\n API (the user has to log in to retrieve these unless your client\n application has permissions to use the service.)\n subscription_key (str): Your application's API subscription key. Gets\n sent in th Ocp-Apim-Subscription-Key header.\n \"\"\"\n import aiohttp\n reset_headers = reset or self._session is None\n if credential is not None:\n self.credential = credential\n reset_headers = True\n if key is not None:\n self.subscription_key = key\n reset_headers = True\n if reset_headers:\n token = self.credential.get_token(self.scopes)\n if 'error' in token:\n raise RuntimeError(f'Failed to get token:\\n{token}')\n assert 'access_token' in token, 'Token does not provide API access privileges for requested scopes.'\n actual_token = token['access_token']\n self._headers = {'Ocp-Apim-Subscription-Key': self.\n subscription_key, 'Authorization': f'Bearer {actual_token}'}\n if reset:\n await self.disconnect()\n if self._session is None:\n self._session = aiohttp.ClientSession(headers=self._headers)\n return self._session\n\n async def disconnect(self):\n \"\"\" Disconnects the HTTP session. Not essential but good practice.\n \"\"\"\n from asyncio import shield\n if self._session is not None:\n await shield(self._session.connector.close())\n await shield(self._session.close())\n self._session = None\n",
"<docstring token>\n<class token>\n"
] | false |
98,878 |
60596980abd0b5782a4dce9395e7c8a60889e8bb
|
def d(n):
    s = 1
    t = n ** 0.5
    for i in range(2, int(t) + 1):
        if n % i == 0: s += i + n // i
    if t == int(t):
        s -= t # correct s if t is a perfect square
    return s


def build_abundant(L):
    abn = set()
    for n in range(12, L):
        if d(n) > n:
            abn.add(n)
    return abn
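# it is known that every integer greater than 28123 can be written as the sum of
# two abundant numbers, so sums only need to be checked up to this limit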
limit = 28123
abundant = build_abundant(limit)

for _ in range(int(input().strip())):
    num = int(input().strip())
    if num < 24:
        print('NO')
    elif num > 46 and num % 2 == 0:
        print('YES')
    elif num > limit:
        print('YES')
    elif any((num - a in abundant) for a in abundant):
        print('YES')
    else:
        print('NO')
|
[
"def d(n):\n s = 1\n t = n ** 0.5\n for i in range(2, int(t) + 1):\n if n % i == 0: s += i + n // i\n if t == int(t):\n s -= t # correct s if t is a perfect square\n return s\n\n\ndef build_abundant(L):\n abn = set()\n for n in range(12, L):\n if d(n) > n:\n abn.add(n)\n return abn\n\nlimit = 28123\nabundant = build_abundant(limit)\n\nfor _ in range(int(input().strip())):\n num = int(input().strip())\n if num < 24:\n print('NO')\n elif num > 46 and num % 2 == 0:\n print('YES')\n elif num > limit:\n print('YES')\n elif any((num - a in abundant) for a in abundant):\n print('YES')\n else:\n print('NO')\n",
"def d(n):\n s = 1\n t = n ** 0.5\n for i in range(2, int(t) + 1):\n if n % i == 0:\n s += i + n // i\n if t == int(t):\n s -= t\n return s\n\n\ndef build_abundant(L):\n abn = set()\n for n in range(12, L):\n if d(n) > n:\n abn.add(n)\n return abn\n\n\nlimit = 28123\nabundant = build_abundant(limit)\nfor _ in range(int(input().strip())):\n num = int(input().strip())\n if num < 24:\n print('NO')\n elif num > 46 and num % 2 == 0:\n print('YES')\n elif num > limit:\n print('YES')\n elif any(num - a in abundant for a in abundant):\n print('YES')\n else:\n print('NO')\n",
"def d(n):\n s = 1\n t = n ** 0.5\n for i in range(2, int(t) + 1):\n if n % i == 0:\n s += i + n // i\n if t == int(t):\n s -= t\n return s\n\n\ndef build_abundant(L):\n abn = set()\n for n in range(12, L):\n if d(n) > n:\n abn.add(n)\n return abn\n\n\n<assignment token>\nfor _ in range(int(input().strip())):\n num = int(input().strip())\n if num < 24:\n print('NO')\n elif num > 46 and num % 2 == 0:\n print('YES')\n elif num > limit:\n print('YES')\n elif any(num - a in abundant for a in abundant):\n print('YES')\n else:\n print('NO')\n",
"def d(n):\n s = 1\n t = n ** 0.5\n for i in range(2, int(t) + 1):\n if n % i == 0:\n s += i + n // i\n if t == int(t):\n s -= t\n return s\n\n\ndef build_abundant(L):\n abn = set()\n for n in range(12, L):\n if d(n) > n:\n abn.add(n)\n return abn\n\n\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef build_abundant(L):\n abn = set()\n for n in range(12, L):\n if d(n) > n:\n abn.add(n)\n return abn\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
98,879 |
82ccb55d03466edbce711b983a6c9fc508bd53b6
|
import copy
spam = ['apples', 'bananas', 'tofu', 'cats']
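# print the list items joined by ' and ', ending the last one with a period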
for i in range(len(spam)):
    if i != len(spam) - 1:
        print(spam[i], end = " and ")
    elif i == len(spam) - 1:
        print(spam[i] + '.')
grid = [['.', '.', '.', '.', '.', '.'],
        ['.', 'O', 'O', '.', '.', '.'],
        ['O', 'O', 'O', 'O', '.', '.'],
        ['O', 'O', 'O', 'O', 'O', '.'],
        ['.', 'O', 'O', 'O', 'O', 'O'],
        ['O', 'O', 'O', 'O', 'O', '.'],
        ['O', 'O', 'O', 'O', '.', '.'],
        ['.', 'O', 'O', '.', '.', '.'],
        ['.', '.', '.', '.', '.', '.']]
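# print the grid transposed: each printed row is a column of grid, so the picture comes out rotated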
for x in range(0,6):
    for y in range(0,9):
        print(grid[y][x], end='')
    print()
|
[
"import copy\r\n\r\nspam = ['apples', 'bananas', 'tofu', 'cats']\r\n\r\nfor i in range(len(spam)):\r\n if i != len(spam) - 1:\r\n print(spam[i], end = \" and \")\r\n elif i == len(spam) - 1:\r\n print(spam[i] + '.')\r\n\r\ngrid = [['.', '.', '.', '.', '.', '.'],\r\n ['.', 'O', 'O', '.', '.', '.'],\r\n ['O', 'O', 'O', 'O', '.', '.'],\r\n ['O', 'O', 'O', 'O', 'O', '.'],\r\n ['.', 'O', 'O', 'O', 'O', 'O'],\r\n ['O', 'O', 'O', 'O', 'O', '.'],\r\n ['O', 'O', 'O', 'O', '.', '.'],\r\n ['.', 'O', 'O', '.', '.', '.'],\r\n ['.', '.', '.', '.', '.', '.']]\r\n\r\n\r\nfor x in range(0,6):\r\n for y in range(0,9):\r\n print(grid[y][x], end='')\r\n print()\r\n",
"import copy\nspam = ['apples', 'bananas', 'tofu', 'cats']\nfor i in range(len(spam)):\n if i != len(spam) - 1:\n print(spam[i], end=' and ')\n elif i == len(spam) - 1:\n print(spam[i] + '.')\ngrid = [['.', '.', '.', '.', '.', '.'], ['.', 'O', 'O', '.', '.', '.'], [\n 'O', 'O', 'O', 'O', '.', '.'], ['O', 'O', 'O', 'O', 'O', '.'], ['.',\n 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'O', 'O', '.'], ['O', 'O',\n 'O', 'O', '.', '.'], ['.', 'O', 'O', '.', '.', '.'], ['.', '.', '.',\n '.', '.', '.']]\nfor x in range(0, 6):\n for y in range(0, 9):\n print(grid[y][x], end='')\n print()\n",
"<import token>\nspam = ['apples', 'bananas', 'tofu', 'cats']\nfor i in range(len(spam)):\n if i != len(spam) - 1:\n print(spam[i], end=' and ')\n elif i == len(spam) - 1:\n print(spam[i] + '.')\ngrid = [['.', '.', '.', '.', '.', '.'], ['.', 'O', 'O', '.', '.', '.'], [\n 'O', 'O', 'O', 'O', '.', '.'], ['O', 'O', 'O', 'O', 'O', '.'], ['.',\n 'O', 'O', 'O', 'O', 'O'], ['O', 'O', 'O', 'O', 'O', '.'], ['O', 'O',\n 'O', 'O', '.', '.'], ['.', 'O', 'O', '.', '.', '.'], ['.', '.', '.',\n '.', '.', '.']]\nfor x in range(0, 6):\n for y in range(0, 9):\n print(grid[y][x], end='')\n print()\n",
"<import token>\n<assignment token>\nfor i in range(len(spam)):\n if i != len(spam) - 1:\n print(spam[i], end=' and ')\n elif i == len(spam) - 1:\n print(spam[i] + '.')\n<assignment token>\nfor x in range(0, 6):\n for y in range(0, 9):\n print(grid[y][x], end='')\n print()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,880 |
642f7256ffe461b37bcadff847addc99729ff2d2
|
import numpy as np
from PIL import Image

# pad each binary string to 8 bits
def padstring(s):
    s_len=len(s)
    return (10-s_len)*"0"+s[2:]

# encode every decimal byte read from the file as an 8-bit binary string and collect them into one list
def dec2bin(bin_str):
    img2 = np.fromfile(bin_str, dtype=np.uint8)
    x=img2.size
    sum_list=list()
    for i in range(x):
        sum_list.append(padstring(bin(img2[i])))
    return sum_list
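# render a list of 8-bit strings into an (r_len, c_len) image row: each bit becomes
# a 20x20 block in the top 20 rows, '0' -> black (0) and '1' -> white (255)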
def made_row(s,r_len,c_len):
    a=np.zeros((r_len,c_len),dtype=np.uint8)
    strg=""
    for j in range(len(s)):
        strg=strg+s[j]
    for i in range(8*len(s)):
        if strg[i]=="0":
            a[0:20,20*i:20*(i+1)]=0
        else:
            a[0:20,20*i:20*(i+1)]=255
    return a
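# decode one block row back to a byte by sampling the centre pixel of each of the 8 blocks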
def arr2byte(s):
    strg=""
    for i in range(8):
        if s[10,i*20+10]<128:
            strg=strg+"0"
        else:
            strg=strg+"1"
    return int(strg,2)
|
[
"import numpy as np\r\nfrom PIL import Image\r\n\r\n#将每一个二进制都填补成8bit\r\ndef padstring(s):\r\n s_len=len(s)\r\n return (10-s_len)*\"0\"+s[2:]\r\n \r\n#每一个十进制进来编码成对应二进制形成一个总列表\r\ndef dec2bin(bin_str):\r\n img2 = np.fromfile(bin_str, dtype=np.uint8)\r\n x=img2.size\r\n sum_list=list()\r\n for i in range(x):\r\n sum_list.append(padstring(bin(img2[i])))\r\n return sum_list\r\n\r\ndef made_row(s,r_len,c_len):\r\n a=np.zeros((r_len,c_len),dtype=np.uint8)\r\n strg=\"\"\r\n for j in range(len(s)):\r\n strg=strg+s[j]\r\n for i in range(8*len(s)):\r\n if strg[i]==\"0\":\r\n a[0:20,20*i:20*(i+1)]=0\r\n else:\r\n a[0:20,20*i:20*(i+1)]=255\r\n return a\r\n\r\ndef arr2byte(s):\r\n strg=\"\"\r\n for i in range(8):\r\n if s[10,i*20+10]<128:\r\n strg=strg+\"0\"\r\n else:\r\n strg=strg+\"1\"\r\n return int(strg,2)\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n",
"import numpy as np\nfrom PIL import Image\n\n\ndef padstring(s):\n s_len = len(s)\n return (10 - s_len) * '0' + s[2:]\n\n\ndef dec2bin(bin_str):\n img2 = np.fromfile(bin_str, dtype=np.uint8)\n x = img2.size\n sum_list = list()\n for i in range(x):\n sum_list.append(padstring(bin(img2[i])))\n return sum_list\n\n\ndef made_row(s, r_len, c_len):\n a = np.zeros((r_len, c_len), dtype=np.uint8)\n strg = ''\n for j in range(len(s)):\n strg = strg + s[j]\n for i in range(8 * len(s)):\n if strg[i] == '0':\n a[0:20, 20 * i:20 * (i + 1)] = 0\n else:\n a[0:20, 20 * i:20 * (i + 1)] = 255\n return a\n\n\ndef arr2byte(s):\n strg = ''\n for i in range(8):\n if s[10, i * 20 + 10] < 128:\n strg = strg + '0'\n else:\n strg = strg + '1'\n return int(strg, 2)\n",
"<import token>\n\n\ndef padstring(s):\n s_len = len(s)\n return (10 - s_len) * '0' + s[2:]\n\n\ndef dec2bin(bin_str):\n img2 = np.fromfile(bin_str, dtype=np.uint8)\n x = img2.size\n sum_list = list()\n for i in range(x):\n sum_list.append(padstring(bin(img2[i])))\n return sum_list\n\n\ndef made_row(s, r_len, c_len):\n a = np.zeros((r_len, c_len), dtype=np.uint8)\n strg = ''\n for j in range(len(s)):\n strg = strg + s[j]\n for i in range(8 * len(s)):\n if strg[i] == '0':\n a[0:20, 20 * i:20 * (i + 1)] = 0\n else:\n a[0:20, 20 * i:20 * (i + 1)] = 255\n return a\n\n\ndef arr2byte(s):\n strg = ''\n for i in range(8):\n if s[10, i * 20 + 10] < 128:\n strg = strg + '0'\n else:\n strg = strg + '1'\n return int(strg, 2)\n",
"<import token>\n\n\ndef padstring(s):\n s_len = len(s)\n return (10 - s_len) * '0' + s[2:]\n\n\n<function token>\n\n\ndef made_row(s, r_len, c_len):\n a = np.zeros((r_len, c_len), dtype=np.uint8)\n strg = ''\n for j in range(len(s)):\n strg = strg + s[j]\n for i in range(8 * len(s)):\n if strg[i] == '0':\n a[0:20, 20 * i:20 * (i + 1)] = 0\n else:\n a[0:20, 20 * i:20 * (i + 1)] = 255\n return a\n\n\ndef arr2byte(s):\n strg = ''\n for i in range(8):\n if s[10, i * 20 + 10] < 128:\n strg = strg + '0'\n else:\n strg = strg + '1'\n return int(strg, 2)\n",
"<import token>\n\n\ndef padstring(s):\n s_len = len(s)\n return (10 - s_len) * '0' + s[2:]\n\n\n<function token>\n<function token>\n\n\ndef arr2byte(s):\n strg = ''\n for i in range(8):\n if s[10, i * 20 + 10] < 128:\n strg = strg + '0'\n else:\n strg = strg + '1'\n return int(strg, 2)\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef arr2byte(s):\n strg = ''\n for i in range(8):\n if s[10, i * 20 + 10] < 128:\n strg = strg + '0'\n else:\n strg = strg + '1'\n return int(strg, 2)\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,881 |
ee204ae32d6d59a5cf5ec97dda68aab80688ff76
|
#!/usr/bin/env python3
from plox import Lox
if __name__ == "__main__":
    Lox.main()
|
[
"#!/usr/bin/env python3\nfrom plox import Lox\n\nif __name__ == \"__main__\":\n Lox.main()",
"from plox import Lox\nif __name__ == '__main__':\n Lox.main()\n",
"<import token>\nif __name__ == '__main__':\n Lox.main()\n",
"<import token>\n<code token>\n"
] | false |
98,882 |
c31421d181d4a30542c7117cba24cd58ac749fc3
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 11 09:43:43 2020
@author: Admin
"""
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import pickle
from moviepy.editor import *
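# build a 1280-element weighting profile for the lane histogram: high weight around the
# expected left (~x 250-400) and right (~x 800-1100) lane positions, zero in the centre
# and at the right edge, so spurious histogram peaks elsewhere are suppressed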
fin=[]
out = np.arange(0,250)/250
#print(out.shape)
out1= np.ones(100)
#print(out1.shape)
out2=np.arange(400,350,-1)/400
#print(out2.shape)
out3=np.zeros(400)
#print(out3.shape)
out4=np.arange(800,850,1)/850
#print(out4.shape)
out5=np.ones(100)
#print(out5.shape)
out6 = np.arange(1100,950,-1)/1100
out7=np.zeros(180)
fin = np.concatenate((out, out1, out2,out3,out4,out5,out6,out7))
fin = np.expand_dims(fin,axis=1)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    # Calculate directional gradient
    # Apply threshold
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    if orient=='x':
        sobel = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)
    else:
        sobel = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)
    absolute = np.absolute(sobel)
    scaled = np.uint8(255*absolute/np.max(absolute))
    grad_binary = np.zeros_like(scaled)
    grad_binary[(scaled >= thresh[0])&(scaled <= thresh[1])] = 1
    return grad_binary
def mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):
    # Calculate gradient magnitude
    # Apply threshold
    gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
    sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
    mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)
    absolute = np.absolute(mag_sobel)
    scaled = np.uint8(255*absolute/np.max(absolute))
    mag_binary = np.zeros_like(scaled)
    mag_binary[(scaled >= mag_thresh[0])&(scaled <= mag_thresh[1])] = 1
    return mag_binary
def dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):
    # Calculate gradient direction
    # Apply threshold
    gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)
    sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)
    absx = np.absolute(sobelx)
    absy = np.absolute(sobely)
    direction = np.arctan2(absy,absx)
    dir_binary = np.zeros_like(gray_img)
    dir_binary[(direction >= thresh[0])&(direction <= thresh[1])] = 1
    return dir_binary
def hls_select(image,thresh=(0,255)):
    hls = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)
    s = hls[:,:,2]
    binary_output = np.zeros_like(s)
    binary_output[(s>thresh[0])&(s<=thresh[1])]=1
    return binary_output
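# boost local contrast by applying CLAHE to the luminance (Y) channel in YUV space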
def equalize(image):
    image_yuv = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
    #histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
    #image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])
    #histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])
    #plt.plot(histo)
    #plt.show()
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))
    image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])
    img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)
    return img_output
def yuv_select_lumin(image,thresh=(0,255)):
    yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)
    lumin = yuv_img[:,:,0]
    binary_output = np.zeros_like(lumin)
    binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1
    return binary_output
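# weighted column histogram of the bottom half of the warped binary image; lane pixels are
# collected either with a 9-window sliding search (win=True) or by searching a margin
# around the previous polynomial fits (win=False)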
def hist(img,left_fit1,right_fit1,win=True):
    #img = img[:,:,0]/255
    img = img/255
    img = np.expand_dims(img,axis=-1)
    bottom_half = img[img.shape[0]//2:,:]
    histogram = np.sum(bottom_half,axis=0)
    # out = np.arange(600)
    # out1 = np.arange(600,-1,-1)
    # out3=np.zeros(79)
    # out2=np.concatenate((out, out1, out3))
    # out3 = np.expand_dims(out2,axis=1)
    histogram = np.multiply(histogram,fin)
    #print(img.shape)
    out_img = np.dstack((img,img,img))
    #print(out_img.shape)
    #print(histogram.shape)
    midpoint = np.int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:])+midpoint
    nwindows = 9
    margin = 100
    minpix =50
    searchmargin = 100
    window_height = np.int(img.shape[0]//nwindows)
    nonzero = img.nonzero()
    #**Beware y and then x**
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    left_lane_ids=[]
    right_lane_ids=[]
    if win:
        for window in range(nwindows):
            win_y_low = img.shape[0] - (window+1)*window_height
            win_y_high = img.shape[0] - (window)*window_height
            win_xleft_low = leftx_current - margin
            win_xleft_high =leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin
            cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)
            cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)
            good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]
            left_lane_ids.append(good_left_inds)
            right_lane_ids.append(good_right_inds)
            if len(good_left_inds) > minpix:
                leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
        try:
            left_lane_ids = np.concatenate(left_lane_ids)
            right_lane_ids = np.concatenate(right_lane_ids)
        except ValueError:
            pass
    else:
        left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy +
                          left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) +
                          left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))
        right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy +
                           right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) +
                           right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))
    leftx = nonzerox[left_lane_ids]
    lefty = nonzeroy[left_lane_ids]
    rightx = nonzerox[right_lane_ids]
    righty = nonzeroy[right_lane_ids]
    return histogram,leftx,lefty,rightx,righty,out_img
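# main loop: read each frame, undistort it with the saved camera calibration, combine gradient
# and colour thresholds, warp to a top-down view, fit left/right lane polynomials and draw them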
cap = cv2.VideoCapture('./project_video.mp4')
#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)
size=(int(cap.get(3)),int(cap.get(4)))
result1 = cv2.VideoWriter('./output_images/project_video.mp4',
cv2.VideoWriter_fourcc(*'MJPG'),
10, size)
#cap = cv2.VideoCapture('./challenge_video.mp4')
left_fit = []
right_fit =[]
prev_left_fit=[]
prev_right_fit=[]
count=0
radoffset=150
prev_left_fit=[]
prev_right_fit=[]
width=0
validation_fails=0
#image_no=0
while(True):
count+=1
ret, image = cap.read()
dist_pickle = pickle.load(open('./camera_cal/matrix.p','rb'))
dst = dist_pickle["dist"]
mtx = dist_pickle["mtx"]
if ret:
ksize = 3
img_undist = cv2.undistort(image,mtx,dst,None,mtx)
final_img = np.copy(img_undist)
#final_img = equalize(final_img)
#cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)
#image_no+=1
gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))
grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))
mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))
dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))
#s_binary = hls_select(img_undist,thresh=(212,255)) #98-255 works even in brighter areas
s_binary = hls_select(img_undist,thresh=(151,255)) #151
luminiscence = yuv_select_lumin(img_undist,thresh=(14,255))
combined = np.zeros_like(dir_binary)
combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)] = 1
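        # A pixel survives the combined mask if both Sobel-x and Sobel-y agree, or both
        # gradient magnitude and direction agree, or both the HLS S-channel and the YUV
        # luminance thresholds agree.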
#top left,bottom left,bottom right,top right
src = np.float32([[585-20, 460+10],[203-20, 720],[1127+30, 720],[695+30, 460+10]])
#src = np.float32([[620, 460-30],[203, 720],[1127, 720],[660, 460-30]])
points = np.int32(np.copy(src))
# cv2.polylines(img_undist,[points] ,True,(0,0,255),5)
#** Key: keep the destination top boundary as close as possible for an effective transform**
dst = np.array([[320-20, 0],[320-20, 720],[960+30, 720],[960+30, 0]],dtype='float32')
img_size=(combined.shape[1],combined.shape[0])
M = cv2.getPerspectiveTransform(src,dst)
Minv = cv2.getPerspectiveTransform(dst,src)
warped = cv2.warpPerspective(combined,M,img_size,flags=cv2.INTER_LINEAR)
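        # M maps the road trapezoid (src) to a rectangle (dst), giving a bird's-eye view
        # in which the lane lines are roughly parallel; Minv is kept so the detected lane
        # can later be warped back onto the original camera image.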
#Testing
output4 = np.dstack([warped*255,warped*255,warped*255])
output4 = cv2.resize(output4,(320, 180), interpolation = cv2.INTER_AREA)
#Testing ends
output3 = cv2.warpPerspective(final_img,M,img_size,flags=cv2.INTER_LINEAR)
output3 = cv2.resize(output3,(320, 180), interpolation = cv2.INTER_AREA)
#Testing
#cv2.imshow('warped',warped*255)
kernel = np.ones((320, 1),np.uint8)
warped1 = cv2.morphologyEx(warped.astype(np.uint8), cv2.MORPH_DILATE, kernel, iterations = 1)
warped = cv2.morphologyEx(warped1.astype(np.uint8), cv2.MORPH_ERODE, kernel, iterations = 1)
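        # Dilating and then eroding with a 320x1 vertical kernel is effectively a tall
        # morphological closing: it bridges the gaps in dashed lane markings into
        # near-continuous vertical streaks before the histogram search.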
#cv2.imshow('warped1',warped*255)
#Testing ends
if((len(left_fit)==0 or len(right_fit)==0) or count==100 or validation_fails>5):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
validation_fails = 0
else:
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,False)
if(len(leftx)==0 or len(rightx)==0):
histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)
count=0
ploty = np.linspace(0,warped.shape[0]-1,warped.shape[0])
left_fit = np.polyfit(lefty,leftx,2)
right_fit = np.polyfit(righty,rightx,2)
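        # The fit is x = f(y): lane lines are nearly vertical in the warped view, so
        # fitting x as a second-order polynomial of y keeps the fit well conditioned.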
#Testing
t2 = right_fit[2]/left_fit[2]
t1 = right_fit[1]/left_fit[1]
t0 = right_fit[0]/left_fit[0]
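        # Crude plausibility check: if any ratio of corresponding left/right polynomial
        # coefficients exceeds 20 in magnitude, the two fits disagree wildly and the
        # previous frame's fits are reused instead.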
#print(t2,t1,t0)
if(abs(t2) >20 or abs(t1)>20 or abs(t0)>20):
validation_fails+=1
if(len(prev_left_fit)!=0):
left_fit = prev_left_fit
if(len(prev_right_fit)!=0):
right_fit = prev_right_fit
            print('validation check failed; reusing previous polynomial fits')
prev_left_fit = np.copy(left_fit)
prev_right_fit = np.copy(right_fit)
#Testing ends
try:
leftfitx = left_fit[0]*ploty**2 + left_fit[1]*ploty+left_fit[2]
rightfitx = right_fit[0]*ploty**2+right_fit[1]*ploty+right_fit[2]
except TypeError:
print('The function failed to fit a line!')
final_out_img = np.copy(out_img).astype(np.uint8)
#testing
out_img[lefty,leftx] = [255,0,0]
out_img[righty,rightx] = [0,0,255]
#output4 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
leftpoints_draw = (np.asarray([leftfitx,ploty]).T).astype(np.int32)
rightpoints_draw = (np.asarray([rightfitx,ploty]).T).astype(np.int32)
#testing
# width = abs(np.max(leftpoints_draw) - np.max(rightpoints_draw))
# print(width)
cv2.polylines(out_img,[leftpoints_draw],False,(0,255,255),3)
cv2.polylines(out_img,[rightpoints_draw],False,(0,255,255),3)
#testing ends
#**Drawing the detected lane back onto the image**
pts_left = np.array([np.transpose(np.vstack([leftfitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([rightfitx, ploty])))])
#flipud reverses the right-lane points from top-to-bottom to bottom-to-top, so the concatenated polygon corners run anticlockwise.
pts = np.hstack((pts_left, pts_right))
#print(pts.shape)
#Testing
left_side_points_mean = np.mean(pts_left)
right_side_points_mean = np.mean(pts_right)
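        # Caution: np.mean(pts_left) averages the x and y coordinates together, so the
        # centre estimate further down mixes both axes; taking the mean of
        # pts_left[..., 0] alone would give a pure x position if that is what is wanted.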
#Testing ends
#**Measuring Curvature radius**
y_eval = np.max(ploty)
ym_per_pixel = 30/720 #meters per pixel in y dimension
xm_per_pixel = 3.7/700 #meters per pixel in x dimension
#Testing
left_fit_0_metres = left_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
left_fit_1_metres = left_fit[1] * (xm_per_pixel / ym_per_pixel)
right_fit_0_metres = right_fit[0] * (xm_per_pixel / (ym_per_pixel**2))
right_fit_1_metres = right_fit[1] * (xm_per_pixel / ym_per_pixel)
#Testing ends
left_curved = ((1 + (2*left_fit_0_metres*y_eval*ym_per_pixel + left_fit_1_metres)**2)**1.5)/(np.absolute(2*left_fit_0_metres))
right_curved = ((1 + (2*right_fit_0_metres*y_eval*ym_per_pixel + right_fit_1_metres)**2)**1.5)/(np.absolute(2*right_fit_0_metres))
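        # Worked formula: for x = A*y**2 + B*y + C (A and B rescaled to metres above),
        # the radius of curvature is R = (1 + (2*A*y + B)**2)**1.5 / |2*A|, evaluated
        # here at the bottom of the image (y_eval converted to metres via ym_per_pixel).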
#print('left_curved: '+str(left_curved))
#print('right_curved: '+str(right_curved))
#testing
output2 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)
#testing ends
cv2.fillPoly(final_out_img,np.int_([pts]),(0,255,0))
#cv2.imwrite('./test_images/test.jpg',combined*255)
newwarp = cv2.warpPerspective(final_out_img, Minv, (image.shape[1], image.shape[0]))
result = cv2.addWeighted(final_img, 1, newwarp, 0.3, 0)
vis = np.zeros((720, 1280 ,3),dtype=np.uint8)
vis[:720, :1280,:] = result
ltext = "left Curvature(m): " + str(round(left_curved,3))
rtext = "right Curvature(m): " + str(round(right_curved,3))
cent_out = round((left_side_points_mean + right_side_points_mean)/2,3)
distance_from_center = round(abs(img_size[0]/2 - cent_out)*xm_per_pixel,3)
cent = "Vehicle is left from center(m): " + str(distance_from_center)
cv2.putText(result,ltext,(200,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,rtext,(750,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(result,cent,(350,200),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
#cv2.imshow('result',result)
output1 = cv2.resize(combined*255,(320, 180), interpolation = cv2.INTER_AREA)
vis[:180, 0:320,:] = np.dstack([output1,output1,output1])
vis[:180, 320:640,:] = output2
vis[:180, 640:960,:] = output3
vis[:180, 960:1280,:] = output4
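        # The four 320x180 thumbnails along the top row are, left to right: the raw
        # thresholded mask, the sliding-window/fit debug view, the warped colour frame,
        # and the warped binary mask.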
cv2.putText(vis,ltext,(200,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,rtext,(750,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.putText(vis,cent,(350,250),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)
cv2.imshow('result',vis)
result1.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
result1.release()
cv2.destroyAllWindows()
|
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 11 09:43:43 2020\r\n\r\n@author: Admin\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport numpy as np\r\nimport cv2\r\nimport pickle\r\nfrom moviepy.editor import *\r\n\r\nfin=[]\r\nout = np.arange(0,250)/250\r\n#print(out.shape)\r\nout1= np.ones(100)\r\n#print(out1.shape)\r\nout2=np.arange(400,350,-1)/400\r\n#print(out2.shape)\r\nout3=np.zeros(400)\r\n#print(out3.shape)\r\n\r\nout4=np.arange(800,850,1)/850\r\n#print(out4.shape)\r\nout5=np.ones(100)\r\n#print(out5.shape)\r\nout6 = np.arange(1100,950,-1)/1100\r\nout7=np.zeros(180)\r\n\r\n\r\nfin = np.concatenate((out, out1, out2,out3,out4,out5,out6,out7))\r\nfin = np.expand_dims(fin,axis=1)\r\n\r\n\r\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\r\n # Calculate directional gradient\r\n # Apply threshold\r\n \r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n if orient=='x':\r\n sobel = cv2.Sobel(gray,cv2.CV_64F,1,0,ksize=sobel_kernel)\r\n else:\r\n sobel = cv2.Sobel(gray,cv2.CV_64F,0,1,ksize=sobel_kernel)\r\n absolute = np.absolute(sobel)\r\n scaled = np.uint8(255*absolute/np.max(absolute))\r\n grad_binary = np.zeros_like(scaled)\r\n grad_binary[(scaled >= thresh[0])&(scaled <= thresh[1])] = 1\r\n \r\n return grad_binary\r\n\r\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\r\n # Calculate gradient magnitude\r\n # Apply threshold\r\n gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)\r\n sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)\r\n mag_sobel = np.sqrt((sobelx)**2 + (sobely)**2)\r\n absolute = np.absolute(mag_sobel)\r\n scaled = np.uint8(255*absolute/np.max(absolute))\r\n mag_binary = np.zeros_like(scaled)\r\n mag_binary[(scaled >= mag_thresh[0])&(scaled <= mag_thresh[1])] = 1\r\n return mag_binary\r\n\r\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi/2)):\r\n # Calculate gradient direction\r\n # Apply threshold\r\n gray_img = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n sobelx = cv2.Sobel(gray_img,cv2.CV_64F,1,0,ksize=sobel_kernel)\r\n sobely = cv2.Sobel(gray_img,cv2.CV_64F,0,1,ksize=sobel_kernel)\r\n absx = np.absolute(sobelx)\r\n absy = np.absolute(sobely)\r\n direction = np.arctan2(absy,absx)\r\n dir_binary = np.zeros_like(gray_img)\r\n dir_binary[(direction >= thresh[0])&(direction <= thresh[1])] = 1\r\n return dir_binary\r\n\r\ndef hls_select(image,thresh=(0,255)):\r\n hls = cv2.cvtColor(image,cv2.COLOR_BGR2HLS)\r\n s = hls[:,:,2]\r\n binary_output = np.zeros_like(s)\r\n binary_output[(s>thresh[0])&(s<=thresh[1])]=1\r\n return binary_output\r\n\r\ndef equalize(image):\r\n image_yuv = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)\r\n #histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])\r\n #image_yuv[:,:,0] = cv2.equalizeHist(image_yuv[:,:,0])\r\n #histo = cv2.calcHist([image_yuv],[0],None,[256],[0,256])\r\n #plt.plot(histo)\r\n #plt.show()\r\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20,20))\r\n image_yuv[:,:,0] = clahe.apply(image_yuv[:,:,0])\r\n img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)\r\n return img_output\r\n\r\ndef yuv_select_lumin(image,thresh=(0,255)):\r\n yuv_img = cv2.cvtColor(image,cv2.COLOR_BGR2YUV)\r\n lumin = yuv_img[:,:,0]\r\n binary_output = np.zeros_like(lumin)\r\n binary_output[(lumin>thresh[0])&(lumin<=thresh[1])]=1\r\n return binary_output\r\n\r\n\r\n\r\ndef hist(img,left_fit1,right_fit1,win=True):\r\n #img = img[:,:,0]/255\r\n img = img/255\r\n img = 
np.expand_dims(img,axis=-1)\r\n bottom_half = img[img.shape[0]//2:,:]\r\n histogram = np.sum(bottom_half,axis=0)\r\n# out = np.arange(600)\r\n# out1 = np.arange(600,-1,-1)\r\n# out3=np.zeros(79)\r\n# out2=np.concatenate((out, out1, out3))\r\n# out3 = np.expand_dims(out2,axis=1)\r\n histogram = np.multiply(histogram,fin)\r\n #print(img.shape)\r\n out_img = np.dstack((img,img,img))\r\n #print(out_img.shape)\r\n #print(histogram.shape)\r\n midpoint = np.int(histogram.shape[0]//2)\r\n leftx_base = np.argmax(histogram[:midpoint])\r\n rightx_base = np.argmax(histogram[midpoint:])+midpoint\r\n \r\n nwindows = 9\r\n margin = 100\r\n minpix =50\r\n searchmargin = 100 \r\n \r\n \r\n window_height = np.int(img.shape[0]//nwindows)\r\n nonzero = img.nonzero()\r\n #**Beware y and then x**\r\n nonzeroy = np.array(nonzero[0])\r\n nonzerox = np.array(nonzero[1])\r\n \r\n leftx_current = leftx_base\r\n rightx_current = rightx_base\r\n \r\n left_lane_ids=[]\r\n right_lane_ids=[]\r\n \r\n if win:\r\n \r\n for window in range(nwindows):\r\n win_y_low = img.shape[0] - (window+1)*window_height\r\n win_y_high = img.shape[0] - (window)*window_height\r\n \r\n win_xleft_low = leftx_current - margin\r\n win_xleft_high =leftx_current + margin\r\n \r\n win_xright_low = rightx_current - margin\r\n win_xright_high = rightx_current + margin\r\n \r\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0),2)\r\n cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0),2)\r\n \r\n good_left_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &(nonzerox < win_xleft_high)).nonzero()[0]\r\n good_right_inds = ((nonzeroy >= win_y_low )& (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &(nonzerox < win_xright_high)).nonzero()[0]\r\n \r\n \r\n left_lane_ids.append(good_left_inds)\r\n right_lane_ids.append(good_right_inds)\r\n \r\n if len(good_left_inds) > minpix:\r\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\r\n if len(good_right_inds) > minpix: \r\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\r\n \r\n try:\r\n left_lane_ids = np.concatenate(left_lane_ids)\r\n right_lane_ids = np.concatenate(right_lane_ids)\r\n except ValueError:\r\n pass\r\n else:\r\n \r\n left_lane_ids = ((nonzerox > (left_fit1[0]*(nonzeroy**2) + left_fit1[1]*nonzeroy + \r\n left_fit1[2] - searchmargin)) & (nonzerox < (left_fit1[0]*(nonzeroy**2) + \r\n left_fit1[1]*nonzeroy + left_fit1[2] + searchmargin)))\r\n right_lane_ids = ((nonzerox > (right_fit1[0]*(nonzeroy**2) + right_fit1[1]*nonzeroy + \r\n right_fit1[2] - searchmargin)) & (nonzerox < (right_fit1[0]*(nonzeroy**2) + \r\n right_fit1[1]*nonzeroy + right_fit1[2] + searchmargin)))\r\n \r\n \r\n leftx = nonzerox[left_lane_ids]\r\n lefty = nonzeroy[left_lane_ids]\r\n rightx = nonzerox[right_lane_ids]\r\n righty = nonzeroy[right_lane_ids]\r\n\r\n \r\n \r\n return histogram,leftx,lefty,rightx,righty,out_img\r\n\r\n\r\ncap = cv2.VideoCapture('./project_video.mp4')\r\n#cap.set(cv2.CAP_PROP_POS_FRAMES, 1000)\r\n\r\nsize=(int(cap.get(3)),int(cap.get(4)))\r\nresult1 = cv2.VideoWriter('./output_images/project_video.mp4', \r\n cv2.VideoWriter_fourcc(*'MJPG'), \r\n 10, size) \r\n#cap = cv2.VideoCapture('./challenge_video.mp4')\r\nleft_fit = []\r\nright_fit =[]\r\nprev_left_fit=[]\r\nprev_right_fit=[]\r\ncount=0\r\nradoffset=150\r\nprev_left_fit=[]\r\nprev_right_fit=[]\r\nwidth=0\r\nvalidation_fails=0\r\n#image_no=0\r\nwhile(True):\r\n count+=1\r\n ret, image = cap.read()\r\n dist_pickle 
= pickle.load(open('./camera_cal/matrix.p','rb'))\r\n dst = dist_pickle[\"dist\"]\r\n mtx = dist_pickle[\"mtx\"]\r\n \r\n \r\n if ret:\r\n ksize = 3 \r\n img_undist = cv2.undistort(image,mtx,dst,None,mtx)\r\n final_img = np.copy(img_undist)\r\n \r\n #final_img = equalize(final_img)\r\n #cv2.imwrite('D:/Self Driving Car Engineer/Course 4/SampleImages/'+str(image_no)+'.jpg',final_img)\r\n #image_no+=1\r\n gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize, thresh=(52, 238))\r\n grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize, thresh=(59, 249))\r\n mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=(68, 255))\r\n dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(0.02, 1.57))\r\n #s_binary = hls_select(img_undist,thresh=(212,255)) #98-255 works even in brighter areas\r\n s_binary = hls_select(img_undist,thresh=(151,255)) #151\r\n luminiscence = yuv_select_lumin(img_undist,thresh=(14,255))\r\n \r\n \r\n combined = np.zeros_like(dir_binary)\r\n combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1)) |(s_binary == 1)&(luminiscence==1)] = 1\r\n#top left,bottom left,bottom right,top right\r\n src = np.float32([[585-20, 460+10],[203-20, 720],[1127+30, 720],[695+30, 460+10]])\r\n#src = np.float32([[620, 460-30],[203, 720],[1127, 720],[660, 460-30]])\r\n points = np.int32(np.copy(src))\r\n # cv2.polylines(img_undist,[points] ,True,(0,0,255),5)\r\n#** Key here is keep the destination top boundary as closer as possible for effective transform**\r\n dst = np.array([[320-20, 0],[320-20, 720],[960+30, 720],[960+30, 0]],dtype='float32')\r\n \r\n img_size=(combined.shape[1],combined.shape[0])\r\n M = cv2.getPerspectiveTransform(src,dst)\r\n Minv = cv2.getPerspectiveTransform(dst,src)\r\n warped = cv2.warpPerspective(combined,M,img_size,flags=cv2.INTER_LINEAR)\r\n \r\n #Testing\r\n \r\n output4 = np.dstack([warped*255,warped*255,warped*255])\r\n output4 = cv2.resize(output4,(320, 180), interpolation = cv2.INTER_AREA)\r\n #Testing ends\r\n \r\n output3 = cv2.warpPerspective(final_img,M,img_size,flags=cv2.INTER_LINEAR)\r\n output3 = cv2.resize(output3,(320, 180), interpolation = cv2.INTER_AREA)\r\n #Testing\r\n #cv2.imshow('warped',warped*255)\r\n kernel = np.ones((320, 1),np.uint8)\r\n warped1 = cv2.morphologyEx(warped.astype(np.uint8), cv2.MORPH_DILATE, kernel, iterations = 1)\r\n warped = cv2.morphologyEx(warped1.astype(np.uint8), cv2.MORPH_ERODE, kernel, iterations = 1)\r\n #cv2.imshow('warped1',warped*255)\r\n #Testing ends\r\n \r\n if((len(left_fit)==0 or len(right_fit)==0) or count==100 or validation_fails>5):\r\n histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)\r\n count=0\r\n validation_fails = 0\r\n else:\r\n histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,False)\r\n \r\n \r\n \r\n if(len(leftx)==0 or len(rightx)==0):\r\n histogram_img,leftx,lefty,rightx,righty,out_img = hist(warped,left_fit,right_fit,True)\r\n count=0\r\n \r\n ploty = np.linspace(0,warped.shape[0]-1,warped.shape[0])\r\n left_fit = np.polyfit(lefty,leftx,2)\r\n right_fit = np.polyfit(righty,rightx,2)\r\n \r\n \r\n #Testing\r\n t2 = right_fit[2]/left_fit[2]\r\n t1 = right_fit[1]/left_fit[1]\r\n t0 = right_fit[0]/left_fit[0]\r\n #print(t2,t1,t0)\r\n if(abs(t2) >20 or abs(t1)>20 or abs(t0)>20):\r\n validation_fails+=1\r\n if(len(prev_left_fit)!=0):\r\n left_fit = prev_left_fit\r\n if(len(prev_right_fit)!=0):\r\n right_fit = prev_right_fit\r\n print('valid fails')\r\n \r\n 
prev_left_fit = np.copy(left_fit)\r\n prev_right_fit = np.copy(right_fit)\r\n #Testing ends\r\n \r\n try:\r\n leftfitx = left_fit[0]*ploty**2 + left_fit[1]*ploty+left_fit[2]\r\n rightfitx = right_fit[0]*ploty**2+right_fit[1]*ploty+right_fit[2]\r\n except TypeError:\r\n print('The function failed to fit a line!')\r\n \r\n final_out_img = np.copy(out_img).astype(np.uint8)\r\n \r\n \r\n #testing\r\n out_img[lefty,leftx] = [255,0,0]\r\n out_img[righty,rightx] = [0,0,255]\r\n #output4 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)\r\n #testing ends\r\n \r\n leftpoints_draw = (np.asarray([leftfitx,ploty]).T).astype(np.int32)\r\n rightpoints_draw = (np.asarray([rightfitx,ploty]).T).astype(np.int32)\r\n \r\n #testing\r\n# width = abs(np.max(leftpoints_draw) - np.max(rightpoints_draw))\r\n# print(width) \r\n cv2.polylines(out_img,[leftpoints_draw],False,(0,255,255),3)\r\n cv2.polylines(out_img,[rightpoints_draw],False,(0,255,255),3)\r\n #testing ends\r\n\r\n\r\n#**Drwaing on image the lane**\r\n pts_left = np.array([np.transpose(np.vstack([leftfitx, ploty]))])\r\n pts_right = np.array([np.flipud(np.transpose(np.vstack([rightfitx, ploty])))])\r\n#flipud is just reversing the order of the points which are from top to bottom to make them bottom to top so that we can have an anticlockwise ordering of the corners.\r\n pts = np.hstack((pts_left, pts_right))\r\n#print(pts.shape)\r\n \r\n #Testing\r\n left_side_points_mean = np.mean(pts_left)\r\n right_side_points_mean = np.mean(pts_right)\r\n #Testing ends\r\n \r\n #**Measuring Curvature radius**\r\n y_eval = np.max(ploty)\r\n ym_per_pixel = 30/720 #meters per pixel in y dimension\r\n xm_per_pixel = 3.7/700 #meters per pixel in x dimension\r\n #Testing\r\n left_fit_0_metres = left_fit[0] * (xm_per_pixel / (ym_per_pixel**2))\r\n left_fit_1_metres = left_fit[1] * (xm_per_pixel / ym_per_pixel)\r\n \r\n right_fit_0_metres = right_fit[0] * (xm_per_pixel / (ym_per_pixel**2))\r\n right_fit_1_metres = right_fit[1] * (xm_per_pixel / ym_per_pixel)\r\n #Testing ends\r\n left_curved = ((1 + (2*left_fit_0_metres*y_eval*ym_per_pixel + left_fit_1_metres)**2)**1.5)/(np.absolute(2*left_fit_0_metres))\r\n right_curved = ((1 + (2*right_fit_0_metres*y_eval*ym_per_pixel + right_fit_1_metres)**2)**1.5)/(np.absolute(2*right_fit_0_metres))\r\n\r\n #print('left_curved: '+str(left_curved))\r\n #print('right_curved: '+str(right_curved))\r\n \r\n #testing\r\n output2 = cv2.resize(out_img,(320, 180), interpolation = cv2.INTER_AREA)\r\n #testing ends\r\n cv2.fillPoly(final_out_img,np.int_([pts]),(0,255,0))\r\n#cv2.imwrite('./test_images/test.jpg',combined*255)\r\n newwarp = cv2.warpPerspective(final_out_img, Minv, (image.shape[1], image.shape[0])) \r\n result = cv2.addWeighted(final_img, 1, newwarp, 0.3, 0)\r\n vis = np.zeros((720, 1280 ,3),dtype=np.uint8)\r\n vis[:720, :1280,:] = result\r\n ltext = \"left Curvature(m): \" + str(round(left_curved,3))\r\n rtext = \"right Curvature(m): \" + str(round(right_curved,3))\r\n cent_out = round((left_side_points_mean + right_side_points_mean)/2,3)\r\n distance_from_center = round(abs(img_size[0]/2 - cent_out)*xm_per_pixel,3)\r\n cent = \"Vehicle is left from center(m): \" + str(distance_from_center)\r\n cv2.putText(result,ltext,(200,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)\r\n cv2.putText(result,rtext,(750,100),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)\r\n cv2.putText(result,cent,(350,200),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)\r\n #cv2.imshow('result',result)\r\n \r\n \r\n \r\n 
\r\n \r\n output1 = cv2.resize(combined*255,(320, 180), interpolation = cv2.INTER_AREA)\r\n \r\n \r\n vis[:180, 0:320,:] = np.dstack([output1,output1,output1])\r\n vis[:180, 320:640,:] = output2\r\n vis[:180, 640:960,:] = output3\r\n vis[:180, 960:1280,:] = output4\r\n \r\n cv2.putText(vis,ltext,(200,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)\r\n cv2.putText(vis,rtext,(750,210),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)\r\n cv2.putText(vis,cent,(350,250),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,255),5,cv2.LINE_4)\r\n\r\n \r\n cv2.imshow('result',vis)\r\n \r\n \r\n result1.write(result)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n else:\r\n break\r\n\r\ncap.release()\r\nresult1.release() \r\ncv2.destroyAllWindows()",
"<docstring token>\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nimport pickle\nfrom moviepy.editor import *\nfin = []\nout = np.arange(0, 250) / 250\nout1 = np.ones(100)\nout2 = np.arange(400, 350, -1) / 400\nout3 = np.zeros(400)\nout4 = np.arange(800, 850, 1) / 850\nout5 = np.ones(100)\nout6 = np.arange(1100, 950, -1) / 1100\nout7 = np.zeros(180)\nfin = np.concatenate((out, out1, out2, out3, out4, out5, out6, out7))\nfin = np.expand_dims(fin, axis=1)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absolute = np.absolute(sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n grad_binary = np.zeros_like(scaled)\n grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1\n return grad_binary\n\n\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n mag_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)\n absolute = np.absolute(mag_sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n mag_binary = np.zeros_like(scaled)\n mag_binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1\n return mag_binary\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\ndef hls_select(image, thresh=(0, 255)):\n hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)\n s = hls[:, :, 2]\n binary_output = np.zeros_like(s)\n binary_output[(s > thresh[0]) & (s <= thresh[1])] = 1\n return binary_output\n\n\ndef equalize(image):\n image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))\n image_yuv[:, :, 0] = clahe.apply(image_yuv[:, :, 0])\n img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)\n return img_output\n\n\ndef yuv_select_lumin(image, thresh=(0, 255)):\n yuv_img = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n lumin = yuv_img[:, :, 0]\n binary_output = np.zeros_like(lumin)\n binary_output[(lumin > thresh[0]) & (lumin <= thresh[1])] = 1\n return binary_output\n\n\ndef hist(img, left_fit1, right_fit1, win=True):\n img = img / 255\n img = np.expand_dims(img, axis=-1)\n bottom_half = img[img.shape[0] // 2:, :]\n histogram = np.sum(bottom_half, axis=0)\n histogram = np.multiply(histogram, fin)\n out_img = np.dstack((img, img, img))\n midpoint = np.int(histogram.shape[0] // 2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n nwindows = 9\n margin = 100\n minpix = 50\n searchmargin = 100\n window_height = np.int(img.shape[0] // nwindows)\n nonzero = img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n leftx_current = leftx_base\n rightx_current = rightx_base\n left_lane_ids = []\n right_lane_ids = []\n if win:\n for window in range(nwindows):\n 
win_y_low = img.shape[0] - (window + 1) * window_height\n win_y_high = img.shape[0] - window * window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n cv2.rectangle(out_img, (win_xleft_low, win_y_low), (\n win_xleft_high, win_y_high), (0, 255, 0), 2)\n cv2.rectangle(out_img, (win_xright_low, win_y_low), (\n win_xright_high, win_y_high), (0, 255, 0), 2)\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy <\n win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox <\n win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy <\n win_y_high) & (nonzerox >= win_xright_low) & (nonzerox <\n win_xright_high)).nonzero()[0]\n left_lane_ids.append(good_left_inds)\n right_lane_ids.append(good_right_inds)\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n try:\n left_lane_ids = np.concatenate(left_lane_ids)\n right_lane_ids = np.concatenate(right_lane_ids)\n except ValueError:\n pass\n else:\n left_lane_ids = (nonzerox > left_fit1[0] * nonzeroy ** 2 + \n left_fit1[1] * nonzeroy + left_fit1[2] - searchmargin) & (nonzerox\n < left_fit1[0] * nonzeroy ** 2 + left_fit1[1] * nonzeroy +\n left_fit1[2] + searchmargin)\n right_lane_ids = (nonzerox > right_fit1[0] * nonzeroy ** 2 + \n right_fit1[1] * nonzeroy + right_fit1[2] - searchmargin) & (\n nonzerox < right_fit1[0] * nonzeroy ** 2 + right_fit1[1] *\n nonzeroy + right_fit1[2] + searchmargin)\n leftx = nonzerox[left_lane_ids]\n lefty = nonzeroy[left_lane_ids]\n rightx = nonzerox[right_lane_ids]\n righty = nonzeroy[right_lane_ids]\n return histogram, leftx, lefty, rightx, righty, out_img\n\n\ncap = cv2.VideoCapture('./project_video.mp4')\nsize = int(cap.get(3)), int(cap.get(4))\nresult1 = cv2.VideoWriter('./output_images/project_video.mp4', cv2.\n VideoWriter_fourcc(*'MJPG'), 10, size)\nleft_fit = []\nright_fit = []\nprev_left_fit = []\nprev_right_fit = []\ncount = 0\nradoffset = 150\nprev_left_fit = []\nprev_right_fit = []\nwidth = 0\nvalidation_fails = 0\nwhile True:\n count += 1\n ret, image = cap.read()\n dist_pickle = pickle.load(open('./camera_cal/matrix.p', 'rb'))\n dst = dist_pickle['dist']\n mtx = dist_pickle['mtx']\n if ret:\n ksize = 3\n img_undist = cv2.undistort(image, mtx, dst, None, mtx)\n final_img = np.copy(img_undist)\n gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize,\n thresh=(52, 238))\n grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize,\n thresh=(59, 249))\n mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=\n (68, 255))\n dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(\n 0.02, 1.57))\n s_binary = hls_select(img_undist, thresh=(151, 255))\n luminiscence = yuv_select_lumin(img_undist, thresh=(14, 255))\n combined = np.zeros_like(dir_binary)\n combined[(gradx == 1) & (grady == 1) | (mag_binary == 1) & (\n dir_binary == 1) | (s_binary == 1) & (luminiscence == 1)] = 1\n src = np.float32([[585 - 20, 460 + 10], [203 - 20, 720], [1127 + 30,\n 720], [695 + 30, 460 + 10]])\n points = np.int32(np.copy(src))\n dst = np.array([[320 - 20, 0], [320 - 20, 720], [960 + 30, 720], [\n 960 + 30, 0]], dtype='float32')\n img_size = combined.shape[1], combined.shape[0]\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = 
cv2.warpPerspective(combined, M, img_size, flags=cv2.\n INTER_LINEAR)\n output4 = np.dstack([warped * 255, warped * 255, warped * 255])\n output4 = cv2.resize(output4, (320, 180), interpolation=cv2.INTER_AREA)\n output3 = cv2.warpPerspective(final_img, M, img_size, flags=cv2.\n INTER_LINEAR)\n output3 = cv2.resize(output3, (320, 180), interpolation=cv2.INTER_AREA)\n kernel = np.ones((320, 1), np.uint8)\n warped1 = cv2.morphologyEx(warped.astype(np.uint8), cv2.\n MORPH_DILATE, kernel, iterations=1)\n warped = cv2.morphologyEx(warped1.astype(np.uint8), cv2.MORPH_ERODE,\n kernel, iterations=1)\n if (len(left_fit) == 0 or len(right_fit) == 0\n ) or count == 100 or validation_fails > 5:\n histogram_img, leftx, lefty, rightx, righty, out_img = hist(warped,\n left_fit, right_fit, True)\n count = 0\n validation_fails = 0\n else:\n histogram_img, leftx, lefty, rightx, righty, out_img = hist(warped,\n left_fit, right_fit, False)\n if len(leftx) == 0 or len(rightx) == 0:\n histogram_img, leftx, lefty, rightx, righty, out_img = hist(warped,\n left_fit, right_fit, True)\n count = 0\n ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n t2 = right_fit[2] / left_fit[2]\n t1 = right_fit[1] / left_fit[1]\n t0 = right_fit[0] / left_fit[0]\n if abs(t2) > 20 or abs(t1) > 20 or abs(t0) > 20:\n validation_fails += 1\n if len(prev_left_fit) != 0:\n left_fit = prev_left_fit\n if len(prev_right_fit) != 0:\n right_fit = prev_right_fit\n print('valid fails')\n prev_left_fit = np.copy(left_fit)\n prev_right_fit = np.copy(right_fit)\n try:\n leftfitx = left_fit[0] * ploty ** 2 + left_fit[1\n ] * ploty + left_fit[2]\n rightfitx = right_fit[0] * ploty ** 2 + right_fit[1\n ] * ploty + right_fit[2]\n except TypeError:\n print('The function failed to fit a line!')\n final_out_img = np.copy(out_img).astype(np.uint8)\n out_img[lefty, leftx] = [255, 0, 0]\n out_img[righty, rightx] = [0, 0, 255]\n leftpoints_draw = np.asarray([leftfitx, ploty]).T.astype(np.int32)\n rightpoints_draw = np.asarray([rightfitx, ploty]).T.astype(np.int32)\n cv2.polylines(out_img, [leftpoints_draw], False, (0, 255, 255), 3)\n cv2.polylines(out_img, [rightpoints_draw], False, (0, 255, 255), 3)\n pts_left = np.array([np.transpose(np.vstack([leftfitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([rightfitx,\n ploty])))])\n pts = np.hstack((pts_left, pts_right))\n left_side_points_mean = np.mean(pts_left)\n right_side_points_mean = np.mean(pts_right)\n y_eval = np.max(ploty)\n ym_per_pixel = 30 / 720\n xm_per_pixel = 3.7 / 700\n left_fit_0_metres = left_fit[0] * (xm_per_pixel / ym_per_pixel ** 2)\n left_fit_1_metres = left_fit[1] * (xm_per_pixel / ym_per_pixel)\n right_fit_0_metres = right_fit[0] * (xm_per_pixel / ym_per_pixel ** 2)\n right_fit_1_metres = right_fit[1] * (xm_per_pixel / ym_per_pixel)\n left_curved = (1 + (2 * left_fit_0_metres * y_eval * ym_per_pixel +\n left_fit_1_metres) ** 2) ** 1.5 / np.absolute(2 * left_fit_0_metres\n )\n right_curved = (1 + (2 * right_fit_0_metres * y_eval * ym_per_pixel +\n right_fit_1_metres) ** 2) ** 1.5 / np.absolute(2 *\n right_fit_0_metres)\n output2 = cv2.resize(out_img, (320, 180), interpolation=cv2.INTER_AREA)\n cv2.fillPoly(final_out_img, np.int_([pts]), (0, 255, 0))\n newwarp = cv2.warpPerspective(final_out_img, Minv, (image.shape[1],\n image.shape[0]))\n result = cv2.addWeighted(final_img, 1, newwarp, 0.3, 0)\n vis = np.zeros((720, 1280, 3), dtype=np.uint8)\n vis[:720, :1280, :] = 
result\n ltext = 'left Curvature(m): ' + str(round(left_curved, 3))\n rtext = 'right Curvature(m): ' + str(round(right_curved, 3))\n cent_out = round((left_side_points_mean + right_side_points_mean) /\n 2, 3)\n distance_from_center = round(abs(img_size[0] / 2 - cent_out) *\n xm_per_pixel, 3)\n cent = 'Vehicle is left from center(m): ' + str(distance_from_center)\n cv2.putText(result, ltext, (200, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(result, rtext, (750, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(result, cent, (350, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 5, cv2.LINE_4)\n output1 = cv2.resize(combined * 255, (320, 180), interpolation=cv2.\n INTER_AREA)\n vis[:180, 0:320, :] = np.dstack([output1, output1, output1])\n vis[:180, 320:640, :] = output2\n vis[:180, 640:960, :] = output3\n vis[:180, 960:1280, :] = output4\n cv2.putText(vis, ltext, (200, 210), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(vis, rtext, (750, 210), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(vis, cent, (350, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 255, 255), 5, cv2.LINE_4)\n cv2.imshow('result', vis)\n result1.write(result)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\ncap.release()\nresult1.release()\ncv2.destroyAllWindows()\n",
"<docstring token>\n<import token>\nfin = []\nout = np.arange(0, 250) / 250\nout1 = np.ones(100)\nout2 = np.arange(400, 350, -1) / 400\nout3 = np.zeros(400)\nout4 = np.arange(800, 850, 1) / 850\nout5 = np.ones(100)\nout6 = np.arange(1100, 950, -1) / 1100\nout7 = np.zeros(180)\nfin = np.concatenate((out, out1, out2, out3, out4, out5, out6, out7))\nfin = np.expand_dims(fin, axis=1)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absolute = np.absolute(sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n grad_binary = np.zeros_like(scaled)\n grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1\n return grad_binary\n\n\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n mag_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)\n absolute = np.absolute(mag_sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n mag_binary = np.zeros_like(scaled)\n mag_binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1\n return mag_binary\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\ndef hls_select(image, thresh=(0, 255)):\n hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)\n s = hls[:, :, 2]\n binary_output = np.zeros_like(s)\n binary_output[(s > thresh[0]) & (s <= thresh[1])] = 1\n return binary_output\n\n\ndef equalize(image):\n image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))\n image_yuv[:, :, 0] = clahe.apply(image_yuv[:, :, 0])\n img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)\n return img_output\n\n\ndef yuv_select_lumin(image, thresh=(0, 255)):\n yuv_img = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n lumin = yuv_img[:, :, 0]\n binary_output = np.zeros_like(lumin)\n binary_output[(lumin > thresh[0]) & (lumin <= thresh[1])] = 1\n return binary_output\n\n\ndef hist(img, left_fit1, right_fit1, win=True):\n img = img / 255\n img = np.expand_dims(img, axis=-1)\n bottom_half = img[img.shape[0] // 2:, :]\n histogram = np.sum(bottom_half, axis=0)\n histogram = np.multiply(histogram, fin)\n out_img = np.dstack((img, img, img))\n midpoint = np.int(histogram.shape[0] // 2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n nwindows = 9\n margin = 100\n minpix = 50\n searchmargin = 100\n window_height = np.int(img.shape[0] // nwindows)\n nonzero = img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n leftx_current = leftx_base\n rightx_current = rightx_base\n left_lane_ids = []\n right_lane_ids = []\n if win:\n for window in range(nwindows):\n win_y_low = img.shape[0] - (window + 1) * window_height\n win_y_high = img.shape[0] - window * window_height\n win_xleft_low = 
leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n cv2.rectangle(out_img, (win_xleft_low, win_y_low), (\n win_xleft_high, win_y_high), (0, 255, 0), 2)\n cv2.rectangle(out_img, (win_xright_low, win_y_low), (\n win_xright_high, win_y_high), (0, 255, 0), 2)\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy <\n win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox <\n win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy <\n win_y_high) & (nonzerox >= win_xright_low) & (nonzerox <\n win_xright_high)).nonzero()[0]\n left_lane_ids.append(good_left_inds)\n right_lane_ids.append(good_right_inds)\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n try:\n left_lane_ids = np.concatenate(left_lane_ids)\n right_lane_ids = np.concatenate(right_lane_ids)\n except ValueError:\n pass\n else:\n left_lane_ids = (nonzerox > left_fit1[0] * nonzeroy ** 2 + \n left_fit1[1] * nonzeroy + left_fit1[2] - searchmargin) & (nonzerox\n < left_fit1[0] * nonzeroy ** 2 + left_fit1[1] * nonzeroy +\n left_fit1[2] + searchmargin)\n right_lane_ids = (nonzerox > right_fit1[0] * nonzeroy ** 2 + \n right_fit1[1] * nonzeroy + right_fit1[2] - searchmargin) & (\n nonzerox < right_fit1[0] * nonzeroy ** 2 + right_fit1[1] *\n nonzeroy + right_fit1[2] + searchmargin)\n leftx = nonzerox[left_lane_ids]\n lefty = nonzeroy[left_lane_ids]\n rightx = nonzerox[right_lane_ids]\n righty = nonzeroy[right_lane_ids]\n return histogram, leftx, lefty, rightx, righty, out_img\n\n\ncap = cv2.VideoCapture('./project_video.mp4')\nsize = int(cap.get(3)), int(cap.get(4))\nresult1 = cv2.VideoWriter('./output_images/project_video.mp4', cv2.\n VideoWriter_fourcc(*'MJPG'), 10, size)\nleft_fit = []\nright_fit = []\nprev_left_fit = []\nprev_right_fit = []\ncount = 0\nradoffset = 150\nprev_left_fit = []\nprev_right_fit = []\nwidth = 0\nvalidation_fails = 0\nwhile True:\n count += 1\n ret, image = cap.read()\n dist_pickle = pickle.load(open('./camera_cal/matrix.p', 'rb'))\n dst = dist_pickle['dist']\n mtx = dist_pickle['mtx']\n if ret:\n ksize = 3\n img_undist = cv2.undistort(image, mtx, dst, None, mtx)\n final_img = np.copy(img_undist)\n gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize,\n thresh=(52, 238))\n grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize,\n thresh=(59, 249))\n mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=\n (68, 255))\n dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(\n 0.02, 1.57))\n s_binary = hls_select(img_undist, thresh=(151, 255))\n luminiscence = yuv_select_lumin(img_undist, thresh=(14, 255))\n combined = np.zeros_like(dir_binary)\n combined[(gradx == 1) & (grady == 1) | (mag_binary == 1) & (\n dir_binary == 1) | (s_binary == 1) & (luminiscence == 1)] = 1\n src = np.float32([[585 - 20, 460 + 10], [203 - 20, 720], [1127 + 30,\n 720], [695 + 30, 460 + 10]])\n points = np.int32(np.copy(src))\n dst = np.array([[320 - 20, 0], [320 - 20, 720], [960 + 30, 720], [\n 960 + 30, 0]], dtype='float32')\n img_size = combined.shape[1], combined.shape[0]\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(combined, M, img_size, flags=cv2.\n INTER_LINEAR)\n output4 = np.dstack([warped * 255, warped * 255, warped * 255])\n 
output4 = cv2.resize(output4, (320, 180), interpolation=cv2.INTER_AREA)\n output3 = cv2.warpPerspective(final_img, M, img_size, flags=cv2.\n INTER_LINEAR)\n output3 = cv2.resize(output3, (320, 180), interpolation=cv2.INTER_AREA)\n kernel = np.ones((320, 1), np.uint8)\n warped1 = cv2.morphologyEx(warped.astype(np.uint8), cv2.\n MORPH_DILATE, kernel, iterations=1)\n warped = cv2.morphologyEx(warped1.astype(np.uint8), cv2.MORPH_ERODE,\n kernel, iterations=1)\n if (len(left_fit) == 0 or len(right_fit) == 0\n ) or count == 100 or validation_fails > 5:\n histogram_img, leftx, lefty, rightx, righty, out_img = hist(warped,\n left_fit, right_fit, True)\n count = 0\n validation_fails = 0\n else:\n histogram_img, leftx, lefty, rightx, righty, out_img = hist(warped,\n left_fit, right_fit, False)\n if len(leftx) == 0 or len(rightx) == 0:\n histogram_img, leftx, lefty, rightx, righty, out_img = hist(warped,\n left_fit, right_fit, True)\n count = 0\n ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n t2 = right_fit[2] / left_fit[2]\n t1 = right_fit[1] / left_fit[1]\n t0 = right_fit[0] / left_fit[0]\n if abs(t2) > 20 or abs(t1) > 20 or abs(t0) > 20:\n validation_fails += 1\n if len(prev_left_fit) != 0:\n left_fit = prev_left_fit\n if len(prev_right_fit) != 0:\n right_fit = prev_right_fit\n print('valid fails')\n prev_left_fit = np.copy(left_fit)\n prev_right_fit = np.copy(right_fit)\n try:\n leftfitx = left_fit[0] * ploty ** 2 + left_fit[1\n ] * ploty + left_fit[2]\n rightfitx = right_fit[0] * ploty ** 2 + right_fit[1\n ] * ploty + right_fit[2]\n except TypeError:\n print('The function failed to fit a line!')\n final_out_img = np.copy(out_img).astype(np.uint8)\n out_img[lefty, leftx] = [255, 0, 0]\n out_img[righty, rightx] = [0, 0, 255]\n leftpoints_draw = np.asarray([leftfitx, ploty]).T.astype(np.int32)\n rightpoints_draw = np.asarray([rightfitx, ploty]).T.astype(np.int32)\n cv2.polylines(out_img, [leftpoints_draw], False, (0, 255, 255), 3)\n cv2.polylines(out_img, [rightpoints_draw], False, (0, 255, 255), 3)\n pts_left = np.array([np.transpose(np.vstack([leftfitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([rightfitx,\n ploty])))])\n pts = np.hstack((pts_left, pts_right))\n left_side_points_mean = np.mean(pts_left)\n right_side_points_mean = np.mean(pts_right)\n y_eval = np.max(ploty)\n ym_per_pixel = 30 / 720\n xm_per_pixel = 3.7 / 700\n left_fit_0_metres = left_fit[0] * (xm_per_pixel / ym_per_pixel ** 2)\n left_fit_1_metres = left_fit[1] * (xm_per_pixel / ym_per_pixel)\n right_fit_0_metres = right_fit[0] * (xm_per_pixel / ym_per_pixel ** 2)\n right_fit_1_metres = right_fit[1] * (xm_per_pixel / ym_per_pixel)\n left_curved = (1 + (2 * left_fit_0_metres * y_eval * ym_per_pixel +\n left_fit_1_metres) ** 2) ** 1.5 / np.absolute(2 * left_fit_0_metres\n )\n right_curved = (1 + (2 * right_fit_0_metres * y_eval * ym_per_pixel +\n right_fit_1_metres) ** 2) ** 1.5 / np.absolute(2 *\n right_fit_0_metres)\n output2 = cv2.resize(out_img, (320, 180), interpolation=cv2.INTER_AREA)\n cv2.fillPoly(final_out_img, np.int_([pts]), (0, 255, 0))\n newwarp = cv2.warpPerspective(final_out_img, Minv, (image.shape[1],\n image.shape[0]))\n result = cv2.addWeighted(final_img, 1, newwarp, 0.3, 0)\n vis = np.zeros((720, 1280, 3), dtype=np.uint8)\n vis[:720, :1280, :] = result\n ltext = 'left Curvature(m): ' + str(round(left_curved, 3))\n rtext = 'right Curvature(m): ' + str(round(right_curved, 3))\n cent_out = 
round((left_side_points_mean + right_side_points_mean) /\n 2, 3)\n distance_from_center = round(abs(img_size[0] / 2 - cent_out) *\n xm_per_pixel, 3)\n cent = 'Vehicle is left from center(m): ' + str(distance_from_center)\n cv2.putText(result, ltext, (200, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(result, rtext, (750, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(result, cent, (350, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 5, cv2.LINE_4)\n output1 = cv2.resize(combined * 255, (320, 180), interpolation=cv2.\n INTER_AREA)\n vis[:180, 0:320, :] = np.dstack([output1, output1, output1])\n vis[:180, 320:640, :] = output2\n vis[:180, 640:960, :] = output3\n vis[:180, 960:1280, :] = output4\n cv2.putText(vis, ltext, (200, 210), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(vis, rtext, (750, 210), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(vis, cent, (350, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 255, 255), 5, cv2.LINE_4)\n cv2.imshow('result', vis)\n result1.write(result)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\ncap.release()\nresult1.release()\ncv2.destroyAllWindows()\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absolute = np.absolute(sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n grad_binary = np.zeros_like(scaled)\n grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1\n return grad_binary\n\n\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n mag_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)\n absolute = np.absolute(mag_sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n mag_binary = np.zeros_like(scaled)\n mag_binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1\n return mag_binary\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\ndef hls_select(image, thresh=(0, 255)):\n hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)\n s = hls[:, :, 2]\n binary_output = np.zeros_like(s)\n binary_output[(s > thresh[0]) & (s <= thresh[1])] = 1\n return binary_output\n\n\ndef equalize(image):\n image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))\n image_yuv[:, :, 0] = clahe.apply(image_yuv[:, :, 0])\n img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)\n return img_output\n\n\ndef yuv_select_lumin(image, thresh=(0, 255)):\n yuv_img = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n lumin = yuv_img[:, :, 0]\n binary_output = np.zeros_like(lumin)\n binary_output[(lumin > thresh[0]) & (lumin <= thresh[1])] = 1\n return binary_output\n\n\ndef hist(img, left_fit1, right_fit1, win=True):\n img = img / 255\n img = np.expand_dims(img, axis=-1)\n bottom_half = img[img.shape[0] // 2:, :]\n histogram = np.sum(bottom_half, axis=0)\n histogram = np.multiply(histogram, fin)\n out_img = np.dstack((img, img, img))\n midpoint = np.int(histogram.shape[0] // 2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n nwindows = 9\n margin = 100\n minpix = 50\n searchmargin = 100\n window_height = np.int(img.shape[0] // nwindows)\n nonzero = img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n leftx_current = leftx_base\n rightx_current = rightx_base\n left_lane_ids = []\n right_lane_ids = []\n if win:\n for window in range(nwindows):\n win_y_low = img.shape[0] - (window + 1) * window_height\n win_y_high = img.shape[0] - window * window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n cv2.rectangle(out_img, (win_xleft_low, win_y_low), (\n win_xleft_high, win_y_high), (0, 255, 0), 2)\n cv2.rectangle(out_img, (win_xright_low, win_y_low), (\n win_xright_high, 
win_y_high), (0, 255, 0), 2)\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy <\n win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox <\n win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy <\n win_y_high) & (nonzerox >= win_xright_low) & (nonzerox <\n win_xright_high)).nonzero()[0]\n left_lane_ids.append(good_left_inds)\n right_lane_ids.append(good_right_inds)\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n try:\n left_lane_ids = np.concatenate(left_lane_ids)\n right_lane_ids = np.concatenate(right_lane_ids)\n except ValueError:\n pass\n else:\n left_lane_ids = (nonzerox > left_fit1[0] * nonzeroy ** 2 + \n left_fit1[1] * nonzeroy + left_fit1[2] - searchmargin) & (nonzerox\n < left_fit1[0] * nonzeroy ** 2 + left_fit1[1] * nonzeroy +\n left_fit1[2] + searchmargin)\n right_lane_ids = (nonzerox > right_fit1[0] * nonzeroy ** 2 + \n right_fit1[1] * nonzeroy + right_fit1[2] - searchmargin) & (\n nonzerox < right_fit1[0] * nonzeroy ** 2 + right_fit1[1] *\n nonzeroy + right_fit1[2] + searchmargin)\n leftx = nonzerox[left_lane_ids]\n lefty = nonzeroy[left_lane_ids]\n rightx = nonzerox[right_lane_ids]\n righty = nonzeroy[right_lane_ids]\n return histogram, leftx, lefty, rightx, righty, out_img\n\n\n<assignment token>\nwhile True:\n count += 1\n ret, image = cap.read()\n dist_pickle = pickle.load(open('./camera_cal/matrix.p', 'rb'))\n dst = dist_pickle['dist']\n mtx = dist_pickle['mtx']\n if ret:\n ksize = 3\n img_undist = cv2.undistort(image, mtx, dst, None, mtx)\n final_img = np.copy(img_undist)\n gradx = abs_sobel_thresh(img_undist, orient='x', sobel_kernel=ksize,\n thresh=(52, 238))\n grady = abs_sobel_thresh(img_undist, orient='y', sobel_kernel=ksize,\n thresh=(59, 249))\n mag_binary = mag_thresh(img_undist, sobel_kernel=ksize, mag_thresh=\n (68, 255))\n dir_binary = dir_threshold(img_undist, sobel_kernel=ksize, thresh=(\n 0.02, 1.57))\n s_binary = hls_select(img_undist, thresh=(151, 255))\n luminiscence = yuv_select_lumin(img_undist, thresh=(14, 255))\n combined = np.zeros_like(dir_binary)\n combined[(gradx == 1) & (grady == 1) | (mag_binary == 1) & (\n dir_binary == 1) | (s_binary == 1) & (luminiscence == 1)] = 1\n src = np.float32([[585 - 20, 460 + 10], [203 - 20, 720], [1127 + 30,\n 720], [695 + 30, 460 + 10]])\n points = np.int32(np.copy(src))\n dst = np.array([[320 - 20, 0], [320 - 20, 720], [960 + 30, 720], [\n 960 + 30, 0]], dtype='float32')\n img_size = combined.shape[1], combined.shape[0]\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(combined, M, img_size, flags=cv2.\n INTER_LINEAR)\n output4 = np.dstack([warped * 255, warped * 255, warped * 255])\n output4 = cv2.resize(output4, (320, 180), interpolation=cv2.INTER_AREA)\n output3 = cv2.warpPerspective(final_img, M, img_size, flags=cv2.\n INTER_LINEAR)\n output3 = cv2.resize(output3, (320, 180), interpolation=cv2.INTER_AREA)\n kernel = np.ones((320, 1), np.uint8)\n warped1 = cv2.morphologyEx(warped.astype(np.uint8), cv2.\n MORPH_DILATE, kernel, iterations=1)\n warped = cv2.morphologyEx(warped1.astype(np.uint8), cv2.MORPH_ERODE,\n kernel, iterations=1)\n if (len(left_fit) == 0 or len(right_fit) == 0\n ) or count == 100 or validation_fails > 5:\n histogram_img, leftx, lefty, rightx, righty, out_img = hist(warped,\n left_fit, right_fit, True)\n count = 0\n validation_fails = 
0\n else:\n histogram_img, leftx, lefty, rightx, righty, out_img = hist(warped,\n left_fit, right_fit, False)\n if len(leftx) == 0 or len(rightx) == 0:\n histogram_img, leftx, lefty, rightx, righty, out_img = hist(warped,\n left_fit, right_fit, True)\n count = 0\n ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n t2 = right_fit[2] / left_fit[2]\n t1 = right_fit[1] / left_fit[1]\n t0 = right_fit[0] / left_fit[0]\n if abs(t2) > 20 or abs(t1) > 20 or abs(t0) > 20:\n validation_fails += 1\n if len(prev_left_fit) != 0:\n left_fit = prev_left_fit\n if len(prev_right_fit) != 0:\n right_fit = prev_right_fit\n print('valid fails')\n prev_left_fit = np.copy(left_fit)\n prev_right_fit = np.copy(right_fit)\n try:\n leftfitx = left_fit[0] * ploty ** 2 + left_fit[1\n ] * ploty + left_fit[2]\n rightfitx = right_fit[0] * ploty ** 2 + right_fit[1\n ] * ploty + right_fit[2]\n except TypeError:\n print('The function failed to fit a line!')\n final_out_img = np.copy(out_img).astype(np.uint8)\n out_img[lefty, leftx] = [255, 0, 0]\n out_img[righty, rightx] = [0, 0, 255]\n leftpoints_draw = np.asarray([leftfitx, ploty]).T.astype(np.int32)\n rightpoints_draw = np.asarray([rightfitx, ploty]).T.astype(np.int32)\n cv2.polylines(out_img, [leftpoints_draw], False, (0, 255, 255), 3)\n cv2.polylines(out_img, [rightpoints_draw], False, (0, 255, 255), 3)\n pts_left = np.array([np.transpose(np.vstack([leftfitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([rightfitx,\n ploty])))])\n pts = np.hstack((pts_left, pts_right))\n left_side_points_mean = np.mean(pts_left)\n right_side_points_mean = np.mean(pts_right)\n y_eval = np.max(ploty)\n ym_per_pixel = 30 / 720\n xm_per_pixel = 3.7 / 700\n left_fit_0_metres = left_fit[0] * (xm_per_pixel / ym_per_pixel ** 2)\n left_fit_1_metres = left_fit[1] * (xm_per_pixel / ym_per_pixel)\n right_fit_0_metres = right_fit[0] * (xm_per_pixel / ym_per_pixel ** 2)\n right_fit_1_metres = right_fit[1] * (xm_per_pixel / ym_per_pixel)\n left_curved = (1 + (2 * left_fit_0_metres * y_eval * ym_per_pixel +\n left_fit_1_metres) ** 2) ** 1.5 / np.absolute(2 * left_fit_0_metres\n )\n right_curved = (1 + (2 * right_fit_0_metres * y_eval * ym_per_pixel +\n right_fit_1_metres) ** 2) ** 1.5 / np.absolute(2 *\n right_fit_0_metres)\n output2 = cv2.resize(out_img, (320, 180), interpolation=cv2.INTER_AREA)\n cv2.fillPoly(final_out_img, np.int_([pts]), (0, 255, 0))\n newwarp = cv2.warpPerspective(final_out_img, Minv, (image.shape[1],\n image.shape[0]))\n result = cv2.addWeighted(final_img, 1, newwarp, 0.3, 0)\n vis = np.zeros((720, 1280, 3), dtype=np.uint8)\n vis[:720, :1280, :] = result\n ltext = 'left Curvature(m): ' + str(round(left_curved, 3))\n rtext = 'right Curvature(m): ' + str(round(right_curved, 3))\n cent_out = round((left_side_points_mean + right_side_points_mean) /\n 2, 3)\n distance_from_center = round(abs(img_size[0] / 2 - cent_out) *\n xm_per_pixel, 3)\n cent = 'Vehicle is left from center(m): ' + str(distance_from_center)\n cv2.putText(result, ltext, (200, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(result, rtext, (750, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(result, cent, (350, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 5, cv2.LINE_4)\n output1 = cv2.resize(combined * 255, (320, 180), interpolation=cv2.\n INTER_AREA)\n vis[:180, 0:320, :] = np.dstack([output1, output1, output1])\n 
vis[:180, 320:640, :] = output2\n vis[:180, 640:960, :] = output3\n vis[:180, 960:1280, :] = output4\n cv2.putText(vis, ltext, (200, 210), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(vis, rtext, (750, 210), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 255, 255), 5, cv2.LINE_4)\n cv2.putText(vis, cent, (350, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, (\n 255, 255, 255), 5, cv2.LINE_4)\n cv2.imshow('result', vis)\n result1.write(result)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n else:\n break\ncap.release()\nresult1.release()\ncv2.destroyAllWindows()\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absolute = np.absolute(sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n grad_binary = np.zeros_like(scaled)\n grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1\n return grad_binary\n\n\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n mag_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)\n absolute = np.absolute(mag_sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n mag_binary = np.zeros_like(scaled)\n mag_binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1\n return mag_binary\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\ndef hls_select(image, thresh=(0, 255)):\n hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)\n s = hls[:, :, 2]\n binary_output = np.zeros_like(s)\n binary_output[(s > thresh[0]) & (s <= thresh[1])] = 1\n return binary_output\n\n\ndef equalize(image):\n image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))\n image_yuv[:, :, 0] = clahe.apply(image_yuv[:, :, 0])\n img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)\n return img_output\n\n\ndef yuv_select_lumin(image, thresh=(0, 255)):\n yuv_img = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n lumin = yuv_img[:, :, 0]\n binary_output = np.zeros_like(lumin)\n binary_output[(lumin > thresh[0]) & (lumin <= thresh[1])] = 1\n return binary_output\n\n\ndef hist(img, left_fit1, right_fit1, win=True):\n img = img / 255\n img = np.expand_dims(img, axis=-1)\n bottom_half = img[img.shape[0] // 2:, :]\n histogram = np.sum(bottom_half, axis=0)\n histogram = np.multiply(histogram, fin)\n out_img = np.dstack((img, img, img))\n midpoint = np.int(histogram.shape[0] // 2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n nwindows = 9\n margin = 100\n minpix = 50\n searchmargin = 100\n window_height = np.int(img.shape[0] // nwindows)\n nonzero = img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n leftx_current = leftx_base\n rightx_current = rightx_base\n left_lane_ids = []\n right_lane_ids = []\n if win:\n for window in range(nwindows):\n win_y_low = img.shape[0] - (window + 1) * window_height\n win_y_high = img.shape[0] - window * window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n cv2.rectangle(out_img, (win_xleft_low, win_y_low), (\n win_xleft_high, win_y_high), (0, 255, 0), 2)\n cv2.rectangle(out_img, (win_xright_low, win_y_low), (\n win_xright_high, 
win_y_high), (0, 255, 0), 2)\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy <\n win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox <\n win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy <\n win_y_high) & (nonzerox >= win_xright_low) & (nonzerox <\n win_xright_high)).nonzero()[0]\n left_lane_ids.append(good_left_inds)\n right_lane_ids.append(good_right_inds)\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n try:\n left_lane_ids = np.concatenate(left_lane_ids)\n right_lane_ids = np.concatenate(right_lane_ids)\n except ValueError:\n pass\n else:\n left_lane_ids = (nonzerox > left_fit1[0] * nonzeroy ** 2 + \n left_fit1[1] * nonzeroy + left_fit1[2] - searchmargin) & (nonzerox\n < left_fit1[0] * nonzeroy ** 2 + left_fit1[1] * nonzeroy +\n left_fit1[2] + searchmargin)\n right_lane_ids = (nonzerox > right_fit1[0] * nonzeroy ** 2 + \n right_fit1[1] * nonzeroy + right_fit1[2] - searchmargin) & (\n nonzerox < right_fit1[0] * nonzeroy ** 2 + right_fit1[1] *\n nonzeroy + right_fit1[2] + searchmargin)\n leftx = nonzerox[left_lane_ids]\n lefty = nonzeroy[left_lane_ids]\n rightx = nonzerox[right_lane_ids]\n righty = nonzeroy[right_lane_ids]\n return histogram, leftx, lefty, rightx, righty, out_img\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absolute = np.absolute(sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n grad_binary = np.zeros_like(scaled)\n grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1\n return grad_binary\n\n\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n mag_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)\n absolute = np.absolute(mag_sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n mag_binary = np.zeros_like(scaled)\n mag_binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1\n return mag_binary\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\ndef hls_select(image, thresh=(0, 255)):\n hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)\n s = hls[:, :, 2]\n binary_output = np.zeros_like(s)\n binary_output[(s > thresh[0]) & (s <= thresh[1])] = 1\n return binary_output\n\n\ndef equalize(image):\n image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))\n image_yuv[:, :, 0] = clahe.apply(image_yuv[:, :, 0])\n img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)\n return img_output\n\n\n<function token>\n\n\ndef hist(img, left_fit1, right_fit1, win=True):\n img = img / 255\n img = np.expand_dims(img, axis=-1)\n bottom_half = img[img.shape[0] // 2:, :]\n histogram = np.sum(bottom_half, axis=0)\n histogram = np.multiply(histogram, fin)\n out_img = np.dstack((img, img, img))\n midpoint = np.int(histogram.shape[0] // 2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n nwindows = 9\n margin = 100\n minpix = 50\n searchmargin = 100\n window_height = np.int(img.shape[0] // nwindows)\n nonzero = img.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n leftx_current = leftx_base\n rightx_current = rightx_base\n left_lane_ids = []\n right_lane_ids = []\n if win:\n for window in range(nwindows):\n win_y_low = img.shape[0] - (window + 1) * window_height\n win_y_high = img.shape[0] - window * window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n cv2.rectangle(out_img, (win_xleft_low, win_y_low), (\n win_xleft_high, win_y_high), (0, 255, 0), 2)\n cv2.rectangle(out_img, (win_xright_low, win_y_low), (\n win_xright_high, win_y_high), (0, 255, 0), 2)\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy <\n win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox <\n win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy 
<\n win_y_high) & (nonzerox >= win_xright_low) & (nonzerox <\n win_xright_high)).nonzero()[0]\n left_lane_ids.append(good_left_inds)\n right_lane_ids.append(good_right_inds)\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n try:\n left_lane_ids = np.concatenate(left_lane_ids)\n right_lane_ids = np.concatenate(right_lane_ids)\n except ValueError:\n pass\n else:\n left_lane_ids = (nonzerox > left_fit1[0] * nonzeroy ** 2 + \n left_fit1[1] * nonzeroy + left_fit1[2] - searchmargin) & (nonzerox\n < left_fit1[0] * nonzeroy ** 2 + left_fit1[1] * nonzeroy +\n left_fit1[2] + searchmargin)\n right_lane_ids = (nonzerox > right_fit1[0] * nonzeroy ** 2 + \n right_fit1[1] * nonzeroy + right_fit1[2] - searchmargin) & (\n nonzerox < right_fit1[0] * nonzeroy ** 2 + right_fit1[1] *\n nonzeroy + right_fit1[2] + searchmargin)\n leftx = nonzerox[left_lane_ids]\n lefty = nonzeroy[left_lane_ids]\n rightx = nonzerox[right_lane_ids]\n righty = nonzeroy[right_lane_ids]\n return histogram, leftx, lefty, rightx, righty, out_img\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absolute = np.absolute(sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n grad_binary = np.zeros_like(scaled)\n grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1\n return grad_binary\n\n\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n mag_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)\n absolute = np.absolute(mag_sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n mag_binary = np.zeros_like(scaled)\n mag_binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1\n return mag_binary\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\ndef hls_select(image, thresh=(0, 255)):\n hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)\n s = hls[:, :, 2]\n binary_output = np.zeros_like(s)\n binary_output[(s > thresh[0]) & (s <= thresh[1])] = 1\n return binary_output\n\n\ndef equalize(image):\n image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))\n image_yuv[:, :, 0] = clahe.apply(image_yuv[:, :, 0])\n img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)\n return img_output\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absolute = np.absolute(sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n grad_binary = np.zeros_like(scaled)\n grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1\n return grad_binary\n\n\ndef mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n mag_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)\n absolute = np.absolute(mag_sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n mag_binary = np.zeros_like(scaled)\n mag_binary[(scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])] = 1\n return mag_binary\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\n<function token>\n\n\ndef equalize(image):\n image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))\n image_yuv[:, :, 0] = clahe.apply(image_yuv[:, :, 0])\n img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)\n return img_output\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absolute = np.absolute(sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n grad_binary = np.zeros_like(scaled)\n grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1\n return grad_binary\n\n\n<function token>\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\n<function token>\n\n\ndef equalize(image):\n image_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(20, 20))\n image_yuv[:, :, 0] = clahe.apply(image_yuv[:, :, 0])\n img_output = cv2.cvtColor(image_yuv, cv2.COLOR_YUV2BGR)\n return img_output\n\n\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absolute = np.absolute(sobel)\n scaled = np.uint8(255 * absolute / np.max(absolute))\n grad_binary = np.zeros_like(scaled)\n grad_binary[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1\n return grad_binary\n\n\n<function token>\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef dir_threshold(image, sobel_kernel=3, thresh=(0, np.pi / 2)):\n gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n sobelx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n absx = np.absolute(sobelx)\n absy = np.absolute(sobely)\n direction = np.arctan2(absy, absx)\n dir_binary = np.zeros_like(gray_img)\n dir_binary[(direction >= thresh[0]) & (direction <= thresh[1])] = 1\n return dir_binary\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
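
The record closing above carries the redaction steps of an OpenCV lane-detection script built around Sobel, magnitude/direction, and HLS saturation thresholds plus a sliding-window histogram search. As a hedged illustration of how such gradient and colour thresholds are typically combined into one binary mask (kernel size and threshold values are illustrative, not the script's tuned parameters):

import cv2
import numpy as np

def combined_threshold(frame_bgr):
    # Scaled absolute Sobel-x gradient, in the spirit of abs_sobel_thresh above.
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    sobelx = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3))
    sobelx = np.uint8(255 * sobelx / np.max(sobelx))
    gradx = np.zeros_like(sobelx)
    gradx[(sobelx >= 20) & (sobelx <= 100)] = 1
    # Saturation-channel threshold, in the spirit of hls_select above.
    s = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HLS)[:, :, 2]
    s_bin = np.zeros_like(s)
    s_bin[(s > 170) & (s <= 255)] = 1
    # Keep a pixel if either test fires.
    combined = np.zeros_like(gradx)
    combined[(gradx == 1) | (s_bin == 1)] = 1
    return combined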
98,883 |
9f1af2bca93dc31d37cd1ca781d840f77c21ac3f
|
import os
import sys
import web
def migrate(db, directory):
    # Ensure the schema_info bookkeeping table exists; web.py's db.where
    # raises when the table is missing, in which case it is created here.
    try:
        db.where("schema_info")
    except Exception:
        db.query("create table schema_info (version int not null)")
sys.path.append(directory)
for migration in sorted_migrations(os.listdir(directory)):
if db.where("schema_info", version=migration.version):
print "Skipping %s since it's already applied." % migration
continue
print "Applying %s" % migration
__import__(migration.name).up(db)
db.insert("schema_info", version=migration.version)
def sorted_migrations(fnames):
def build_migration(fname):
name = os.path.splitext(fname)[0]
version = int(name.split("_")[-1])
return web.storage(fname=fname, name=name, version=version)
migrations = [build_migration(f) for f in fnames if f.endswith('.py')]
return sorted(migrations, key=lambda m: m.version)
if __name__ == "__main__":
import config
migrate(config.dbn, "config/migrations")
|
[
"import os\nimport sys\n\nimport web\n\n\ndef migrate(db, directory):\n try:\n db.where(\"schema_info\")\n except:\n db.query(\"create table schema_info (version int not null)\")\n\n sys.path.append(directory)\n for migration in sorted_migrations(os.listdir(directory)):\n if db.where(\"schema_info\", version=migration.version):\n print \"Skipping %s since it's already applied.\" % migration\n continue\n print \"Applying %s\" % migration\n __import__(migration.name).up(db)\n db.insert(\"schema_info\", version=migration.version)\n\n\ndef sorted_migrations(fnames):\n def build_migration(fname):\n name = os.path.splitext(fname)[0]\n version = int(name.split(\"_\")[-1])\n return web.storage(fname=fname, name=name, version=version)\n\n migrations = [build_migration(f) for f in fnames if f.endswith('.py')]\n return sorted(migrations, key=lambda m: m.version)\n\n\nif __name__ == \"__main__\":\n import config\n migrate(config.dbn, \"config/migrations\")\n"
] | true |
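
A hedged aside on the runner above: sorted_migrations() reads the integer after the last underscore in each migration file's name as its schema version, and migrate() imports the module and calls its up(db) hook once per unapplied version. A minimal migration file it could apply might look like this (the file name, table, and columns are hypothetical, not taken from the project):

# config/migrations/create_posts_001.py  -- hypothetical; the runner would read
# version=1 from the trailing "_001" in the module name.
def up(db):
    # db is the web.py database handle handed in by migrate().
    db.query("create table posts (id serial primary key, title text not null)")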
98,884 |
b4e28e834118194a51f32ae0bf42118bbeaec7c6
|
# Only the last assignment takes effect; the earlier top_dir values presumably
# correspond to previous runs and are kept for reference.
top_dir = '/oak/stanford/groups/khavari/users/dfporter/seq/all/'
top_dir = '/Users/dfporter/pma/dataAndScripts/clip/miseq/Runs/hiseq_pcbp1_190416/'
top_dir = '/Users/dfporter/pma/dataAndScripts/clip/miseq/Runs/hiseq_rbfox_190418/'
scheme_file = top_dir + '/scheme.xlsx'
ann_counts_file = top_dir + '/ann_counts.txt'
bed_file_dir = top_dir + '/beds/'
positive_proteins = [
'Rbfox1', 'Rbfox2', 'hnRNPD',
]
|
[
"\ntop_dir = '/oak/stanford/groups/khavari/users/dfporter/seq/all/'\ntop_dir = '/Users/dfporter/pma/dataAndScripts/clip/miseq/Runs/hiseq_pcbp1_190416/'\ntop_dir = '/Users/dfporter/pma/dataAndScripts/clip/miseq/Runs/hiseq_rbfox_190418/'\n\nscheme_file = top_dir + '/scheme.xlsx'\nann_counts_file = top_dir + '/ann_counts.txt'\nbed_file_dir = top_dir + '/beds/'\n\npositive_proteins = [\n\t'Rbfox1', 'Rbfox2', 'hnRNPD',\n]",
"top_dir = '/oak/stanford/groups/khavari/users/dfporter/seq/all/'\ntop_dir = (\n '/Users/dfporter/pma/dataAndScripts/clip/miseq/Runs/hiseq_pcbp1_190416/')\ntop_dir = (\n '/Users/dfporter/pma/dataAndScripts/clip/miseq/Runs/hiseq_rbfox_190418/')\nscheme_file = top_dir + '/scheme.xlsx'\nann_counts_file = top_dir + '/ann_counts.txt'\nbed_file_dir = top_dir + '/beds/'\npositive_proteins = ['Rbfox1', 'Rbfox2', 'hnRNPD']\n",
"<assignment token>\n"
] | false |
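
The record above only defines run paths and a list of positive proteins. Purely as an illustration of how such files might be loaded (the file formats are assumptions, and the snippet presumes the assignments above are in scope):

import pandas as pd

scheme = pd.read_excel(scheme_file)            # assumes an Excel-readable sheet
ann = pd.read_csv(ann_counts_file, sep='\t')   # assumes tab-separated counts
cols = [c for c in ann.columns if any(p in c for p in positive_proteins)]
print(ann[cols].head())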
98,885 |
065b78926404abde0293fe43008397ed30b873e7
|
from flask_restful import Resource
from flask_restful import reqparse
from celery.result import AsyncResult
def add_resource_status(name, api, celery):
class TaskStatus(Resource):
"""Class to be added to api's resources."""
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('task-id',
type=str,
help='Task id has to be a string',
location='form')
args = parser.parse_args()
result = AsyncResult(args['task-id'], app=celery)
return {
'task-id': args['task-id'],
'status': result.status,
}
api.add_resource(TaskStatus, name)
|
[
"from flask_restful import Resource\nfrom flask_restful import reqparse\nfrom celery.result import AsyncResult\n\n\ndef add_resource_status(name, api, celery):\n class TaskStatus(Resource):\n \"\"\"Class to be added to api's resources.\"\"\"\n\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('task-id',\n type=str,\n help='Task id has to be a string',\n location='form')\n args = parser.parse_args()\n result = AsyncResult(args['task-id'], app=celery)\n return {\n 'task-id': args['task-id'],\n 'status': result.status,\n }\n api.add_resource(TaskStatus, name)\n",
"from flask_restful import Resource\nfrom flask_restful import reqparse\nfrom celery.result import AsyncResult\n\n\ndef add_resource_status(name, api, celery):\n\n\n class TaskStatus(Resource):\n \"\"\"Class to be added to api's resources.\"\"\"\n\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('task-id', type=str, help=\n 'Task id has to be a string', location='form')\n args = parser.parse_args()\n result = AsyncResult(args['task-id'], app=celery)\n return {'task-id': args['task-id'], 'status': result.status}\n api.add_resource(TaskStatus, name)\n",
"<import token>\n\n\ndef add_resource_status(name, api, celery):\n\n\n class TaskStatus(Resource):\n \"\"\"Class to be added to api's resources.\"\"\"\n\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('task-id', type=str, help=\n 'Task id has to be a string', location='form')\n args = parser.parse_args()\n result = AsyncResult(args['task-id'], app=celery)\n return {'task-id': args['task-id'], 'status': result.status}\n api.add_resource(TaskStatus, name)\n",
"<import token>\n<function token>\n"
] | false |
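
A hedged sketch of wiring the factory above into an application (the module name, route, and broker URL are assumptions, not taken from the original project):

from flask import Flask
from flask_restful import Api
from celery import Celery

# add_resource_status is the factory defined in the record above; importing it
# from a module named "task_status" is an assumption.
from task_status import add_resource_status

app = Flask(__name__)
api = Api(app)
celery = Celery('tasks', broker='redis://localhost:6379/0')

# Registers POST /status, which reads the 'task-id' form field and returns the
# Celery task's current status.
add_resource_status('/status', api, celery)

if __name__ == '__main__':
    app.run(debug=True)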
98,886 |
829ad0513708cb0a12e0a3efccde5754523a9c39
|
# -*- coding: utf-8 -*-
import os
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import numpy as np
import tushare as ts
ts.set_token('29eaf3bcac23df4c6d025de157ab2d53beead3391fbe6e83b4ebcb6c')
pro = ts.pro_api()
from matplotlib.pylab import date2num
#mpl.rcParams['font.family'] = 'sans-serif'
#mpl.rcParams['font.sans-serif'] = 'SimHei' # Chinese
from mylab.stock.myfeature import getKdj
from mylab.stock.myfeature import getMacd
__all__ = ["getIndexBasic","getIndexDaily", "getIndexWeekly","getIndexMonthly","getIndex",
"getStockBasic","getStockDaily","getStockWeekly","getStockMonthly","getStock",
"getIndustryBasic","getIndustryDaily","getIndustryWeekly","getIndustryMonthly","getIndustry",
"resetIndex","readData",
"mergeDailyWeeklyMonthly","mergeWeeklyMonthly","mergeStockIndex",
"deleteSTKC","deleteNew",
"myMerge",
]
def myMerge(df1,df2,on = [], how = "left" ):
cols = [i for i in df2.columns.values if i in df1.columns.values] # in df1 and df2
cols = [i for i in cols if i not in on ] # not in on
df2 = df2.drop(cols, axis = 1 )
df = pd.merge( df1, df2, on = on , how = how )
return df
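
# Hedged usage note for myMerge (the frames below are made up): columns shared
# by both inputs and not listed in `on` are dropped from df2, so df1's copy wins.
# >>> a = pd.DataFrame({"trade_date": ["20200101"], "close": [10.0]})
# >>> b = pd.DataFrame({"trade_date": ["20200101"], "close": [99.0], "kdj_k": [50.0]})
# >>> myMerge(a, b, on=["trade_date"]).columns.tolist()
# ['trade_date', 'close', 'kdj_k']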
def deleteSTKC(pool_df):
    # Drop risk-warning names (leading "*" or "S"), ChiNext codes (leading "3")
    # and STAR Market codes (leading "688") from the stock pool.
    pool_df["name1"] = [i[0] for i in pool_df["name"].values]
pool_df["code1"] = [i[0] for i in pool_df["ts_code"].values]
pool_df["code3"] = [i[0:3] for i in pool_df["ts_code"].values]
pool_df = pool_df.loc[pool_df["name1"] != "*", :]
pool_df = pool_df.loc[pool_df["name1"] != "S", :]
pool_df = pool_df.loc[pool_df["code1"] != "3", :]
pool_df = pool_df.loc[pool_df["code3"] != "688", :]
pool_df = pool_df.drop(["name1","code1","code3"], axis = 1)
pool_df = pool_df.reset_index(drop = True)
return pool_df
def deleteNew(pool_df, list_data = "20190101"):
pool_df = pool_df.loc[pool_df.list_date.values < list_data,:]
pool_df = pool_df.reset_index(drop = True)
return pool_df
def getStockBasic(LOCAL = True, noSTKC = True, list_data = "20190101"):
if LOCAL:
pool_df = pd.read_csv("./data/stock/stock_basic_info.csv")
pool_df["list_date"] = pool_df["list_date"].astype("str")
else:
fields='ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'
pool_df = pro.stock_basic(list_status='L', fields=fields)
if noSTKC:
pool_df = deleteSTKC(pool_df)
if list_data:
pool_df = deleteNew(pool_df, list_data )
return pool_df
def getIndexBasic(LOCAL = True, market = "SZSE" ):
if LOCAL:
pool_df = pd.read_csv("./data/index/index_basic_info_"+market+".csv")
else:
pool_df = pro.index_basic(market= market)
return pool_df
def getIndexDaily(stock_code , start_date = "20100101", end_date = "20200314", LOCAL = True, market = "SZSE" ):
dir_file = "./data/index/"+market+"/daily/"
if LOCAL:
daily_df = readData(dir_file, stock_code, start_date , end_date )
else:
daily_df = pro.index_daily(ts_code = stock_code,start_date = start_date, end_date = end_date )
daily_df = resetIndex(daily_df)
return daily_df
def getIndexWeekly(stock_code, start_date = "20100101", end_date = "20200314", LOCAL = True, market = "SZSE" ):
dir_file = "./data/index/"+market+"/weekly/"
if LOCAL:
daily_df = readData(dir_file, stock_code, start_date , end_date )
else:
daily_df = pro.index_weekly(ts_code = stock_code,start_date = start_date, end_date = end_date )
daily_df = resetIndex(daily_df)
return daily_df
def getIndexMonthly(stock_code, start_date = "20100101", end_date = "20200314", LOCAL = True, market = "SZSE" ):
dir_file = "./data/index/"+market+"/monthly/"
if LOCAL:
daily_df = readData(dir_file, stock_code, start_date , end_date )
else:
daily_df = pro.index_monthly(ts_code = stock_code,start_date = start_date, end_date = end_date )
daily_df = resetIndex(daily_df)
return daily_df
def getStockDaily(stock_code, start_date = "20100101", end_date = "20200314", LOCAL = True ):
dir_file = "./data/stock/daily/"
if LOCAL:
daily_df = readData(dir_file, stock_code, start_date , end_date )
else:
daily_df = pro.daily(ts_code = stock_code,start_date = start_date, end_date = end_date )
daily_df = resetIndex(daily_df)
return daily_df
def getStockWeekly(stock_code, start_date = "20100101", end_date = "20200314", LOCAL = True ):
dir_file = "./data/stock/weekly/"
if LOCAL:
daily_df = readData(dir_file, stock_code, start_date , end_date )
else:
daily_df = pro.daily(ts_code = stock_code,start_date = start_date, end_date = end_date )
daily_df = resetIndex(daily_df)
return daily_df
def getStockMonthly(stock_code, start_date = "20100101", end_date = "20200314", LOCAL = True ):
dir_file = "./data/stock/monthly/"
if LOCAL:
daily_df = readData(dir_file, stock_code, start_date , end_date )
else:
daily_df = pro.daily(ts_code = stock_code,start_date = start_date, end_date = end_date )
daily_df = resetIndex(daily_df)
return daily_df
def getIndustryBasic( ):
pool_df = pd.read_csv("./data/industry/all_industry_basic_info.csv")
return pool_df
def getIndustryDaily(stock_code , start_date = "20100101", end_date = "20200314" ):
dir_file = "./data/industry/daily/"
daily_df = readData(dir_file, stock_code, start_date , end_date )
daily_df = resetIndex(daily_df)
return daily_df
def getIndustryWeekly(stock_code, start_date = "20100101", end_date = "20200314" ):
dir_file = "./data/industry/weekly/"
daily_df = readData(dir_file, stock_code, start_date , end_date )
daily_df = resetIndex(daily_df)
return daily_df
def getIndustryMonthly(stock_code, start_date = "20100101", end_date = "20200314" ):
dir_file = "./data/industry/monthly/"
daily_df = readData(dir_file, stock_code, start_date , end_date )
daily_df = resetIndex(daily_df)
return daily_df
def resetIndex(daily_df):
# reset ascending
daily_df["trade_date_stamp"] = daily_df["trade_date"].copy()
daily_df["trade_date_stamp"] = pd.to_datetime(daily_df["trade_date_stamp"]).map(date2num)
daily_df.sort_values(by="trade_date_stamp", ascending=True,inplace=True)
daily_df.reset_index(drop=True,inplace=True)
return daily_df
def readData(dir_file, stock_code, start_date = "20100101", end_date = "20200314" ):
    # Scan dir_file for the csv whose name starts with the ts_code, restrict it
    # to [start_date, end_date], and fail loudly if no matching file exists.
    daily_df = None
    for file_dir , _ , files in os.walk(dir_file):
        for i,file_name in enumerate(files):
            if file_name[:9] == stock_code:
                daily_df = pd.read_csv(file_dir+file_name)
                daily_df["trade_date"] = daily_df["trade_date"].astype("str")
                daily_df = daily_df.loc[daily_df["trade_date"] >= start_date,:].reset_index(drop=True)
                daily_df = daily_df.loc[daily_df["trade_date"] <= end_date,:].reset_index(drop=True)
                break
    if daily_df is None:
        raise FileNotFoundError("no local data file found for %s in %s" % (stock_code, dir_file))
    return daily_df
def mergeDailyWeeklyMonthly(daily_df,weekly_df,monthly_df):
weekly_df.drop(["ts_code", "trade_date_stamp"],axis = 1, inplace = True)
cols = [i+'_weekly' for i in weekly_df.columns ]
weekly_df.columns = cols
weekly_df.rename(columns = {"trade_date_weekly":"trade_date"}, inplace = True)
all_df = pd.merge(daily_df, weekly_df, how= "left" ,on= "trade_date")
monthly_df.drop(["ts_code", "trade_date_stamp"],axis = 1, inplace = True)
cols = [i+'_monthly' for i in monthly_df.columns ]
monthly_df.columns = cols
monthly_df.rename(columns = {"trade_date_monthly":"trade_date"}, inplace = True)
all_df = pd.merge(all_df, monthly_df, how= "left" ,on= "trade_date")
all_df.fillna(method= "ffill", inplace=True)
return all_df
def mergeWeeklyMonthly(weekly_df,monthly_df):
cols = [i+'_weekly' for i in weekly_df.columns ]
weekly_df.columns = cols
col_dic = {"trade_date_weekly":"trade_date","ts_code_weekly":"ts_code","trade_date_stamp_weekly":"trade_date_stamp"}
weekly_df.rename(columns = col_dic, inplace = True)
monthly_df.drop(["ts_code", "trade_date_stamp"],axis = 1, inplace = True)
cols = [i+'_monthly' for i in monthly_df.columns ]
monthly_df.columns = cols
monthly_df.rename(columns = {"trade_date_monthly":"trade_date"}, inplace = True)
all_df = pd.merge(weekly_df, monthly_df, how= "outer" ,on= "trade_date")
all_df.fillna(method= "ffill", inplace=True)
return all_df
def mergeStockIndex(stock_df, df):
index_df = df.copy(deep = True)
index_df.drop(["ts_code", "trade_date_stamp"],axis = 1, inplace = True)
cols = [i+'_index' for i in index_df.columns.values ]
index_df.columns = cols
index_df.rename(columns = {"trade_date_index":"trade_date"}, inplace = True)
all_df = pd.merge(left = stock_df, right = index_df, how= "left" ,on= "trade_date")
all_df.fillna(method= "ffill", inplace=True)
return all_df
def getStock(stock_code,start_date, end_date , LOCAL = True):
daily_df = getStockDaily(stock_code,start_date, end_date , LOCAL = LOCAL)
weekly_df = getStockWeekly(stock_code,start_date , end_date , LOCAL = LOCAL)
monthly_df = getStockMonthly(stock_code,start_date , end_date , LOCAL = LOCAL)
# KDJ and MACD
daily_df = getKdj(daily_df)
daily_df = getMacd(daily_df)
weekly_df = getKdj(weekly_df)
weekly_df = getMacd(weekly_df)
monthly_df = getKdj(monthly_df)
monthly_df = getMacd(monthly_df)
# merge
all_df = mergeDailyWeeklyMonthly(daily_df,weekly_df,monthly_df)
return all_df
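
# Hedged usage sketch for getStock (the ts_code and dates are illustrative and
# the call needs the local ./data tree plus the mylab KDJ/MACD helpers):
# >>> df = getStock("000001.SZ", "20180101", "20200314", LOCAL=True)
# >>> df[["trade_date", "close", "close_weekly", "close_monthly"]].tail()
# Daily rows carry the latest weekly/monthly values forward via ffill.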
def getIndex(stock_code,start_date, end_date , LOCAL = True, merge_daily = True):
if merge_daily:
daily_df = getIndexDaily(stock_code,start_date, end_date , LOCAL = True)
daily_df = getKdj(daily_df)
daily_df = getMacd(daily_df)
weekly_df = getIndexWeekly(stock_code,start_date , end_date , LOCAL = True)
monthly_df = getIndexMonthly(stock_code,start_date , end_date , LOCAL = True)
# KDJ
weekly_df = getKdj(weekly_df)
weekly_df = getMacd(weekly_df)
monthly_df = getKdj(monthly_df)
monthly_df = getMacd(monthly_df)
# merge
if merge_daily:
all_df = mergeDailyWeeklyMonthly(daily_df,weekly_df,monthly_df)
else:
all_df = mergeWeeklyMonthly(weekly_df,monthly_df)
return all_df
def getIndustry(stock_code,start_date = "20100101", end_date = "20200314" , LOCAL = True, merge_daily = True):
if merge_daily:
daily_df = getIndustryDaily(stock_code,start_date, end_date )
daily_df = getKdj(daily_df)
daily_df = getMacd(daily_df)
weekly_df = getIndustryWeekly(stock_code,start_date , end_date )
monthly_df = getIndustryMonthly(stock_code,start_date , end_date )
# KDJ and MACD
weekly_df = getKdj(weekly_df)
weekly_df = getMacd(weekly_df)
monthly_df = getKdj(monthly_df)
monthly_df = getMacd(monthly_df)
# merge
if merge_daily:
all_df = mergeDailyWeeklyMonthly(daily_df,weekly_df,monthly_df)
else:
all_df = mergeWeeklyMonthly(weekly_df,monthly_df)
return all_df
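
# Hedged module-level demo, kept commented out (index code and dates are
# illustrative and assume the matching csv files exist under ./data):
# if __name__ == "__main__":
#     pool = getStockBasic(LOCAL=True)
#     idx = getIndex("399001.SZ", "20150101", "20200314", LOCAL=True, merge_daily=True)
#     print(pool.shape, idx.tail())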
|
[
"# -*- coding: utf-8 -*-\nimport os\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nimport pandas as pd\nimport numpy as np\nimport tushare as ts\nts.set_token('29eaf3bcac23df4c6d025de157ab2d53beead3391fbe6e83b4ebcb6c')\npro = ts.pro_api()\n\nfrom matplotlib.pylab import date2num\n#mpl.rcParams['font.family'] = 'sans-serif'\n#mpl.rcParams['font.sans-serif'] = 'SimHei' # Chinese \n\nfrom mylab.stock.myfeature import getKdj\nfrom mylab.stock.myfeature import getMacd\n\n\n__all__ = [\"getIndexBasic\",\"getIndexDaily\", \"getIndexWeekly\",\"getIndexMonthly\",\"getIndex\",\n \"getStockBasic\",\"getStockDaily\",\"getStockWeekly\",\"getStockMonthly\",\"getStock\",\n \"getIndustryBasic\",\"getIndustryDaily\",\"getIndustryWeekly\",\"getIndustryMonthly\",\"getIndustry\",\n \"resetIndex\",\"readData\",\n \"mergeDailyWeeklyMonthly\",\"mergeWeeklyMonthly\",\"mergeStockIndex\",\n \"deleteSTKC\",\"deleteNew\",\n \"myMerge\",\n ]\n\ndef myMerge(df1,df2,on = [], how = \"left\" ):\n cols = [i for i in df2.columns.values if i in df1.columns.values] # in df1 and df2\n cols = [i for i in cols if i not in on ] # not in on\n df2 = df2.drop(cols, axis = 1 )\n df = pd.merge( df1, df2, on = on , how = how ) \n return df\n\ndef deleteSTKC(pool_df):\n pool_df[\"name1\"] = [i[0] for i in pool_df[\"name\"].values]\n pool_df[\"code1\"] = [i[0] for i in pool_df[\"ts_code\"].values]\n pool_df[\"code3\"] = [i[0:3] for i in pool_df[\"ts_code\"].values]\n pool_df = pool_df.loc[pool_df[\"name1\"] != \"*\", :]\n pool_df = pool_df.loc[pool_df[\"name1\"] != \"S\", :]\n pool_df = pool_df.loc[pool_df[\"code1\"] != \"3\", :]\n pool_df = pool_df.loc[pool_df[\"code3\"] != \"688\", :]\n pool_df = pool_df.drop([\"name1\",\"code1\",\"code3\"], axis = 1)\n pool_df = pool_df.reset_index(drop = True)\n return pool_df\n\ndef deleteNew(pool_df, list_data = \"20190101\"):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data,:]\n pool_df = pool_df.reset_index(drop = True)\n return pool_df \n\ndef getStockBasic(LOCAL = True, noSTKC = True, list_data = \"20190101\"):\n if LOCAL:\n pool_df = pd.read_csv(\"./data/stock/stock_basic_info.csv\")\n pool_df[\"list_date\"] = pool_df[\"list_date\"].astype(\"str\")\n else:\n fields='ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data )\n return pool_df\n\ndef getIndexBasic(LOCAL = True, market = \"SZSE\" ):\n if LOCAL:\n pool_df = pd.read_csv(\"./data/index/index_basic_info_\"+market+\".csv\")\n else:\n pool_df = pro.index_basic(market= market)\n return pool_df\n\ndef getIndexDaily(stock_code , start_date = \"20100101\", end_date = \"20200314\", LOCAL = True, market = \"SZSE\" ):\n dir_file = \"./data/index/\"+market+\"/daily/\"\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date , end_date )\n else:\n daily_df = pro.index_daily(ts_code = stock_code,start_date = start_date, end_date = end_date )\n daily_df = resetIndex(daily_df)\n return daily_df\n\ndef getIndexWeekly(stock_code, start_date = \"20100101\", end_date = \"20200314\", LOCAL = True, market = \"SZSE\" ):\n dir_file = \"./data/index/\"+market+\"/weekly/\"\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date , end_date )\n else:\n daily_df = pro.index_weekly(ts_code = stock_code,start_date = start_date, end_date = end_date )\n daily_df = resetIndex(daily_df)\n return daily_df\n\ndef getIndexMonthly(stock_code, 
start_date = \"20100101\", end_date = \"20200314\", LOCAL = True, market = \"SZSE\" ):\n dir_file = \"./data/index/\"+market+\"/monthly/\"\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date , end_date )\n else:\n daily_df = pro.index_monthly(ts_code = stock_code,start_date = start_date, end_date = end_date )\n daily_df = resetIndex(daily_df)\n return daily_df\n\ndef getStockDaily(stock_code, start_date = \"20100101\", end_date = \"20200314\", LOCAL = True ):\n dir_file = \"./data/stock/daily/\"\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date , end_date )\n else:\n daily_df = pro.daily(ts_code = stock_code,start_date = start_date, end_date = end_date )\n daily_df = resetIndex(daily_df)\n return daily_df\n\ndef getStockWeekly(stock_code, start_date = \"20100101\", end_date = \"20200314\", LOCAL = True ):\n dir_file = \"./data/stock/weekly/\"\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date , end_date )\n else:\n daily_df = pro.daily(ts_code = stock_code,start_date = start_date, end_date = end_date )\n daily_df = resetIndex(daily_df)\n return daily_df\n\ndef getStockMonthly(stock_code, start_date = \"20100101\", end_date = \"20200314\", LOCAL = True ):\n dir_file = \"./data/stock/monthly/\"\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date , end_date )\n else:\n daily_df = pro.daily(ts_code = stock_code,start_date = start_date, end_date = end_date )\n daily_df = resetIndex(daily_df)\n return daily_df\n\ndef getIndustryBasic( ):\n pool_df = pd.read_csv(\"./data/industry/all_industry_basic_info.csv\")\n return pool_df\n\ndef getIndustryDaily(stock_code , start_date = \"20100101\", end_date = \"20200314\" ):\n dir_file = \"./data/industry/daily/\"\n daily_df = readData(dir_file, stock_code, start_date , end_date )\n daily_df = resetIndex(daily_df)\n return daily_df\n\ndef getIndustryWeekly(stock_code, start_date = \"20100101\", end_date = \"20200314\" ):\n dir_file = \"./data/industry/weekly/\"\n daily_df = readData(dir_file, stock_code, start_date , end_date )\n daily_df = resetIndex(daily_df)\n return daily_df\n\ndef getIndustryMonthly(stock_code, start_date = \"20100101\", end_date = \"20200314\" ):\n dir_file = \"./data/industry/monthly/\"\n daily_df = readData(dir_file, stock_code, start_date , end_date )\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n # reset ascending\n daily_df[\"trade_date_stamp\"] = daily_df[\"trade_date\"].copy()\n daily_df[\"trade_date_stamp\"] = pd.to_datetime(daily_df[\"trade_date_stamp\"]).map(date2num)\n daily_df.sort_values(by=\"trade_date_stamp\", ascending=True,inplace=True)\n daily_df.reset_index(drop=True,inplace=True)\n return daily_df\n\ndef readData(dir_file, stock_code, start_date = \"20100101\", end_date = \"20200314\" ):\n for file_dir , _ , files in os.walk(dir_file):\n for i,file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir+file_name)\n daily_df[\"trade_date\"] = daily_df[\"trade_date\"].astype(\"str\")\n daily_df = daily_df.loc[daily_df[\"trade_date\"] >= start_date,:].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df[\"trade_date\"] <= end_date,:].reset_index(drop=True)\n break\n return daily_df\n\ndef mergeDailyWeeklyMonthly(daily_df,weekly_df,monthly_df):\n weekly_df.drop([\"ts_code\", \"trade_date_stamp\"],axis = 1, inplace = True)\n cols = [i+'_weekly' for i in weekly_df.columns ]\n weekly_df.columns = cols\n weekly_df.rename(columns = {\"trade_date_weekly\":\"trade_date\"}, inplace 
= True)\n all_df = pd.merge(daily_df, weekly_df, how= \"left\" ,on= \"trade_date\")\n monthly_df.drop([\"ts_code\", \"trade_date_stamp\"],axis = 1, inplace = True)\n cols = [i+'_monthly' for i in monthly_df.columns ]\n monthly_df.columns = cols\n monthly_df.rename(columns = {\"trade_date_monthly\":\"trade_date\"}, inplace = True)\n all_df = pd.merge(all_df, monthly_df, how= \"left\" ,on= \"trade_date\")\n \n all_df.fillna(method= \"ffill\", inplace=True)\n return all_df\n\ndef mergeWeeklyMonthly(weekly_df,monthly_df):\n cols = [i+'_weekly' for i in weekly_df.columns ]\n weekly_df.columns = cols\n col_dic = {\"trade_date_weekly\":\"trade_date\",\"ts_code_weekly\":\"ts_code\",\"trade_date_stamp_weekly\":\"trade_date_stamp\"}\n weekly_df.rename(columns = col_dic, inplace = True)\n\n monthly_df.drop([\"ts_code\", \"trade_date_stamp\"],axis = 1, inplace = True)\n cols = [i+'_monthly' for i in monthly_df.columns ]\n monthly_df.columns = cols\n monthly_df.rename(columns = {\"trade_date_monthly\":\"trade_date\"}, inplace = True)\n \n all_df = pd.merge(weekly_df, monthly_df, how= \"outer\" ,on= \"trade_date\")\n \n all_df.fillna(method= \"ffill\", inplace=True)\n return all_df\n\ndef mergeStockIndex(stock_df, df):\n index_df = df.copy(deep = True)\n index_df.drop([\"ts_code\", \"trade_date_stamp\"],axis = 1, inplace = True)\n cols = [i+'_index' for i in index_df.columns.values ]\n index_df.columns = cols\n index_df.rename(columns = {\"trade_date_index\":\"trade_date\"}, inplace = True)\n all_df = pd.merge(left = stock_df, right = index_df, how= \"left\" ,on= \"trade_date\")\n all_df.fillna(method= \"ffill\", inplace=True)\n return all_df\n\ndef getStock(stock_code,start_date, end_date , LOCAL = True):\n daily_df = getStockDaily(stock_code,start_date, end_date , LOCAL = LOCAL)\n weekly_df = getStockWeekly(stock_code,start_date , end_date , LOCAL = LOCAL)\n monthly_df = getStockMonthly(stock_code,start_date , end_date , LOCAL = LOCAL)\n # KDJ and MACD\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n # merge\n all_df = mergeDailyWeeklyMonthly(daily_df,weekly_df,monthly_df)\n return all_df\n\ndef getIndex(stock_code,start_date, end_date , LOCAL = True, merge_daily = True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code,start_date, end_date , LOCAL = True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code,start_date , end_date , LOCAL = True)\n monthly_df = getIndexMonthly(stock_code,start_date , end_date , LOCAL = True)\n # KDJ\n \n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n # merge\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df,weekly_df,monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df,monthly_df)\n \n return all_df\n\ndef getIndustry(stock_code,start_date = \"20100101\", end_date = \"20200314\" , LOCAL = True, merge_daily = True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code,start_date, end_date )\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code,start_date , end_date )\n monthly_df = getIndustryMonthly(stock_code,start_date , end_date )\n # KDJ and MACD\n \n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n # merge\n if 
merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df,weekly_df,monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df,monthly_df)\n return all_df\n\n",
"import os\nimport warnings\nwarnings.filterwarnings('ignore')\nimport pandas as pd\nimport numpy as np\nimport tushare as ts\nts.set_token('29eaf3bcac23df4c6d025de157ab2d53beead3391fbe6e83b4ebcb6c')\npro = ts.pro_api()\nfrom matplotlib.pylab import date2num\nfrom mylab.stock.myfeature import getKdj\nfrom mylab.stock.myfeature import getMacd\n__all__ = ['getIndexBasic', 'getIndexDaily', 'getIndexWeekly',\n 'getIndexMonthly', 'getIndex', 'getStockBasic', 'getStockDaily',\n 'getStockWeekly', 'getStockMonthly', 'getStock', 'getIndustryBasic',\n 'getIndustryDaily', 'getIndustryWeekly', 'getIndustryMonthly',\n 'getIndustry', 'resetIndex', 'readData', 'mergeDailyWeeklyMonthly',\n 'mergeWeeklyMonthly', 'mergeStockIndex', 'deleteSTKC', 'deleteNew',\n 'myMerge']\n\n\ndef myMerge(df1, df2, on=[], how='left'):\n cols = [i for i in df2.columns.values if i in df1.columns.values]\n cols = [i for i in cols if i not in on]\n df2 = df2.drop(cols, axis=1)\n df = pd.merge(df1, df2, on=on, how=how)\n return df\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\ndef getIndexBasic(LOCAL=True, market='SZSE'):\n if LOCAL:\n pool_df = pd.read_csv('./data/index/index_basic_info_' + market +\n '.csv')\n else:\n pool_df = pro.index_basic(market=market)\n return pool_df\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, 
end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryWeekly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/weekly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\ndef mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df):\n weekly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n weekly_df.rename(columns={'trade_date_weekly': 'trade_date'}, inplace=True)\n all_df = pd.merge(daily_df, weekly_df, how='left', on='trade_date')\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(all_df, monthly_df, how='left', on='trade_date')\n all_df.fillna(method='ffill', 
inplace=True)\n return all_df\n\n\ndef mergeWeeklyMonthly(weekly_df, monthly_df):\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n col_dic = {'trade_date_weekly': 'trade_date', 'ts_code_weekly':\n 'ts_code', 'trade_date_stamp_weekly': 'trade_date_stamp'}\n weekly_df.rename(columns=col_dic, inplace=True)\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(weekly_df, monthly_df, how='outer', on='trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef mergeStockIndex(stock_df, df):\n index_df = df.copy(deep=True)\n index_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_index') for i in index_df.columns.values]\n index_df.columns = cols\n index_df.rename(columns={'trade_date_index': 'trade_date'}, inplace=True)\n all_df = pd.merge(left=stock_df, right=index_df, how='left', on=\n 'trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\nwarnings.filterwarnings('ignore')\n<import token>\nts.set_token('29eaf3bcac23df4c6d025de157ab2d53beead3391fbe6e83b4ebcb6c')\npro = ts.pro_api()\n<import token>\n__all__ = ['getIndexBasic', 'getIndexDaily', 'getIndexWeekly',\n 'getIndexMonthly', 'getIndex', 'getStockBasic', 'getStockDaily',\n 'getStockWeekly', 'getStockMonthly', 'getStock', 'getIndustryBasic',\n 'getIndustryDaily', 'getIndustryWeekly', 'getIndustryMonthly',\n 'getIndustry', 'resetIndex', 'readData', 'mergeDailyWeeklyMonthly',\n 'mergeWeeklyMonthly', 'mergeStockIndex', 'deleteSTKC', 'deleteNew',\n 'myMerge']\n\n\ndef myMerge(df1, df2, on=[], how='left'):\n cols = [i for i in df2.columns.values if i in df1.columns.values]\n cols = [i for i in cols if i not in on]\n df2 = df2.drop(cols, axis=1)\n df = pd.merge(df1, df2, on=on, how=how)\n return df\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\ndef getIndexBasic(LOCAL=True, market='SZSE'):\n if LOCAL:\n pool_df = pd.read_csv('./data/index/index_basic_info_' + market +\n '.csv')\n else:\n pool_df = pro.index_basic(market=market)\n return pool_df\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n 
dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryWeekly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/weekly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\ndef mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df):\n weekly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n weekly_df.rename(columns={'trade_date_weekly': 'trade_date'}, inplace=True)\n all_df = pd.merge(daily_df, weekly_df, how='left', on='trade_date')\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(all_df, monthly_df, how='left', on='trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef mergeWeeklyMonthly(weekly_df, monthly_df):\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n col_dic = 
{'trade_date_weekly': 'trade_date', 'ts_code_weekly':\n 'ts_code', 'trade_date_stamp_weekly': 'trade_date_stamp'}\n weekly_df.rename(columns=col_dic, inplace=True)\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(weekly_df, monthly_df, how='outer', on='trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef mergeStockIndex(stock_df, df):\n index_df = df.copy(deep=True)\n index_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_index') for i in index_df.columns.values]\n index_df.columns = cols\n index_df.rename(columns={'trade_date_index': 'trade_date'}, inplace=True)\n all_df = pd.merge(left=stock_df, right=index_df, how='left', on=\n 'trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
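The getStock / getIndex / getIndustry helpers above rely on getKdj and getMacd, which are never defined in this section. A minimal sketch of what those indicator helpers could look like, assuming the standard KDJ (9, 3, 3) and MACD (12, 26, 9) parameterisations and the tushare-style high/low/close column names used by the rest of the code (this is an assumption, not the author's original implementation):

```python
import pandas as pd

def getKdj(df, n=9, m1=3, m2=3):
    # Hypothetical helper: classic KDJ stochastic oscillator on tushare-style
    # OHLC columns ('high', 'low', 'close'); sketch only, not the source code.
    low_n = df['low'].rolling(n, min_periods=1).min()
    high_n = df['high'].rolling(n, min_periods=1).max()
    rsv = (df['close'] - low_n) / (high_n - low_n) * 100
    df['kdj_k'] = rsv.ewm(alpha=1 / m1, adjust=False).mean()
    df['kdj_d'] = df['kdj_k'].ewm(alpha=1 / m2, adjust=False).mean()
    df['kdj_j'] = 3 * df['kdj_k'] - 2 * df['kdj_d']
    return df

def getMacd(df, fast=12, slow=26, signal=9):
    # Hypothetical helper: standard MACD (DIF / DEA / histogram) on 'close'.
    ema_fast = df['close'].ewm(span=fast, adjust=False).mean()
    ema_slow = df['close'].ewm(span=slow, adjust=False).mean()
    df['macd_dif'] = ema_fast - ema_slow
    df['macd_dea'] = df['macd_dif'].ewm(span=signal, adjust=False).mean()
    df['macd'] = 2 * (df['macd_dif'] - df['macd_dea'])
    return df
```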
"<import token>\nwarnings.filterwarnings('ignore')\n<import token>\nts.set_token('29eaf3bcac23df4c6d025de157ab2d53beead3391fbe6e83b4ebcb6c')\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef myMerge(df1, df2, on=[], how='left'):\n cols = [i for i in df2.columns.values if i in df1.columns.values]\n cols = [i for i in cols if i not in on]\n df2 = df2.drop(cols, axis=1)\n df = pd.merge(df1, df2, on=on, how=how)\n return df\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\ndef getIndexBasic(LOCAL=True, market='SZSE'):\n if LOCAL:\n pool_df = pd.read_csv('./data/index/index_basic_info_' + market +\n '.csv')\n else:\n pool_df = pro.index_basic(market=market)\n return pool_df\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n 
daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryWeekly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/weekly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\ndef mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df):\n weekly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n weekly_df.rename(columns={'trade_date_weekly': 'trade_date'}, inplace=True)\n all_df = pd.merge(daily_df, weekly_df, how='left', on='trade_date')\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(all_df, monthly_df, how='left', on='trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef mergeWeeklyMonthly(weekly_df, monthly_df):\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n col_dic = {'trade_date_weekly': 'trade_date', 'ts_code_weekly':\n 'ts_code', 'trade_date_stamp_weekly': 'trade_date_stamp'}\n weekly_df.rename(columns=col_dic, inplace=True)\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n 
=True)\n all_df = pd.merge(weekly_df, monthly_df, how='outer', on='trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef mergeStockIndex(stock_df, df):\n index_df = df.copy(deep=True)\n index_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_index') for i in index_df.columns.values]\n index_df.columns = cols\n index_df.rename(columns={'trade_date_index': 'trade_date'}, inplace=True)\n all_df = pd.merge(left=stock_df, right=index_df, how='left', on=\n 'trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
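Note that getStockWeekly and getStockMonthly fall back to pro.daily when LOCAL is False, so their remote branch actually returns daily bars. A corrected sketch of that branch, assuming the tushare pro client's weekly and monthly bar endpoints (the function names below are illustrative, not from the source):

```python
def getStockWeeklyRemote(stock_code, start_date='20100101', end_date='20200314'):
    # Sketch: remote weekly bars via pro.weekly (the original code uses
    # pro.daily here, which returns daily bars instead of weekly ones).
    weekly_df = pro.weekly(ts_code=stock_code, start_date=start_date, end_date=end_date)
    return resetIndex(weekly_df)

def getStockMonthlyRemote(stock_code, start_date='20100101', end_date='20200314'):
    # Sketch: remote monthly bars via pro.monthly.
    monthly_df = pro.monthly(ts_code=stock_code, start_date=start_date, end_date=end_date)
    return resetIndex(monthly_df)
```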
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef myMerge(df1, df2, on=[], how='left'):\n cols = [i for i in df2.columns.values if i in df1.columns.values]\n cols = [i for i in cols if i not in on]\n df2 = df2.drop(cols, axis=1)\n df = pd.merge(df1, df2, on=on, how=how)\n return df\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\ndef getIndexBasic(LOCAL=True, market='SZSE'):\n if LOCAL:\n pool_df = pd.read_csv('./data/index/index_basic_info_' + market +\n '.csv')\n else:\n pool_df = pro.index_basic(market=market)\n return pool_df\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df 
= pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryWeekly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/weekly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\ndef mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df):\n weekly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n weekly_df.rename(columns={'trade_date_weekly': 'trade_date'}, inplace=True)\n all_df = pd.merge(daily_df, weekly_df, how='left', on='trade_date')\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(all_df, monthly_df, how='left', on='trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef mergeWeeklyMonthly(weekly_df, monthly_df):\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n col_dic = {'trade_date_weekly': 'trade_date', 'ts_code_weekly':\n 'ts_code', 'trade_date_stamp_weekly': 'trade_date_stamp'}\n weekly_df.rename(columns=col_dic, inplace=True)\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(weekly_df, monthly_df, how='outer', on='trade_date')\n 
all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef mergeStockIndex(stock_df, df):\n index_df = df.copy(deep=True)\n index_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_index') for i in index_df.columns.values]\n index_df.columns = cols\n index_df.rename(columns={'trade_date_index': 'trade_date'}, inplace=True)\n all_df = pd.merge(left=stock_df, right=index_df, how='left', on=\n 'trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
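readData leaves daily_df unbound (and raises NameError) when no CSV under dir_file starts with the nine-character stock code. A defensive variant, shown here as a sketch rather than a drop-in replacement, fails with an explicit FileNotFoundError instead:

```python
import os
import pandas as pd

def readDataSafe(dir_file, stock_code, start_date='20100101', end_date='20200314'):
    # Same lookup as readData, but reports a missing file explicitly.
    for file_dir, _, files in os.walk(dir_file):
        for file_name in files:
            if file_name[:9] == stock_code:
                df = pd.read_csv(os.path.join(file_dir, file_name))
                df['trade_date'] = df['trade_date'].astype('str')
                mask = (df['trade_date'] >= start_date) & (df['trade_date'] <= end_date)
                return df.loc[mask].reset_index(drop=True)
    raise FileNotFoundError(f'no CSV for {stock_code} under {dir_file}')
```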
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef myMerge(df1, df2, on=[], how='left'):\n cols = [i for i in df2.columns.values if i in df1.columns.values]\n cols = [i for i in cols if i not in on]\n df2 = df2.drop(cols, axis=1)\n df = pd.merge(df1, df2, on=on, how=how)\n return df\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\ndef getIndexBasic(LOCAL=True, market='SZSE'):\n if LOCAL:\n pool_df = pd.read_csv('./data/index/index_basic_info_' + market +\n '.csv')\n else:\n pool_df = pro.index_basic(market=market)\n return pool_df\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df 
= pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryWeekly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/weekly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n\n\ndef mergeWeeklyMonthly(weekly_df, monthly_df):\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n col_dic = {'trade_date_weekly': 'trade_date', 'ts_code_weekly':\n 'ts_code', 'trade_date_stamp_weekly': 'trade_date_stamp'}\n weekly_df.rename(columns=col_dic, inplace=True)\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(weekly_df, monthly_df, how='outer', on='trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef mergeStockIndex(stock_df, df):\n index_df = df.copy(deep=True)\n index_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_index') for i in index_df.columns.values]\n index_df.columns = cols\n index_df.rename(columns={'trade_date_index': 'trade_date'}, inplace=True)\n all_df = pd.merge(left=stock_df, right=index_df, how='left', on=\n 'trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, 
LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\ndef getIndexBasic(LOCAL=True, market='SZSE'):\n if LOCAL:\n pool_df = pd.read_csv('./data/index/index_basic_info_' + market +\n '.csv')\n else:\n pool_df = pro.index_basic(market=market)\n return pool_df\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n 
LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryWeekly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/weekly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n\n\ndef mergeWeeklyMonthly(weekly_df, monthly_df):\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n col_dic = {'trade_date_weekly': 'trade_date', 'ts_code_weekly':\n 'ts_code', 'trade_date_stamp_weekly': 'trade_date_stamp'}\n weekly_df.rename(columns=col_dic, inplace=True)\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(weekly_df, monthly_df, how='outer', on='trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef mergeStockIndex(stock_df, df):\n index_df = df.copy(deep=True)\n index_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_index') for i in index_df.columns.values]\n index_df.columns = cols\n index_df.rename(columns={'trade_date_index': 'trade_date'}, inplace=True)\n all_df = pd.merge(left=stock_df, right=index_df, how='left', on=\n 'trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n 
monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
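For the stock-pool helpers (getStockBasic, deleteSTKC, deleteNew), a short hedged usage sketch; LOCAL=True assumes the ./data/stock/stock_basic_info.csv dump referenced by the code is present:

```python
# Build a filtered A-share pool: drop names starting with '*' or 'S' (ST/*ST
# and S-shares), ts_codes starting with '3' (ChiNext) or '688' (STAR Market),
# and anything listed on or after 2019-01-01.
pool_df = getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101')
print(len(pool_df))
print(pool_df[['ts_code', 'name', 'industry', 'list_date']].head())
```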
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\ndef getIndexBasic(LOCAL=True, market='SZSE'):\n if LOCAL:\n pool_df = pd.read_csv('./data/index/index_basic_info_' + market +\n '.csv')\n else:\n pool_df = pro.index_basic(market=market)\n return pool_df\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n 
LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryWeekly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/weekly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n\n\ndef mergeWeeklyMonthly(weekly_df, monthly_df):\n cols = [(i + '_weekly') for i in weekly_df.columns]\n weekly_df.columns = cols\n col_dic = {'trade_date_weekly': 'trade_date', 'ts_code_weekly':\n 'ts_code', 'trade_date_stamp_weekly': 'trade_date_stamp'}\n weekly_df.rename(columns=col_dic, inplace=True)\n monthly_df.drop(['ts_code', 'trade_date_stamp'], axis=1, inplace=True)\n cols = [(i + '_monthly') for i in monthly_df.columns]\n monthly_df.columns = cols\n monthly_df.rename(columns={'trade_date_monthly': 'trade_date'}, inplace\n =True)\n all_df = pd.merge(weekly_df, monthly_df, how='outer', on='trade_date')\n all_df.fillna(method='ffill', inplace=True)\n return all_df\n\n\n<function token>\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, 
start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\ndef getIndexBasic(LOCAL=True, market='SZSE'):\n if LOCAL:\n pool_df = pd.read_csv('./data/index/index_basic_info_' + market +\n '.csv')\n else:\n pool_df = pro.index_basic(market=market)\n return pool_df\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n 
LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryWeekly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/weekly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = 
getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\n<function token>\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, 
start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryWeekly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/weekly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = 
getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\n<function token>\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, 
start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\ndef getIndex(stock_code, start_date, end_date, LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndexDaily(stock_code, start_date, end_date, LOCAL=True)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndexWeekly(stock_code, start_date, end_date, LOCAL=True)\n monthly_df = getIndexMonthly(stock_code, start_date, end_date, LOCAL=True)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, 
monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef deleteSTKC(pool_df):\n pool_df['name1'] = [i[0] for i in pool_df['name'].values]\n pool_df['code1'] = [i[0] for i in pool_df['ts_code'].values]\n pool_df['code3'] = [i[0:3] for i in pool_df['ts_code'].values]\n pool_df = pool_df.loc[pool_df['name1'] != '*', :]\n pool_df = pool_df.loc[pool_df['name1'] != 'S', :]\n pool_df = pool_df.loc[pool_df['code1'] != '3', :]\n pool_df = pool_df.loc[pool_df['code3'] != '688', :]\n pool_df = pool_df.drop(['name1', 'code1', 'code3'], axis=1)\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\n<function token>\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, 
start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\n<function token>\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndustryBasic():\n pool_df = pd.read_csv('./data/industry/all_industry_basic_info.csv')\n return pool_df\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', 
end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\ndef getStockBasic(LOCAL=True, noSTKC=True, list_data='20190101'):\n if LOCAL:\n pool_df = pd.read_csv('./data/stock/stock_basic_info.csv')\n pool_df['list_date'] = pool_df['list_date'].astype('str')\n else:\n fields = (\n 'ts_code,symbol,name,area,industry,list_date,market,list_status,delist_date,exchange'\n )\n pool_df = pro.stock_basic(list_status='L', fields=fields)\n if noSTKC:\n pool_df = deleteSTKC(pool_df)\n if list_data:\n pool_df = deleteNew(pool_df, list_data)\n return pool_df\n\n\n<function token>\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, 
start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\n<function token>\n<function token>\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', 
end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getStock(stock_code, start_date, end_date, LOCAL=True):\n daily_df = getStockDaily(stock_code, start_date, end_date, LOCAL=LOCAL)\n weekly_df = getStockWeekly(stock_code, start_date, end_date, LOCAL=LOCAL)\n monthly_df = getStockMonthly(stock_code, start_date, end_date, LOCAL=LOCAL)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n return all_df\n\n\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\n<function token>\n<function token>\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_monthly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', 
end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\n<function token>\n<function token>\n\n\ndef getIndexDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_daily(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = 
daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getStockDaily(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/daily/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = 
getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef getStockMonthly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/monthly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n 
all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef deleteNew(pool_df, list_data='20190101'):\n pool_df = pool_df.loc[pool_df.list_date.values < list_data, :]\n pool_df = pool_df.reset_index(drop=True)\n return pool_df\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndustry(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, merge_daily=True):\n if merge_daily:\n daily_df = getIndustryDaily(stock_code, start_date, end_date)\n daily_df = getKdj(daily_df)\n daily_df = getMacd(daily_df)\n weekly_df = getIndustryWeekly(stock_code, start_date, end_date)\n monthly_df = getIndustryMonthly(stock_code, start_date, end_date)\n weekly_df = getKdj(weekly_df)\n weekly_df = getMacd(weekly_df)\n monthly_df = getKdj(monthly_df)\n monthly_df = getMacd(monthly_df)\n if merge_daily:\n all_df = mergeDailyWeeklyMonthly(daily_df, weekly_df, monthly_df)\n else:\n all_df = mergeWeeklyMonthly(weekly_df, monthly_df)\n return all_df\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n\n\ndef getStockWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True):\n dir_file = './data/stock/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.daily(ts_code=stock_code, start_date=start_date,\n end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndustryDaily(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/daily/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\ndef readData(dir_file, stock_code, start_date='20100101', end_date='20200314'):\n for file_dir, _, files in os.walk(dir_file):\n for i, file_name in enumerate(files):\n if file_name[:9] == stock_code:\n daily_df = pd.read_csv(file_dir + file_name)\n daily_df['trade_date'] = daily_df['trade_date'].astype('str')\n daily_df = daily_df.loc[daily_df['trade_date'] >= start_date, :\n ].reset_index(drop=True)\n daily_df = daily_df.loc[daily_df['trade_date'] <= end_date, :\n ].reset_index(drop=True)\n break\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndustryMonthly(stock_code, start_date='20100101', end_date='20200314'):\n dir_file = './data/industry/monthly/'\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef getIndexWeekly(stock_code, start_date='20100101', end_date='20200314',\n LOCAL=True, market='SZSE'):\n dir_file = './data/index/' + market + '/weekly/'\n if LOCAL:\n daily_df = readData(dir_file, stock_code, start_date, end_date)\n else:\n daily_df = pro.index_weekly(ts_code=stock_code, start_date=\n start_date, end_date=end_date)\n daily_df = resetIndex(daily_df)\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef resetIndex(daily_df):\n daily_df['trade_date_stamp'] = daily_df['trade_date'].copy()\n daily_df['trade_date_stamp'] = pd.to_datetime(daily_df['trade_date_stamp']\n ).map(date2num)\n daily_df.sort_values(by='trade_date_stamp', ascending=True, inplace=True)\n daily_df.reset_index(drop=True, inplace=True)\n return daily_df\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
98,887 |
a8cb755ba2839b7ee118bcf2d0a0a71ed4dbb011
|
class Solution(object):
    def wordBreak(self, s, words):
        # Bottom-up DP: d[i] is True if the prefix s[:i+1] can be segmented into words.
        d = [False] * len(s)
        for i in range(len(d)):
            for w in words:
                # w must match the substring ending at i, and the part before w
                # must be empty or itself segmentable.
                if w == s[i - len(w) + 1:i + 1] and (i - len(w) + 1 == 0 or d[i - len(w)]):
                    d[i] = True
        return d[-1]

    def wordBreakHelper(self, s, begin, wb, w_d):
        # Recursive alternative: grow the buffer wb until it is a dictionary
        # word, then restart on the remainder of the string.
        for i in range(begin, len(s)):
            wb += s[i]
            if wb in w_d:
                if i == len(s) - 1:
                    return True
                if self.wordBreakHelper(s, i + 1, '', w_d):
                    return True
        return False

    def word_Break(self, s, wordDict):
        return self.wordBreakHelper(s, 0, '', set(wordDict))

sol = Solution()
print(sol.wordBreak('leetcode', ['le', 'et', 'etco', 'de']))
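# Editor's sketch (not part of the original solution): the same question is often
# answered with a prefix-length DP, where dp[i] means s[:i] can be segmented.
# word_break_dp is a hypothetical helper shown only to clarify the recurrence
# used in wordBreak above.
def word_break_dp(s, words):
    word_set = set(words)
    dp = [False] * (len(s) + 1)
    dp[0] = True  # the empty prefix is always segmentable
    for i in range(1, len(s) + 1):
        for j in range(i):
            if dp[j] and s[j:i] in word_set:
                dp[i] = True
                break
    return dp[len(s)]

print(word_break_dp('leetcode', ['le', 'et', 'etco', 'de']))  # True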
|
[
"class Solution(object):\n\n def wordBreak(self,s,words):\n d = [False] * len(s)\n for i in range(0,len(d)):\n for w in words:\n if w == s[i-len(w)+1:i+1] and (d[i-len(w)] or i-len(w)+1 == 0):\n d[i] = True \n return d[-1]\n\n def wordBreakHelper(self,s,begin,wb,w_d):\n for i in range(begin,len(s)):\n wb += s[i]\n if wb in w_d:\n if i == len(s) - 1: return True\n if(self.wordBreakHelper(s,i+1,'',w_d)): return True\n return False \n \n def word_Break(self, s, wordDict):\n return self.wordBreakHelper(s,0,'',set(wordDict)) \n\nsol = Solution()\nprint sol.wordBreak('leetcode',['le','et','etco','de']);\n \n \n "
] | true |
98,888 |
f45b240698b41e00419987c6fce6b6974e9d21bb
|
import numpy as np
import cv2
import time
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
gpios = True
try:  # If the program is not running on a Raspberry Pi, do not use the GPIOs
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(4, GPIO.OUT)
    GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # enable the internal pull-up
except ImportError:
gpios = False
print('WARNING - no GPIOS found')
##README#####################################################################################################################
# Email settings: line 127
# Image crops: from line 164 onwards
# Iterations for erosion and dilation: lines 255, 257
# Hough detector: line 193
# Blob detector parameters: line 77
# Blob detector parameters: line 272
##PARAMETERS#################################################################################################################
log_name = 'log_seite2'  # Name of the log file (summary of the measurement series): NOT appended to
raw_numbers_name = 'raw_seite2'  # Name of the file in which every single roll is stored: appended to
email_header = 'dicer - seite2'  # Email subject
darknumbers = False  # Dark dice pips?
send_email = True  # Send an email with the measurement data?
email_log_number = 6000  # After how many rolls should an email be sent?
error_logging = True  # Save an image when an error occurs?
measures = 18000  # Number of measurements: -1 for unlimited
# Time at which to stop automatically (works, but is currently disabled: line 311):
#endtime_hr = 22
#endtime_min = 45
cap = cv2.VideoCapture(0)  # Image source: (change the number if several cameras are connected (including internal webcams))
###########################################################################################################################
print('Setting up...')
interrupted = False
dicer_ready = False
ret, frame = cap.read()  # Test whether the camera works
if ret is not True:  # If the camera does not work, load a dummy image
dicer_ready = False
grey = cv2.imread('dummy_image.png', 0)
cv2.putText(grey, 'NO CAMERA', (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
pos_img = np.zeros(shape=[100, 100, 1], dtype=np.uint8)
cv2.imshow('Press any key to exit', grey)
print('Error - stopping')
    cv2.waitKey()  # press any key to exit
elif gpios == True and GPIO.input(18) == 0:  # check the temperature relay if an RPi is present (gpios checked first so GPIO is only used when it exists)
print('Temperature relay is offline, stopping')
else:
dicer_ready = True
global_steptime = 0.00015  # delay between motor steps
# configure the blob detector
blob_params = cv2.SimpleBlobDetector_Params()
blob_params.filterByColor = True
blob_params.filterByArea = True
blob_params.minArea = 100
blob_params.filterByCircularity = True
blob_params.minCircularity = 0.7
blob_params.filterByInertia = False
blob_params.filterByConvexity = False
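# Notes on the values above (added for clarity): minArea is measured in pixels and
# rejects small noise blobs; circularity is 4*pi*area/perimeter^2, so a
# minCircularity of 0.7 keeps only roughly round pips. Inertia and convexity
# filtering are disabled. The exact numbers are tuned to this camera setup (see
# the README block above).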
all_numbers = [0] * 9  # [one, two, three, four, five, six, errorcnt, rollnumber, std_dev]
def interr(channel):
global gpios
global dicer_ready
global interrupted
gpios = False
dicer_ready = False
interrupted = True
print('Interrupt')
def step_plus(steptime):
GPIO.output(17, GPIO.LOW)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
def step_minus(steptime):
GPIO.output(17, GPIO.HIGH)
GPIO.output(4, GPIO.HIGH)
time.sleep(steptime)
GPIO.output(4, GPIO.LOW)
time.sleep(steptime)
GPIO.output(17, GPIO.LOW)
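# Inferred from the two functions above (not stated in the original): GPIO 17
# appears to select the stepper direction and GPIO 4 carries the step pulse;
# step_minus() restores the direction pin to LOW after stepping.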
def clock(now):
time_seconds = int((time.time() - now))
t_hr = int(time_seconds / 3600)
t_min = int(time_seconds / 60) - (t_hr * 60)
t_sec = int(time_seconds) - (t_min * 60)
showTime = str(t_hr) + ':' + str(t_min).zfill(2)
print(showTime)
return showTime
def write_email(numbers, ctime, error, header_name):
server = smtplib.SMTP('SERVERADRESSE', PORTNR)
server.starttls()
server.login('LOGIN-BENUTZERNAME', 'PASSWORT')
msg = MIMEMultipart()
msg['From'] = 'ABSENDER'
msg['To'] = 'EMPFAENGER'
if error:
msg['Subject'] = 'Error'
else:
msg['Cc'] = 'KOPIE ADRESSE'
msg['Subject'] = header_name
message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]) + ',' + str(numbers[3]) + ',' + str(
numbers[4]) + ',' + str(numbers[5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(
numbers[7]) + '\n' + 'Zeit: '+ str(ctime)
msg.attach(MIMEText(message))
server.send_message(msg)
def logging(numbers, ctime, log_name):
file = open(log_name, 'w')
file.write('Einz:' + str(numbers[0]) + '\n')
file.write('Zwei:' + str(numbers[1]) + '\n')
file.write("Drei: " + str(numbers[2]) + '\n')
file.write("Vier: " + str(numbers[3]) + '\n')
file.write("Fuenf: " + str(numbers[4]) + '\n')
file.write("Sechs: " + str(numbers[5]) + '\n')
file.write('Fehler: ' + str(numbers[6]) + '\n')
file.write('Gesamt: ' + str(numbers[7]) + '\n')
file.write('Standardabw: ' + str(numbers[8]) + '\n')
file.write('Zeit: ' + str(ctime) + '\n')
file.close()
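# Note: the summary log is opened with mode 'w' and therefore rewritten on every
# call, while the raw per-roll file (see counting() below) is opened with 'a' and
# appended to, which matches the comments in the PARAMETERS block.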
def get_images():
for i in range(5):
ret, frame = cap.read()
#cv2.imwrite('frame.png',frame)
    # Crop regions for the die and for position detection
y = 160
h = 240
x = 220
w = 240
dice_image = frame[y:y + h, x:x + w]
grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)
#cv2.imshow('input', grey)
#cv2.imwrite('real_image.png',frame)
y = 120
h = 15
pos_img = frame[y:y + h, x:x + w]
pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)
#cv2.imwrite('pos_raw.png',pos_img)
ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)
#cv2.imshow('pos', pos_img)
#cv2.imwrite('pos.png',pos_img)
return grey, pos_img
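# Note: the crop rectangles above (die: y=160, h=240, x=220, w=240; position strip:
# y=120, h=15) are specific to this camera mount; these are the values the README
# block refers to and will need adjusting for a different setup.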
def hough_detector(input_img):
#cv2.imshow('hough_input', input_image)
    img = cv2.medianBlur(input_img, 5)  # smooth the image with a median filter
    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)  # convert the color space (only needed for the colored circles)
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200, param2=10, minRadius=5,
                               maxRadius=25)  # param1: edge threshold, param2: accumulator threshold, needs to be tuned
    h_number = 0
    try:  # count and mark the circles
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
# draw the outer circle
cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
h_number += 1
except:
print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')
cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 50), 2, cv2.LINE_AA)
cv2.imshow('hough detector', cimg)
cv2.imwrite('hough detector.png', cimg)
return h_number
def img_processing(image_input):  # image pre-processing
    image_input = cv2.medianBlur(image_input, 3)  # smooth the image with a median filter
    ret, binary_image = cv2.threshold(image_input, 220, 255,
                                      cv2.THRESH_BINARY)  # threshold image
    #cv2.imwrite('binary1.png', binary_image)
    if darknumbers:  # with dark pips, flood the area around the die white
        w = binary_image.shape[1]  # width (x)
        h = binary_image.shape[0]  # height (y)
        mask = np.zeros((h + 2, w + 2), np.uint8)
        cv2.floodFill(binary_image, mask, (0, 0), 255)
        mask = np.zeros((h + 2, w + 2), np.uint8)
        # seed point is (x, y); use the last valid pixel instead of (h, w), which lies outside the image
        cv2.floodFill(binary_image, mask, (w - 1, h - 1), 255)
    else:
        binary_image = cv2.bitwise_not(binary_image)  # with light pips, inverting the image is enough
#cv2.imwrite('binary2.png', binary_image)
kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
                             [0, 0, 0, 1, 1, 1, 0, 0, 0]], dtype=np.uint8)  # create a circular structuring element
    dilate = cv2.dilate(binary_image, kernel_round, iterations=3)  # apply dilation
    erode = cv2.erode(dilate, kernel_round, iterations=2)  # apply erosion
return erode
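# Editor's sketch (assumption, not part of the original pipeline): dilation
# followed by erosion with the same kernel is a morphological closing, so the two
# calls above could also be written with cv2.morphologyEx. img_processing_close()
# is a hypothetical helper and only matches the code above when the dilate and
# erode iteration counts are equal (here they are 3 and 2, leaving the pips
# slightly thickened).
def img_processing_close(binary_image, kernel, iterations=2):
    # MORPH_CLOSE = dilate, then erode, with the same structuring element
    return cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel, iterations=iterations)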
def counting(image, all_numbers, dice_image, raw_numbers_name):
one = all_numbers[0]
two = all_numbers[1]
three = all_numbers[2]
four = all_numbers[3]
five = all_numbers[4]
six = all_numbers[5]
errorcnt = all_numbers[6]
success_rolls= all_numbers[7]
detector = cv2.SimpleBlobDetector_create(blob_params)
keypoints = detector.detect(image)
img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
blob_number = 0
for i in keypoints[0:]:
blob_number = blob_number + 1
cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
cv2.LINE_AA)
hough_number = hough_detector(image)
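    # Consistency check: the blob count is only accepted if the independent
    # Hough-circle count agrees; any mismatch is recorded as an error below.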
if blob_number == hough_number:
number = blob_number
print('DETECTED: ', number)
if blob_number > 0 and blob_number < 7:
raw_log = open(raw_numbers_name,'a')
raw_log.write(str(number) + '\n')
raw_log.close()
success_rolls +=1
all_numbers[number-1] += 1
else:
errorcnt = errorcnt + 1
if error_logging is True:
cv2.imwrite('errors/' + str(errorcnt) + ' number_error_binary.png', image)
cv2.imwrite('errors/' + str(errorcnt) + ' number_error_real.png', dice_image)
else:
print('NOT MATCHING FILTERS')
errorcnt = errorcnt + 1
if error_logging is True:
cv2.imwrite('errors/' + str(errorcnt) + ' matching_error.png', image)
cv2.imwrite('errors/' + str(errorcnt) + ' matching_error_real.png', dice_image)
rolled = [one, two, three, four, five, six]
std_dev = np.std(rolled)
all_numbers[6] = errorcnt
all_numbers[7] = success_rolls
all_numbers[8] = std_dev
cv2.imshow('blob detector', img_with_keypoints)
cv2.imwrite('blob_detector.png', img_with_keypoints)
return all_numbers
start_time = time.time()
if dicer_ready is True and gpios:  # initialize the temperature-relay interrupt (only when GPIOs are available)
GPIO.add_event_detect(18, GPIO.FALLING, callback = interr, bouncetime = 200)
print('Starting...')
while dicer_ready is True:
    #localtime = time.localtime(time.time())
    #if localtime.tm_hour >= endtime_hr and localtime.tm_min >= endtime_min:  # shut down at a given time of day
    # dicer_ready = False
if gpios:
for i in range(3200):
            #if i > 3100:  # slow down the last few steps
# steptime = steptime + global_steptime * 0.1
#else:
steptime = global_steptime
step_plus(steptime)
    time.sleep(0.6)  # short pause so the die can come to rest
position_correct = False
real_image, pos_img = get_images() # Aufnahme machen
    while position_correct is not True and gpios is True:  # determine the position from the image of the white square
real_image, pos_img = get_images()
#cv2.imshow('pos', pos_img)
        M = cv2.moments(pos_img)  # compute image moments
#print(M)
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
else:
cX = 0
#cv2.circle(pos_img, (cX, cY), 4, (0, 0, 0), -1)
if cX < 115:
step_minus(global_steptime)
elif cX > 135:
step_plus(global_steptime)
else:
position_correct = True
#print('correct position:')
#print("X:", cX, "Y:", cY)
#cv2.imwrite('newpos.png',pos_img)
processed_img = img_processing(real_image)
numbers = counting(processed_img, all_numbers, real_image, raw_numbers_name)
cv2.imshow('Hold Q to exit', real_image)
ctime = clock(start_time)
    if (numbers[7] % 10) == 0:  # write to the log every 10 measurements
logging(numbers, ctime, log_name)
    if send_email is True and (numbers[7] % email_log_number) == 0:  # send an email after the configured number of measurements
write_email(numbers, ctime,0, email_header)
print('=================')
print('Time: ' + str(ctime))
print('One: ', numbers[0])
print('Two: ', numbers[1])
print('Three: ', numbers[2])
print('Four: ', numbers[3])
print('Five: ', numbers[4])
print('Six: ', numbers[5])
print('Errors: ', numbers[6])
print('Success rolls: ', numbers[7])
print('Deviation: ', numbers[8])
print('=================')
if numbers[7] == measures:
break
    if cv2.waitKey(200) & 0xFF == ord('q'):  # press Q to exit (best to hold it down until the program stops)
break
if interrupted == True:  # if the interrupt (temperature error) was triggered
    write_email(numbers, ctime, 1, email_header)
elif dicer_ready == True and send_email == True:  # if the measurement finished normally
    write_email(numbers, ctime, 0, email_header)
cap.release()
cv2.destroyAllWindows()
print('Everything finished')
|
[
"import numpy as np\nimport cv2\nimport time\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\ngpios = True\n\ntry: # Wenn Programm nicht auf einem Raspberry läuft, GPIOS nicht benutzen\n import RPi.GPIO as GPIO\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(17, GPIO.OUT)\n GPIO.setup(4, GPIO.OUT)\n GPIO.setup(18, GPIO.IN, pull_up_down = GPIO.PUD_UP) # internen Pull-Up aktivieren\n \nexcept ImportError:\n gpios = False\n print('WARNING - no GPIOS found')\n\n##README#####################################################################################################################\n# Email: Zeile 127\n# Bildausschnitte: ab Zeile 164\n# Iterationen für Erosion und Dilatation: Zeile 255, 257\n# Hough-Detector: Zeile 193\n# Blob-Detector Parameter: Zeile 77\n# Blob-Detector Parameter: Zeile 272\n\n##PARAMETERS#################################################################################################################\n\nlog_name = 'log_seite2' # Name der Log Datei (Zusammenfassung der Messreihe): Wird NICHT fortgesetzt\nraw_numbers_name = 'raw_seite2' # Name der Datei, in der alle Würfe einzeln gespeichert werden: Wird fortgesetzt\nemail_header = 'dicer - seite2' # Emailbetreff\n\ndarknumbers = False # Dunkle Würfelaugen?\n\nsend_email = True # Email mit Messdaten versenden?\nemail_log_number = 6000 # Nach wie vielen Würfen soll jeweils eine Email geschrieben werden?\n\nerror_logging = True #Bild bei Fehler speichern?\n\nmeasures = 18000 #Anzahl der Messungen: -1 für unendlich\n\n#Uhrzeit, wenn automatisch beendet werden soll (funktionert, ist aber gerade deaktiviert: Zeile 311): \n#endtime_hr = 22\n#endtime_min = 45\n\ncap = cv2.VideoCapture(0) # Bildquelle: (Zahl ändern, falls mehrere Kameras angeschlossen sind (auch interne Webcams))\n\n###########################################################################################################################\n\nprint('Setting up...')\n\ninterrupted = False\ndicer_ready = False\n\nret, frame = cap.read() # Test, ob Kamera funktionert\n\nif ret is not True: #Wenn Kamera nicht geht, Dummy Image laden\n dicer_ready = False\n grey = cv2.imread('dummy_image.png', 0)\n cv2.putText(grey, 'NO CAMERA', (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n pos_img = np.zeros(shape=[100, 100, 1], dtype=np.uint8)\n cv2.imshow('Press any key to exit', grey)\n print('Error - stopping')\n cv2.waitKey() # Taste drücken, zum beenden\nelif GPIO.input(18) == 0 and gpios == True: # Temperaturreais prüfen wenn RPi vorhanden\n print('Temperature relay is offline, stopping')\nelse:\n dicer_ready = True\n\nglobal_steptime = 0.00015 # Abstand zwischen den Schritten\n\n# blobdetektor konfigurieren\nblob_params = cv2.SimpleBlobDetector_Params()\nblob_params.filterByColor = True\nblob_params.filterByArea = True\nblob_params.minArea = 100\nblob_params.filterByCircularity = True\nblob_params.minCircularity = 0.7\nblob_params.filterByInertia = False\nblob_params.filterByConvexity = False\n\nall_numbers = [0] * 9 # [one, two, three, four, five, six, errorcnt, rollnumber, std_dev\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n \n\ndef step_plus(steptime):\n GPIO.output(17, GPIO.LOW)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n\n\ndef step_minus(steptime):\n GPIO.output(17, GPIO.HIGH)\n GPIO.output(4, 
GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n GPIO.output(17, GPIO.LOW)\n\n\ndef clock(now):\n time_seconds = int((time.time() - now))\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - (t_hr * 60)\n t_sec = int(time_seconds) - (t_min * 60)\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]) + ',' + str(numbers[3]) + ',' + str(\n numbers[4]) + ',' + str(numbers[5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(\n numbers[7]) + '\\n' + 'Zeit: '+ str(ctime)\n msg.attach(MIMEText(message))\n\n server.send_message(msg)\n\n\ndef logging(numbers, ctime, log_name):\n\n file = open(log_name, 'w')\n file.write('Einz:' + str(numbers[0]) + '\\n')\n file.write('Zwei:' + str(numbers[1]) + '\\n')\n file.write(\"Drei: \" + str(numbers[2]) + '\\n')\n file.write(\"Vier: \" + str(numbers[3]) + '\\n')\n file.write(\"Fuenf: \" + str(numbers[4]) + '\\n')\n file.write(\"Sechs: \" + str(numbers[5]) + '\\n')\n file.write('Fehler: ' + str(numbers[6]) + '\\n')\n file.write('Gesamt: ' + str(numbers[7]) + '\\n')\n file.write('Standardabw: ' + str(numbers[8]) + '\\n')\n file.write('Zeit: ' + str(ctime) + '\\n')\n\n file.close()\n\n\ndef get_images():\n for i in range(5):\n ret, frame = cap.read()\n\n #cv2.imwrite('frame.png',frame)\n # Bildausschnitte von Würfel und Positionserkennung\n y = 160\n h = 240\n\n x = 220\n w = 240\n\n dice_image = frame[y:y + h, x:x + w]\n grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)\n #cv2.imshow('input', grey)\n #cv2.imwrite('real_image.png',frame)\n y = 120\n h = 15\n\n\n pos_img = frame[y:y + h, x:x + w]\n pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)\n #cv2.imwrite('pos_raw.png',pos_img)\n ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)\n #cv2.imshow('pos', pos_img)\n #cv2.imwrite('pos.png',pos_img)\n return grey, pos_img\n\n\ndef hough_detector(input_img):\n #cv2.imshow('hough_input', input_image)\n img = cv2.medianBlur(input_img, 5) # Bild gätten mit Gauß\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) # Farbraum umwandeln (nur für die farbigen Kreise)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200, param2=10, minRadius=5,\n maxRadius=25) # param1: Schwellenwert, param2: muss man ausprobieren\n\n h_number = 0\n\n try: # Kreise zählen und markieren\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n # draw the outer circle\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n\n return h_number\n\n\ndef img_processing(image_input): # Bild vorbereitung\n\n\n image_input = cv2.medianBlur(image_input, 3) # Bild gätten mit Gauß\n ret, binary_image = cv2.threshold(image_input, 220, 255,\n cv2.THRESH_BINARY) # Schwellenwertbild\n\n \n #cv2.imwrite('binary1.png', binary_image)\n\n if darknumbers: # Wenn dunkle Würfelaugen, dann Bereich um 
den Würfel weiß machen\n w = binary_image.shape[1] #y\n h = binary_image.shape[0] #x\n\n mask = np.zeros((h + 2, w + 2), np.uint8)\n\n cv2.floodFill(binary_image, mask, (0,0), 255);\n \n mask = np.zeros((h + 2, w + 2), np.uint8)\n \n cv2.floodFill(binary_image, mask, (h,w), 255);\n \n else:\n binary_image = cv2.bitwise_not(binary_image) # Bei hellen Würfelaugen reicht invertieren des Bildes\n\n #cv2.imwrite('binary2.png', binary_image)\n\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 1, 1, 1, 1, 1, 1, 1, 0],\n [0, 0, 0, 1, 1, 1, 0, 0, 0]], dtype=np.uint8) # Kreisförmige Maske erzeugen\n\n dilate = cv2.dilate(binary_image,kernel_round, iterations=3) # Dilatation anwenden\n\n erode = cv2.erode(dilate, kernel_round, iterations=2) # Erosion anwenden\n\n return erode\n\n\ndef counting(image, all_numbers, dice_image, raw_numbers_name):\n one = all_numbers[0]\n two = all_numbers[1]\n three = all_numbers[2]\n four = all_numbers[3]\n five = all_numbers[4]\n six = all_numbers[5]\n errorcnt = all_numbers[6]\n success_rolls= all_numbers[7]\n\n detector = cv2.SimpleBlobDetector_create(blob_params)\n keypoints = detector.detect(image)\n img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n blob_number = 0\n\n for i in keypoints[0:]:\n blob_number = blob_number + 1\n\n cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,\n cv2.LINE_AA)\n\n hough_number = hough_detector(image)\n\n \n if blob_number == hough_number:\n number = blob_number\n print('DETECTED: ', number)\n \n \n \n if blob_number > 0 and blob_number < 7:\n raw_log = open(raw_numbers_name,'a')\n raw_log.write(str(number) + '\\n')\n raw_log.close() \n success_rolls +=1\n all_numbers[number-1] += 1\n else:\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) + ' number_error_binary.png', image)\n cv2.imwrite('errors/' + str(errorcnt) + ' number_error_real.png', dice_image)\n\n else:\n print('NOT MATCHING FILTERS')\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) + ' matching_error.png', image)\n cv2.imwrite('errors/' + str(errorcnt) + ' matching_error_real.png', dice_image)\n \n rolled = [one, two, three, four, five, six]\n std_dev = np.std(rolled)\n\n all_numbers[6] = errorcnt\n all_numbers[7] = success_rolls\n all_numbers[8] = std_dev\n\n cv2.imshow('blob detector', img_with_keypoints)\n cv2.imwrite('blob_detector.png', img_with_keypoints)\n\n return all_numbers\n\n\n\nstart_time = time.time()\n\nif dicer_ready is True: # Interrupt initialisieren\n GPIO.add_event_detect(18, GPIO.FALLING, callback = interr, bouncetime = 200)\n print('Starting...')\n\n\nwhile dicer_ready is True:\n #localtime = time.localtime(time.time())\n #if localtime.tm_hour >= endtime_hr and localtime.tm_min >= endtime_min: # Abschaltung nach Uhrzeit\n # dicer_ready = False\n \n \n if gpios:\n for i in range(3200):\n\n #if i > 3100: # die letzten Schritte abbremsen\n # steptime = steptime + global_steptime * 0.1\n #else:\n steptime = global_steptime\n\n step_plus(steptime)\n\n time.sleep(0.6) # Kurze Pause, damit Würfel ruhig liegen kann\n \n position_correct = False\n real_image, pos_img = get_images() # Aufnahme machen\n\n while position_correct is not True and 
gpios is True: #Positionsbestimmung mit Bild von weißem Viereck\n real_image, pos_img = get_images()\n #cv2.imshow('pos', pos_img)\n \n M = cv2.moments(pos_img) # Momente berechnen\n\n #print(M)\n\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n else:\n cX = 0\n\n #cv2.circle(pos_img, (cX, cY), 4, (0, 0, 0), -1)\n\n if cX < 115:\n step_minus(global_steptime)\n elif cX > 135:\n step_plus(global_steptime)\n else:\n position_correct = True\n #print('correct position:')\n #print(\"X:\", cX, \"Y:\", cY)\n #cv2.imwrite('newpos.png',pos_img)\n\n processed_img = img_processing(real_image)\n numbers = counting(processed_img, all_numbers, real_image, raw_numbers_name) \n cv2.imshow('Hold Q to exit', real_image)\n\n ctime = clock(start_time)\n\n if (numbers[7] % 10) == 0: # Nach 10 Messungen ins log schreiben\n logging(numbers, ctime, log_name)\n\n if send_email is True and (numbers[7] % email_log_number) == 0: #Bei gewünschter Anzahl Messungen eine Email schreiben\n write_email(numbers, ctime,0, email_header)\n\n print('=================')\n print('Time: ' + str(ctime))\n print('One: ', numbers[0])\n print('Two: ', numbers[1])\n print('Three: ', numbers[2])\n print('Four: ', numbers[3])\n print('Five: ', numbers[4])\n print('Six: ', numbers[5])\n print('Errors: ', numbers[6])\n print('Success rolls: ', numbers[7])\n print('Deviation: ', numbers[8])\n print('=================')\n\n if numbers[7] == measures:\n break\n\n if cv2.waitKey(200) & 0xFF == ord('q'): # Q drücken, zum beenden (am besten gedrückt halten, bis beendet wurde)\n break\n\nif interrupted == True: #wenn Interrupt (Temperaturfehler) ausgelöst wurde\n write_email(numbers, ctime,1, email_header)\nelif dicer_ready == True and send_email == True: #wenn Messung normal beendet wurde\n write_email(numbers, ctime,0, email_header)\n \ncap.release()\ncv2.destroyAllWindows()\nprint('Everything finished')\n",
"import numpy as np\nimport cv2\nimport time\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\ngpios = True\ntry:\n import RPi.GPIO as GPIO\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(17, GPIO.OUT)\n GPIO.setup(4, GPIO.OUT)\n GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nexcept ImportError:\n gpios = False\n print('WARNING - no GPIOS found')\nlog_name = 'log_seite2'\nraw_numbers_name = 'raw_seite2'\nemail_header = 'dicer - seite2'\ndarknumbers = False\nsend_email = True\nemail_log_number = 6000\nerror_logging = True\nmeasures = 18000\ncap = cv2.VideoCapture(0)\nprint('Setting up...')\ninterrupted = False\ndicer_ready = False\nret, frame = cap.read()\nif ret is not True:\n dicer_ready = False\n grey = cv2.imread('dummy_image.png', 0)\n cv2.putText(grey, 'NO CAMERA', (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 2, cv2.LINE_AA)\n pos_img = np.zeros(shape=[100, 100, 1], dtype=np.uint8)\n cv2.imshow('Press any key to exit', grey)\n print('Error - stopping')\n cv2.waitKey()\nelif GPIO.input(18) == 0 and gpios == True:\n print('Temperature relay is offline, stopping')\nelse:\n dicer_ready = True\nglobal_steptime = 0.00015\nblob_params = cv2.SimpleBlobDetector_Params()\nblob_params.filterByColor = True\nblob_params.filterByArea = True\nblob_params.minArea = 100\nblob_params.filterByCircularity = True\nblob_params.minCircularity = 0.7\nblob_params.filterByInertia = False\nblob_params.filterByConvexity = False\nall_numbers = [0] * 9\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\ndef step_plus(steptime):\n GPIO.output(17, GPIO.LOW)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n\n\ndef step_minus(steptime):\n GPIO.output(17, GPIO.HIGH)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n GPIO.output(17, GPIO.LOW)\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\ndef logging(numbers, ctime, log_name):\n file = open(log_name, 'w')\n file.write('Einz:' + str(numbers[0]) + '\\n')\n file.write('Zwei:' + str(numbers[1]) + '\\n')\n file.write('Drei: ' + str(numbers[2]) + '\\n')\n file.write('Vier: ' + str(numbers[3]) + '\\n')\n file.write('Fuenf: ' + str(numbers[4]) + '\\n')\n file.write('Sechs: ' + str(numbers[5]) + '\\n')\n file.write('Fehler: ' + str(numbers[6]) + '\\n')\n file.write('Gesamt: ' + str(numbers[7]) + '\\n')\n file.write('Standardabw: ' + str(numbers[8]) + '\\n')\n file.write('Zeit: ' + str(ctime) + 
'\\n')\n file.close()\n\n\ndef get_images():\n for i in range(5):\n ret, frame = cap.read()\n y = 160\n h = 240\n x = 220\n w = 240\n dice_image = frame[y:y + h, x:x + w]\n grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)\n y = 120\n h = 15\n pos_img = frame[y:y + h, x:x + w]\n pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)\n ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)\n return grey, pos_img\n\n\ndef hough_detector(input_img):\n img = cv2.medianBlur(input_img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200,\n param2=10, minRadius=5, maxRadius=25)\n h_number = 0\n try:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n return h_number\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\ndef counting(image, all_numbers, dice_image, raw_numbers_name):\n one = all_numbers[0]\n two = all_numbers[1]\n three = all_numbers[2]\n four = all_numbers[3]\n five = all_numbers[4]\n six = all_numbers[5]\n errorcnt = all_numbers[6]\n success_rolls = all_numbers[7]\n detector = cv2.SimpleBlobDetector_create(blob_params)\n keypoints = detector.detect(image)\n img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]),\n (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n blob_number = 0\n for i in keypoints[0:]:\n blob_number = blob_number + 1\n cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n hough_number = hough_detector(image)\n if blob_number == hough_number:\n number = blob_number\n print('DETECTED: ', number)\n if blob_number > 0 and blob_number < 7:\n raw_log = open(raw_numbers_name, 'a')\n raw_log.write(str(number) + '\\n')\n raw_log.close()\n success_rolls += 1\n all_numbers[number - 1] += 1\n else:\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_binary.png', image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_real.png', dice_image)\n else:\n print('NOT MATCHING FILTERS')\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) + ' matching_error.png',\n image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' matching_error_real.png', dice_image)\n rolled = [one, two, three, four, five, six]\n std_dev 
= np.std(rolled)\n all_numbers[6] = errorcnt\n all_numbers[7] = success_rolls\n all_numbers[8] = std_dev\n cv2.imshow('blob detector', img_with_keypoints)\n cv2.imwrite('blob_detector.png', img_with_keypoints)\n return all_numbers\n\n\nstart_time = time.time()\nif dicer_ready is True:\n GPIO.add_event_detect(18, GPIO.FALLING, callback=interr, bouncetime=200)\n print('Starting...')\nwhile dicer_ready is True:\n if gpios:\n for i in range(3200):\n steptime = global_steptime\n step_plus(steptime)\n time.sleep(0.6)\n position_correct = False\n real_image, pos_img = get_images()\n while position_correct is not True and gpios is True:\n real_image, pos_img = get_images()\n M = cv2.moments(pos_img)\n if M['m00'] != 0:\n cX = int(M['m10'] / M['m00'])\n else:\n cX = 0\n if cX < 115:\n step_minus(global_steptime)\n elif cX > 135:\n step_plus(global_steptime)\n else:\n position_correct = True\n processed_img = img_processing(real_image)\n numbers = counting(processed_img, all_numbers, real_image, raw_numbers_name\n )\n cv2.imshow('Hold Q to exit', real_image)\n ctime = clock(start_time)\n if numbers[7] % 10 == 0:\n logging(numbers, ctime, log_name)\n if send_email is True and numbers[7] % email_log_number == 0:\n write_email(numbers, ctime, 0, email_header)\n print('=================')\n print('Time: ' + str(ctime))\n print('One: ', numbers[0])\n print('Two: ', numbers[1])\n print('Three: ', numbers[2])\n print('Four: ', numbers[3])\n print('Five: ', numbers[4])\n print('Six: ', numbers[5])\n print('Errors: ', numbers[6])\n print('Success rolls: ', numbers[7])\n print('Deviation: ', numbers[8])\n print('=================')\n if numbers[7] == measures:\n break\n if cv2.waitKey(200) & 255 == ord('q'):\n break\nif interrupted == True:\n write_email(numbers, ctime, 1, email_header)\nelif dicer_ready == True and send_email == True:\n write_email(numbers, ctime, 0, email_header)\ncap.release()\ncv2.destroyAllWindows()\nprint('Everything finished')\n",
"<import token>\ngpios = True\ntry:\n import RPi.GPIO as GPIO\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(17, GPIO.OUT)\n GPIO.setup(4, GPIO.OUT)\n GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nexcept ImportError:\n gpios = False\n print('WARNING - no GPIOS found')\nlog_name = 'log_seite2'\nraw_numbers_name = 'raw_seite2'\nemail_header = 'dicer - seite2'\ndarknumbers = False\nsend_email = True\nemail_log_number = 6000\nerror_logging = True\nmeasures = 18000\ncap = cv2.VideoCapture(0)\nprint('Setting up...')\ninterrupted = False\ndicer_ready = False\nret, frame = cap.read()\nif ret is not True:\n dicer_ready = False\n grey = cv2.imread('dummy_image.png', 0)\n cv2.putText(grey, 'NO CAMERA', (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 2, cv2.LINE_AA)\n pos_img = np.zeros(shape=[100, 100, 1], dtype=np.uint8)\n cv2.imshow('Press any key to exit', grey)\n print('Error - stopping')\n cv2.waitKey()\nelif GPIO.input(18) == 0 and gpios == True:\n print('Temperature relay is offline, stopping')\nelse:\n dicer_ready = True\nglobal_steptime = 0.00015\nblob_params = cv2.SimpleBlobDetector_Params()\nblob_params.filterByColor = True\nblob_params.filterByArea = True\nblob_params.minArea = 100\nblob_params.filterByCircularity = True\nblob_params.minCircularity = 0.7\nblob_params.filterByInertia = False\nblob_params.filterByConvexity = False\nall_numbers = [0] * 9\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\ndef step_plus(steptime):\n GPIO.output(17, GPIO.LOW)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n\n\ndef step_minus(steptime):\n GPIO.output(17, GPIO.HIGH)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n GPIO.output(17, GPIO.LOW)\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\ndef logging(numbers, ctime, log_name):\n file = open(log_name, 'w')\n file.write('Einz:' + str(numbers[0]) + '\\n')\n file.write('Zwei:' + str(numbers[1]) + '\\n')\n file.write('Drei: ' + str(numbers[2]) + '\\n')\n file.write('Vier: ' + str(numbers[3]) + '\\n')\n file.write('Fuenf: ' + str(numbers[4]) + '\\n')\n file.write('Sechs: ' + str(numbers[5]) + '\\n')\n file.write('Fehler: ' + str(numbers[6]) + '\\n')\n file.write('Gesamt: ' + str(numbers[7]) + '\\n')\n file.write('Standardabw: ' + str(numbers[8]) + '\\n')\n file.write('Zeit: ' + str(ctime) + '\\n')\n file.close()\n\n\ndef get_images():\n for i in range(5):\n ret, frame = cap.read()\n y = 160\n h = 240\n x = 220\n w = 240\n 
dice_image = frame[y:y + h, x:x + w]\n grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)\n y = 120\n h = 15\n pos_img = frame[y:y + h, x:x + w]\n pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)\n ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)\n return grey, pos_img\n\n\ndef hough_detector(input_img):\n img = cv2.medianBlur(input_img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200,\n param2=10, minRadius=5, maxRadius=25)\n h_number = 0\n try:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n return h_number\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\ndef counting(image, all_numbers, dice_image, raw_numbers_name):\n one = all_numbers[0]\n two = all_numbers[1]\n three = all_numbers[2]\n four = all_numbers[3]\n five = all_numbers[4]\n six = all_numbers[5]\n errorcnt = all_numbers[6]\n success_rolls = all_numbers[7]\n detector = cv2.SimpleBlobDetector_create(blob_params)\n keypoints = detector.detect(image)\n img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]),\n (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n blob_number = 0\n for i in keypoints[0:]:\n blob_number = blob_number + 1\n cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n hough_number = hough_detector(image)\n if blob_number == hough_number:\n number = blob_number\n print('DETECTED: ', number)\n if blob_number > 0 and blob_number < 7:\n raw_log = open(raw_numbers_name, 'a')\n raw_log.write(str(number) + '\\n')\n raw_log.close()\n success_rolls += 1\n all_numbers[number - 1] += 1\n else:\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_binary.png', image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_real.png', dice_image)\n else:\n print('NOT MATCHING FILTERS')\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) + ' matching_error.png',\n image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' matching_error_real.png', dice_image)\n rolled = [one, two, three, four, five, six]\n std_dev = np.std(rolled)\n all_numbers[6] = errorcnt\n all_numbers[7] = success_rolls\n all_numbers[8] = std_dev\n cv2.imshow('blob detector', 
img_with_keypoints)\n cv2.imwrite('blob_detector.png', img_with_keypoints)\n return all_numbers\n\n\nstart_time = time.time()\nif dicer_ready is True:\n GPIO.add_event_detect(18, GPIO.FALLING, callback=interr, bouncetime=200)\n print('Starting...')\nwhile dicer_ready is True:\n if gpios:\n for i in range(3200):\n steptime = global_steptime\n step_plus(steptime)\n time.sleep(0.6)\n position_correct = False\n real_image, pos_img = get_images()\n while position_correct is not True and gpios is True:\n real_image, pos_img = get_images()\n M = cv2.moments(pos_img)\n if M['m00'] != 0:\n cX = int(M['m10'] / M['m00'])\n else:\n cX = 0\n if cX < 115:\n step_minus(global_steptime)\n elif cX > 135:\n step_plus(global_steptime)\n else:\n position_correct = True\n processed_img = img_processing(real_image)\n numbers = counting(processed_img, all_numbers, real_image, raw_numbers_name\n )\n cv2.imshow('Hold Q to exit', real_image)\n ctime = clock(start_time)\n if numbers[7] % 10 == 0:\n logging(numbers, ctime, log_name)\n if send_email is True and numbers[7] % email_log_number == 0:\n write_email(numbers, ctime, 0, email_header)\n print('=================')\n print('Time: ' + str(ctime))\n print('One: ', numbers[0])\n print('Two: ', numbers[1])\n print('Three: ', numbers[2])\n print('Four: ', numbers[3])\n print('Five: ', numbers[4])\n print('Six: ', numbers[5])\n print('Errors: ', numbers[6])\n print('Success rolls: ', numbers[7])\n print('Deviation: ', numbers[8])\n print('=================')\n if numbers[7] == measures:\n break\n if cv2.waitKey(200) & 255 == ord('q'):\n break\nif interrupted == True:\n write_email(numbers, ctime, 1, email_header)\nelif dicer_ready == True and send_email == True:\n write_email(numbers, ctime, 0, email_header)\ncap.release()\ncv2.destroyAllWindows()\nprint('Everything finished')\n",
"<import token>\n<assignment token>\ntry:\n import RPi.GPIO as GPIO\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(17, GPIO.OUT)\n GPIO.setup(4, GPIO.OUT)\n GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)\nexcept ImportError:\n gpios = False\n print('WARNING - no GPIOS found')\n<assignment token>\nprint('Setting up...')\n<assignment token>\nif ret is not True:\n dicer_ready = False\n grey = cv2.imread('dummy_image.png', 0)\n cv2.putText(grey, 'NO CAMERA', (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 2, cv2.LINE_AA)\n pos_img = np.zeros(shape=[100, 100, 1], dtype=np.uint8)\n cv2.imshow('Press any key to exit', grey)\n print('Error - stopping')\n cv2.waitKey()\nelif GPIO.input(18) == 0 and gpios == True:\n print('Temperature relay is offline, stopping')\nelse:\n dicer_ready = True\n<assignment token>\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\ndef step_plus(steptime):\n GPIO.output(17, GPIO.LOW)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n\n\ndef step_minus(steptime):\n GPIO.output(17, GPIO.HIGH)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n GPIO.output(17, GPIO.LOW)\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\ndef logging(numbers, ctime, log_name):\n file = open(log_name, 'w')\n file.write('Einz:' + str(numbers[0]) + '\\n')\n file.write('Zwei:' + str(numbers[1]) + '\\n')\n file.write('Drei: ' + str(numbers[2]) + '\\n')\n file.write('Vier: ' + str(numbers[3]) + '\\n')\n file.write('Fuenf: ' + str(numbers[4]) + '\\n')\n file.write('Sechs: ' + str(numbers[5]) + '\\n')\n file.write('Fehler: ' + str(numbers[6]) + '\\n')\n file.write('Gesamt: ' + str(numbers[7]) + '\\n')\n file.write('Standardabw: ' + str(numbers[8]) + '\\n')\n file.write('Zeit: ' + str(ctime) + '\\n')\n file.close()\n\n\ndef get_images():\n for i in range(5):\n ret, frame = cap.read()\n y = 160\n h = 240\n x = 220\n w = 240\n dice_image = frame[y:y + h, x:x + w]\n grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)\n y = 120\n h = 15\n pos_img = frame[y:y + h, x:x + w]\n pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)\n ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)\n return grey, pos_img\n\n\ndef hough_detector(input_img):\n img = cv2.medianBlur(input_img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200,\n param2=10, minRadius=5, maxRadius=25)\n h_number = 0\n try:\n circles = 
np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n return h_number\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\ndef counting(image, all_numbers, dice_image, raw_numbers_name):\n one = all_numbers[0]\n two = all_numbers[1]\n three = all_numbers[2]\n four = all_numbers[3]\n five = all_numbers[4]\n six = all_numbers[5]\n errorcnt = all_numbers[6]\n success_rolls = all_numbers[7]\n detector = cv2.SimpleBlobDetector_create(blob_params)\n keypoints = detector.detect(image)\n img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]),\n (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n blob_number = 0\n for i in keypoints[0:]:\n blob_number = blob_number + 1\n cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n hough_number = hough_detector(image)\n if blob_number == hough_number:\n number = blob_number\n print('DETECTED: ', number)\n if blob_number > 0 and blob_number < 7:\n raw_log = open(raw_numbers_name, 'a')\n raw_log.write(str(number) + '\\n')\n raw_log.close()\n success_rolls += 1\n all_numbers[number - 1] += 1\n else:\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_binary.png', image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_real.png', dice_image)\n else:\n print('NOT MATCHING FILTERS')\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) + ' matching_error.png',\n image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' matching_error_real.png', dice_image)\n rolled = [one, two, three, four, five, six]\n std_dev = np.std(rolled)\n all_numbers[6] = errorcnt\n all_numbers[7] = success_rolls\n all_numbers[8] = std_dev\n cv2.imshow('blob detector', img_with_keypoints)\n cv2.imwrite('blob_detector.png', img_with_keypoints)\n return all_numbers\n\n\n<assignment token>\nif dicer_ready is True:\n GPIO.add_event_detect(18, GPIO.FALLING, callback=interr, bouncetime=200)\n print('Starting...')\nwhile dicer_ready is True:\n if gpios:\n for i in range(3200):\n steptime = global_steptime\n step_plus(steptime)\n time.sleep(0.6)\n position_correct = False\n real_image, pos_img = get_images()\n while position_correct is not True and gpios is True:\n real_image, pos_img = get_images()\n M = 
cv2.moments(pos_img)\n if M['m00'] != 0:\n cX = int(M['m10'] / M['m00'])\n else:\n cX = 0\n if cX < 115:\n step_minus(global_steptime)\n elif cX > 135:\n step_plus(global_steptime)\n else:\n position_correct = True\n processed_img = img_processing(real_image)\n numbers = counting(processed_img, all_numbers, real_image, raw_numbers_name\n )\n cv2.imshow('Hold Q to exit', real_image)\n ctime = clock(start_time)\n if numbers[7] % 10 == 0:\n logging(numbers, ctime, log_name)\n if send_email is True and numbers[7] % email_log_number == 0:\n write_email(numbers, ctime, 0, email_header)\n print('=================')\n print('Time: ' + str(ctime))\n print('One: ', numbers[0])\n print('Two: ', numbers[1])\n print('Three: ', numbers[2])\n print('Four: ', numbers[3])\n print('Five: ', numbers[4])\n print('Six: ', numbers[5])\n print('Errors: ', numbers[6])\n print('Success rolls: ', numbers[7])\n print('Deviation: ', numbers[8])\n print('=================')\n if numbers[7] == measures:\n break\n if cv2.waitKey(200) & 255 == ord('q'):\n break\nif interrupted == True:\n write_email(numbers, ctime, 1, email_header)\nelif dicer_ready == True and send_email == True:\n write_email(numbers, ctime, 0, email_header)\ncap.release()\ncv2.destroyAllWindows()\nprint('Everything finished')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\ndef step_plus(steptime):\n GPIO.output(17, GPIO.LOW)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n\n\ndef step_minus(steptime):\n GPIO.output(17, GPIO.HIGH)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n GPIO.output(17, GPIO.LOW)\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\ndef logging(numbers, ctime, log_name):\n file = open(log_name, 'w')\n file.write('Einz:' + str(numbers[0]) + '\\n')\n file.write('Zwei:' + str(numbers[1]) + '\\n')\n file.write('Drei: ' + str(numbers[2]) + '\\n')\n file.write('Vier: ' + str(numbers[3]) + '\\n')\n file.write('Fuenf: ' + str(numbers[4]) + '\\n')\n file.write('Sechs: ' + str(numbers[5]) + '\\n')\n file.write('Fehler: ' + str(numbers[6]) + '\\n')\n file.write('Gesamt: ' + str(numbers[7]) + '\\n')\n file.write('Standardabw: ' + str(numbers[8]) + '\\n')\n file.write('Zeit: ' + str(ctime) + '\\n')\n file.close()\n\n\ndef get_images():\n for i in range(5):\n ret, frame = cap.read()\n y = 160\n h = 240\n x = 220\n w = 240\n dice_image = frame[y:y + h, x:x + w]\n grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)\n y = 120\n h = 15\n pos_img = frame[y:y + h, x:x + w]\n pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)\n ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)\n return grey, pos_img\n\n\ndef hough_detector(input_img):\n img = cv2.medianBlur(input_img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200,\n param2=10, minRadius=5, maxRadius=25)\n h_number = 0\n try:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n return h_number\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 
0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\ndef counting(image, all_numbers, dice_image, raw_numbers_name):\n one = all_numbers[0]\n two = all_numbers[1]\n three = all_numbers[2]\n four = all_numbers[3]\n five = all_numbers[4]\n six = all_numbers[5]\n errorcnt = all_numbers[6]\n success_rolls = all_numbers[7]\n detector = cv2.SimpleBlobDetector_create(blob_params)\n keypoints = detector.detect(image)\n img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]),\n (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n blob_number = 0\n for i in keypoints[0:]:\n blob_number = blob_number + 1\n cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n hough_number = hough_detector(image)\n if blob_number == hough_number:\n number = blob_number\n print('DETECTED: ', number)\n if blob_number > 0 and blob_number < 7:\n raw_log = open(raw_numbers_name, 'a')\n raw_log.write(str(number) + '\\n')\n raw_log.close()\n success_rolls += 1\n all_numbers[number - 1] += 1\n else:\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_binary.png', image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_real.png', dice_image)\n else:\n print('NOT MATCHING FILTERS')\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) + ' matching_error.png',\n image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' matching_error_real.png', dice_image)\n rolled = [one, two, three, four, five, six]\n std_dev = np.std(rolled)\n all_numbers[6] = errorcnt\n all_numbers[7] = success_rolls\n all_numbers[8] = std_dev\n cv2.imshow('blob detector', img_with_keypoints)\n cv2.imwrite('blob_detector.png', img_with_keypoints)\n return all_numbers\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\ndef step_plus(steptime):\n GPIO.output(17, GPIO.LOW)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n\n\n<function token>\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\ndef logging(numbers, ctime, log_name):\n file = open(log_name, 'w')\n file.write('Einz:' + str(numbers[0]) + '\\n')\n file.write('Zwei:' + str(numbers[1]) + '\\n')\n file.write('Drei: ' + str(numbers[2]) + '\\n')\n file.write('Vier: ' + str(numbers[3]) + '\\n')\n file.write('Fuenf: ' + str(numbers[4]) + '\\n')\n file.write('Sechs: ' + str(numbers[5]) + '\\n')\n file.write('Fehler: ' + str(numbers[6]) + '\\n')\n file.write('Gesamt: ' + str(numbers[7]) + '\\n')\n file.write('Standardabw: ' + str(numbers[8]) + '\\n')\n file.write('Zeit: ' + str(ctime) + '\\n')\n file.close()\n\n\ndef get_images():\n for i in range(5):\n ret, frame = cap.read()\n y = 160\n h = 240\n x = 220\n w = 240\n dice_image = frame[y:y + h, x:x + w]\n grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)\n y = 120\n h = 15\n pos_img = frame[y:y + h, x:x + w]\n pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)\n ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)\n return grey, pos_img\n\n\ndef hough_detector(input_img):\n img = cv2.medianBlur(input_img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200,\n param2=10, minRadius=5, maxRadius=25)\n h_number = 0\n try:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n return h_number\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n 
kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\ndef counting(image, all_numbers, dice_image, raw_numbers_name):\n one = all_numbers[0]\n two = all_numbers[1]\n three = all_numbers[2]\n four = all_numbers[3]\n five = all_numbers[4]\n six = all_numbers[5]\n errorcnt = all_numbers[6]\n success_rolls = all_numbers[7]\n detector = cv2.SimpleBlobDetector_create(blob_params)\n keypoints = detector.detect(image)\n img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]),\n (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n blob_number = 0\n for i in keypoints[0:]:\n blob_number = blob_number + 1\n cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n hough_number = hough_detector(image)\n if blob_number == hough_number:\n number = blob_number\n print('DETECTED: ', number)\n if blob_number > 0 and blob_number < 7:\n raw_log = open(raw_numbers_name, 'a')\n raw_log.write(str(number) + '\\n')\n raw_log.close()\n success_rolls += 1\n all_numbers[number - 1] += 1\n else:\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_binary.png', image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_real.png', dice_image)\n else:\n print('NOT MATCHING FILTERS')\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) + ' matching_error.png',\n image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' matching_error_real.png', dice_image)\n rolled = [one, two, three, four, five, six]\n std_dev = np.std(rolled)\n all_numbers[6] = errorcnt\n all_numbers[7] = success_rolls\n all_numbers[8] = std_dev\n cv2.imshow('blob detector', img_with_keypoints)\n cv2.imwrite('blob_detector.png', img_with_keypoints)\n return all_numbers\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\ndef step_plus(steptime):\n GPIO.output(17, GPIO.LOW)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n\n\n<function token>\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\n<function token>\n\n\ndef get_images():\n for i in range(5):\n ret, frame = cap.read()\n y = 160\n h = 240\n x = 220\n w = 240\n dice_image = frame[y:y + h, x:x + w]\n grey = cv2.cvtColor(dice_image, cv2.COLOR_BGR2GRAY)\n y = 120\n h = 15\n pos_img = frame[y:y + h, x:x + w]\n pos_img = cv2.cvtColor(pos_img, cv2.COLOR_BGR2GRAY)\n ret, pos_img = cv2.threshold(pos_img, 245, 255, cv2.THRESH_BINARY)\n return grey, pos_img\n\n\ndef hough_detector(input_img):\n img = cv2.medianBlur(input_img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200,\n param2=10, minRadius=5, maxRadius=25)\n h_number = 0\n try:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n return h_number\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\ndef counting(image, all_numbers, dice_image, raw_numbers_name):\n one = all_numbers[0]\n two = all_numbers[1]\n three = 
all_numbers[2]\n four = all_numbers[3]\n five = all_numbers[4]\n six = all_numbers[5]\n errorcnt = all_numbers[6]\n success_rolls = all_numbers[7]\n detector = cv2.SimpleBlobDetector_create(blob_params)\n keypoints = detector.detect(image)\n img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]),\n (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n blob_number = 0\n for i in keypoints[0:]:\n blob_number = blob_number + 1\n cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n hough_number = hough_detector(image)\n if blob_number == hough_number:\n number = blob_number\n print('DETECTED: ', number)\n if blob_number > 0 and blob_number < 7:\n raw_log = open(raw_numbers_name, 'a')\n raw_log.write(str(number) + '\\n')\n raw_log.close()\n success_rolls += 1\n all_numbers[number - 1] += 1\n else:\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_binary.png', image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_real.png', dice_image)\n else:\n print('NOT MATCHING FILTERS')\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) + ' matching_error.png',\n image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' matching_error_real.png', dice_image)\n rolled = [one, two, three, four, five, six]\n std_dev = np.std(rolled)\n all_numbers[6] = errorcnt\n all_numbers[7] = success_rolls\n all_numbers[8] = std_dev\n cv2.imshow('blob detector', img_with_keypoints)\n cv2.imwrite('blob_detector.png', img_with_keypoints)\n return all_numbers\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\ndef step_plus(steptime):\n GPIO.output(17, GPIO.LOW)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n\n\n<function token>\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\n<function token>\n<function token>\n\n\ndef hough_detector(input_img):\n img = cv2.medianBlur(input_img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200,\n param2=10, minRadius=5, maxRadius=25)\n h_number = 0\n try:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n return h_number\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\ndef counting(image, all_numbers, dice_image, raw_numbers_name):\n one = all_numbers[0]\n two = all_numbers[1]\n three = all_numbers[2]\n four = all_numbers[3]\n five = all_numbers[4]\n six = all_numbers[5]\n errorcnt = all_numbers[6]\n success_rolls = all_numbers[7]\n detector = cv2.SimpleBlobDetector_create(blob_params)\n keypoints = detector.detect(image)\n img_with_keypoints = cv2.drawKeypoints(image, keypoints, np.array([]),\n (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n blob_number 
= 0\n for i in keypoints[0:]:\n blob_number = blob_number + 1\n cv2.putText(img_with_keypoints, str(blob_number), (10, 200), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n hough_number = hough_detector(image)\n if blob_number == hough_number:\n number = blob_number\n print('DETECTED: ', number)\n if blob_number > 0 and blob_number < 7:\n raw_log = open(raw_numbers_name, 'a')\n raw_log.write(str(number) + '\\n')\n raw_log.close()\n success_rolls += 1\n all_numbers[number - 1] += 1\n else:\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_binary.png', image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' number_error_real.png', dice_image)\n else:\n print('NOT MATCHING FILTERS')\n errorcnt = errorcnt + 1\n if error_logging is True:\n cv2.imwrite('errors/' + str(errorcnt) + ' matching_error.png',\n image)\n cv2.imwrite('errors/' + str(errorcnt) +\n ' matching_error_real.png', dice_image)\n rolled = [one, two, three, four, five, six]\n std_dev = np.std(rolled)\n all_numbers[6] = errorcnt\n all_numbers[7] = success_rolls\n all_numbers[8] = std_dev\n cv2.imshow('blob detector', img_with_keypoints)\n cv2.imwrite('blob_detector.png', img_with_keypoints)\n return all_numbers\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\ndef step_plus(steptime):\n GPIO.output(17, GPIO.LOW)\n GPIO.output(4, GPIO.HIGH)\n time.sleep(steptime)\n GPIO.output(4, GPIO.LOW)\n time.sleep(steptime)\n\n\n<function token>\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\n<function token>\n<function token>\n\n\ndef hough_detector(input_img):\n img = cv2.medianBlur(input_img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200,\n param2=10, minRadius=5, maxRadius=25)\n h_number = 0\n try:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n return h_number\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\n<function token>\n<function token>\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\n<function token>\n<function token>\n\n\ndef hough_detector(input_img):\n img = cv2.medianBlur(input_img, 5)\n cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=200,\n param2=10, minRadius=5, maxRadius=25)\n h_number = 0\n try:\n circles = np.uint16(np.around(circles))\n for i in circles[0, :]:\n cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)\n h_number += 1\n except:\n print('HOUGH DETECTOR ERROR, NO CIRCLES FOUND')\n cv2.putText(cimg, str(h_number), (10, 200), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 50), 2, cv2.LINE_AA)\n cv2.imshow('hough detector', cimg)\n cv2.imwrite('hough detector.png', cimg)\n return h_number\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef interr(channel):\n global gpios\n global dicer_ready\n global interrupted\n gpios = False\n dicer_ready = False\n interrupted = True\n print('Interrupt')\n\n\n<function token>\n<function token>\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef clock(now):\n time_seconds = int(time.time() - now)\n t_hr = int(time_seconds / 3600)\n t_min = int(time_seconds / 60) - t_hr * 60\n t_sec = int(time_seconds) - t_min * 60\n showTime = str(t_hr) + ':' + str(t_min).zfill(2)\n print(showTime)\n return showTime\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef write_email(numbers, ctime, error, header_name):\n server = smtplib.SMTP('SERVERADRESSE', PORTNR)\n server.starttls()\n server.login('LOGIN-BENUTZERNAME', 'PASSWORT')\n msg = MIMEMultipart()\n msg['From'] = 'ABSENDER'\n msg['To'] = 'EMPFAENGER'\n if error:\n msg['Subject'] = 'Error'\n else:\n msg['Cc'] = 'KOPIE ADRESSE'\n msg['Subject'] = header_name\n message = str(numbers[0]) + ',' + str(numbers[1]) + ',' + str(numbers[2]\n ) + ',' + str(numbers[3]) + ',' + str(numbers[4]) + ',' + str(numbers\n [5]) + ' Err: ' + str(numbers[6]) + ' All: ' + str(numbers[7]\n ) + '\\n' + 'Zeit: ' + str(ctime)\n msg.attach(MIMEText(message))\n server.send_message(msg)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef img_processing(image_input):\n image_input = cv2.medianBlur(image_input, 3)\n ret, binary_image = cv2.threshold(image_input, 220, 255, cv2.THRESH_BINARY)\n if darknumbers:\n w = binary_image.shape[1]\n h = binary_image.shape[0]\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (0, 0), 255)\n mask = np.zeros((h + 2, w + 2), np.uint8)\n cv2.floodFill(binary_image, mask, (h, w), 255)\n else:\n binary_image = cv2.bitwise_not(binary_image)\n kernel_round = np.array([[0, 0, 0, 1, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 1,\n 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1,\n 1, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 1, \n 0, 0, 0]], dtype=np.uint8)\n dilate = cv2.dilate(binary_image, kernel_round, iterations=3)\n erode = cv2.erode(dilate, kernel_round, iterations=2)\n return erode\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
98,889 |
1f1cc5409f475a33f5cc3b27ae3016b4d54ed797
|
# Program to read per-row scores from an Excel sheet
# and flag rows using an entropy-style measure
import xlrd
import math
# Give the location of the file
loc = (r'F:\Document\article\journal\results\cal\scores.xlsx')
wb = xlrd.open_workbook(loc)
sheet = wb.sheet_by_index(0)
print(sheet.cell_value(1, 0))
total = sheet.cell_value(1,2)
print(total)
i = 1
import numpy as np
from pyitlib import discrete_random_variable as drv
#X = np.array((0,0))
#print(drv.entropy(X))
res = ""
i=1
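# Walk the data rows (row 0 is assumed to be a header): column 0 supplies ftop and column 1 supplies ptof for each row.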
for i in range(sheet.nrows -2):
j=0
while j<1:
ftop = sheet.cell_value(int(i+1),int(j))
j = j+1
ptof = sheet.cell_value(int(i+1) , int(j))
    mu1 = ftop / total
    mu2 = ptof / total
    # The small constant keeps math.log2 from being evaluated at exactly zero.
    m11 = mu1 * math.log2(mu1 + 0.0000001)
    m22 = mu2 * math.log2(mu2 + 0.0000001)
    muen = -m11 + m22
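    # muen = h(mu1) - h(mu2), where h(p) = -p * log2(p); a positive value flags this row below.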
if (muen > 0):
print("******************line number is ***************")
print(i+2)
print("************************************************")
# X = np.array((mu1))
# mu11 = drv . entropy(X)
# Y = np.array((mu2))
# mu22 = drv.entropy(Y)
# muen = mu11 - mu22
res = res + str( muen) +" , "
print(res)
# Extracting number of rows
#print(sheet.nrows)
#print(res)
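# Illustrative sketch (added, not part of the original script): the per-row quantity is
# muen = h(mu1) - h(mu2), with h(p) = -p * log2(p). The helper and sample values below are
# hypothetical; the real mu1/mu2 depend on the spreadsheet, which is not available here.
def h(p, eps=0.0000001):
    return -p * math.log2(p + eps)
# Example: h(0.3) - h(0.6) is roughly 0.521 - 0.442 = 0.079 > 0, so such a row would be flagged.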
|
[
"\r\n# Program to extract number\r\n# of rows using Python\r\nimport xlrd\r\nimport math\r\n# Give the location of the file\r\nloc = (r'F:\\Document\\article\\journal\\results\\cal\\scores.xlsx')\r\n\r\nwb = xlrd.open_workbook(loc)\r\nsheet = wb.sheet_by_index(0)\r\nprint(sheet.cell_value(1, 0))\r\n\r\n\r\ntotal = sheet.cell_value(1,2)\r\nprint(total)\r\ni = 1\r\n\r\nimport numpy as np\r\nfrom pyitlib import discrete_random_variable as drv\r\n#X = np.array((0,0))\r\n#print(drv.entropy(X))\r\n\r\nres = \"\"\r\ni=1\r\nfor i in range(sheet.nrows -2):\r\n j=0\r\n while j<1:\r\n\r\n ftop = sheet.cell_value(int(i+1),int(j))\r\n\r\n j = j+1\r\n ptof = sheet.cell_value(int(i+1) , int(j))\r\n mu1 =(ftop / total)\r\n mu2 =(ptof / total)\r\n\r\n m11 = mu1 * (math.log2(mu1 + 0.0000001))\r\n m22 = mu2 * (math.log2(mu2 + 0.0000001))\r\n muen = (-m11+m22 )\r\n if (muen > 0):\r\n print(\"******************line number is ***************\")\r\n print(i+2)\r\n print(\"************************************************\")\r\n # X = np.array((mu1))\r\n # mu11 = drv . entropy(X)\r\n # Y = np.array((mu2))\r\n # mu22 = drv.entropy(Y)\r\n # muen = mu11 - mu22\r\n\r\n\r\n res = res + str( muen) +\" , \"\r\n\r\n\r\nprint(res)\r\n# Extracting number of rows\r\n#print(sheet.nrows)\r\n#print(res)\r\n\r\n\r\n\r\n\r\n\r\n",
"import xlrd\nimport math\nloc = 'F:\\\\Document\\\\article\\\\journal\\\\results\\\\cal\\\\scores.xlsx'\nwb = xlrd.open_workbook(loc)\nsheet = wb.sheet_by_index(0)\nprint(sheet.cell_value(1, 0))\ntotal = sheet.cell_value(1, 2)\nprint(total)\ni = 1\nimport numpy as np\nfrom pyitlib import discrete_random_variable as drv\nres = ''\ni = 1\nfor i in range(sheet.nrows - 2):\n j = 0\n while j < 1:\n ftop = sheet.cell_value(int(i + 1), int(j))\n j = j + 1\n ptof = sheet.cell_value(int(i + 1), int(j))\n mu1 = ftop / total\n mu2 = ptof / total\n m11 = mu1 * math.log2(mu1 + 1e-07)\n m22 = mu2 * math.log2(mu2 + 1e-07)\n muen = -m11 + m22\n if muen > 0:\n print('******************line number is ***************')\n print(i + 2)\n print('************************************************')\n res = res + str(muen) + ' , '\nprint(res)\n",
"<import token>\nloc = 'F:\\\\Document\\\\article\\\\journal\\\\results\\\\cal\\\\scores.xlsx'\nwb = xlrd.open_workbook(loc)\nsheet = wb.sheet_by_index(0)\nprint(sheet.cell_value(1, 0))\ntotal = sheet.cell_value(1, 2)\nprint(total)\ni = 1\n<import token>\nres = ''\ni = 1\nfor i in range(sheet.nrows - 2):\n j = 0\n while j < 1:\n ftop = sheet.cell_value(int(i + 1), int(j))\n j = j + 1\n ptof = sheet.cell_value(int(i + 1), int(j))\n mu1 = ftop / total\n mu2 = ptof / total\n m11 = mu1 * math.log2(mu1 + 1e-07)\n m22 = mu2 * math.log2(mu2 + 1e-07)\n muen = -m11 + m22\n if muen > 0:\n print('******************line number is ***************')\n print(i + 2)\n print('************************************************')\n res = res + str(muen) + ' , '\nprint(res)\n",
"<import token>\n<assignment token>\nprint(sheet.cell_value(1, 0))\n<assignment token>\nprint(total)\n<assignment token>\n<import token>\n<assignment token>\nfor i in range(sheet.nrows - 2):\n j = 0\n while j < 1:\n ftop = sheet.cell_value(int(i + 1), int(j))\n j = j + 1\n ptof = sheet.cell_value(int(i + 1), int(j))\n mu1 = ftop / total\n mu2 = ptof / total\n m11 = mu1 * math.log2(mu1 + 1e-07)\n m22 = mu2 * math.log2(mu2 + 1e-07)\n muen = -m11 + m22\n if muen > 0:\n print('******************line number is ***************')\n print(i + 2)\n print('************************************************')\n res = res + str(muen) + ' , '\nprint(res)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
98,890 |
2093c4a14bc6731c210556b26b3748d1a8f8d5fe
|
print('\nCurso Ciência de Dados - Fundamentos em Python (Modulos e Pacotes)\[email protected]\n')
print('Parte 1:\n')
print('Parte 2:\n')
#---------------------------------
#----- Parte 1 -------------------
print('----------------------------------------------\n---------- Parte 1----------------------------')
import statistics as est
import math
from statistics import mean,median
from statistics import *
z = [10,20,20,40]
x = est.mean(z)
y = est.median(z)
x1 = mean(z)
y1 = median(z)
dp1 = stdev(z)
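# Note (added): statistics.stdev() is the *sample* standard deviation (denominator n - 1),
# which is why it differs from the population value computed by hand further below.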
print('\nConjunto de dados criado: ',z)
print('\nMédia: ',x)
print('\nMediana: ',y)
print('\nMédia: ',x1)
print('\nMediana: ',y1)
print('\nDesvio Padrão Calculado pela função statistics.stdev: ',dp1,' Observe que há diferença do desvio padrão calculado, provavelmente pq a função utiliza algum espaço amostra dessa lista que é pequena\n')
z1 = [zi-mean(z) for zi in z]
print('\nDiferença entre elementos e média da lista: ',z1,'\n')
z_square= [pow(z1i,2) for z1i in z1]
print('\nQuadrado da diferença calculada acima: ',z_square,'\n')
z_total = sum(z_square)
print(z_total/len(z))
print("\nO desvio padrão é:", math.sqrt(z_total/len(z)),'\n')
|
[
"print('\\nCurso Ciência de Dados - Fundamentos em Python (Modulos e Pacotes)\\[email protected]\\n')\nprint('Parte 1:\\n')\nprint('Parte 2:\\n')\n\n\n#---------------------------------\n#----- Parte 1 -------------------\nprint('----------------------------------------------\\n---------- Parte 1----------------------------')\n\nimport statistics as est\nimport math\nfrom statistics import mean,median\nfrom statistics import*\nz = [10,20,20,40]\nx = est.mean(z)\ny = est.median(z)\nx1 = mean(z)\ny1 = median(z)\ndp1 = stdev(z)\nprint('\\nConjunto de dados criado: ',z)\nprint('\\nMédia: ',x)\nprint('\\nMediana: ',y)\nprint('\\nMédia: ',x1)\nprint('\\nMediana: ',y1)\nprint('\\nDesvio Padrão Calculado pela função statistics.stdev: ',dp1,' Observe que há diferença do desvio padrão calculado, provavelmente pq a função utiliza algum espaço amostra dessa lista que é pequena\\n') \nz1 = [zi-mean(z) for zi in z]\nprint('\\nDiferença entre elementos e média da lista: ',z1,'\\n')\nz_square= [pow(z1i,2) for z1i in z1]\nprint('\\nQuadrado da diferença calculada acima: ',z_square,'\\n')\nz_total = sum(z_square)\nprint(z_total/len(z))\nprint(\"\\nO desvio padrão é:\", math.sqrt(z_total/len(z)),'\\n')\n",
"print(\n \"\"\"\nCurso Ciência de Dados - Fundamentos em Python (Modulos e Pacotes)\[email protected]\n\"\"\"\n )\nprint('Parte 1:\\n')\nprint('Parte 2:\\n')\nprint(\n \"\"\"----------------------------------------------\n---------- Parte 1----------------------------\"\"\"\n )\nimport statistics as est\nimport math\nfrom statistics import mean, median\nfrom statistics import *\nz = [10, 20, 20, 40]\nx = est.mean(z)\ny = est.median(z)\nx1 = mean(z)\ny1 = median(z)\ndp1 = stdev(z)\nprint(\"\"\"\nConjunto de dados criado: \"\"\", z)\nprint('\\nMédia: ', x)\nprint('\\nMediana: ', y)\nprint('\\nMédia: ', x1)\nprint('\\nMediana: ', y1)\nprint(\"\"\"\nDesvio Padrão Calculado pela função statistics.stdev: \"\"\", dp1,\n \"\"\" Observe que há diferença do desvio padrão calculado, provavelmente pq a função utiliza algum espaço amostra dessa lista que é pequena\n\"\"\"\n )\nz1 = [(zi - mean(z)) for zi in z]\nprint(\"\"\"\nDiferença entre elementos e média da lista: \"\"\", z1, '\\n')\nz_square = [pow(z1i, 2) for z1i in z1]\nprint(\"\"\"\nQuadrado da diferença calculada acima: \"\"\", z_square, '\\n')\nz_total = sum(z_square)\nprint(z_total / len(z))\nprint(\"\"\"\nO desvio padrão é:\"\"\", math.sqrt(z_total / len(z)), '\\n')\n",
"print(\n \"\"\"\nCurso Ciência de Dados - Fundamentos em Python (Modulos e Pacotes)\[email protected]\n\"\"\"\n )\nprint('Parte 1:\\n')\nprint('Parte 2:\\n')\nprint(\n \"\"\"----------------------------------------------\n---------- Parte 1----------------------------\"\"\"\n )\n<import token>\nz = [10, 20, 20, 40]\nx = est.mean(z)\ny = est.median(z)\nx1 = mean(z)\ny1 = median(z)\ndp1 = stdev(z)\nprint(\"\"\"\nConjunto de dados criado: \"\"\", z)\nprint('\\nMédia: ', x)\nprint('\\nMediana: ', y)\nprint('\\nMédia: ', x1)\nprint('\\nMediana: ', y1)\nprint(\"\"\"\nDesvio Padrão Calculado pela função statistics.stdev: \"\"\", dp1,\n \"\"\" Observe que há diferença do desvio padrão calculado, provavelmente pq a função utiliza algum espaço amostra dessa lista que é pequena\n\"\"\"\n )\nz1 = [(zi - mean(z)) for zi in z]\nprint(\"\"\"\nDiferença entre elementos e média da lista: \"\"\", z1, '\\n')\nz_square = [pow(z1i, 2) for z1i in z1]\nprint(\"\"\"\nQuadrado da diferença calculada acima: \"\"\", z_square, '\\n')\nz_total = sum(z_square)\nprint(z_total / len(z))\nprint(\"\"\"\nO desvio padrão é:\"\"\", math.sqrt(z_total / len(z)), '\\n')\n",
"print(\n \"\"\"\nCurso Ciência de Dados - Fundamentos em Python (Modulos e Pacotes)\[email protected]\n\"\"\"\n )\nprint('Parte 1:\\n')\nprint('Parte 2:\\n')\nprint(\n \"\"\"----------------------------------------------\n---------- Parte 1----------------------------\"\"\"\n )\n<import token>\n<assignment token>\nprint(\"\"\"\nConjunto de dados criado: \"\"\", z)\nprint('\\nMédia: ', x)\nprint('\\nMediana: ', y)\nprint('\\nMédia: ', x1)\nprint('\\nMediana: ', y1)\nprint(\"\"\"\nDesvio Padrão Calculado pela função statistics.stdev: \"\"\", dp1,\n \"\"\" Observe que há diferença do desvio padrão calculado, provavelmente pq a função utiliza algum espaço amostra dessa lista que é pequena\n\"\"\"\n )\n<assignment token>\nprint(\"\"\"\nDiferença entre elementos e média da lista: \"\"\", z1, '\\n')\n<assignment token>\nprint(\"\"\"\nQuadrado da diferença calculada acima: \"\"\", z_square, '\\n')\n<assignment token>\nprint(z_total / len(z))\nprint(\"\"\"\nO desvio padrão é:\"\"\", math.sqrt(z_total / len(z)), '\\n')\n",
"<code token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,891 |
e8295931bed5249b10f8b83dd5929a79d2b284df
|
def count_money_combinations(amount, denominations, denom_index=0, num_denom=0):
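    # Recursively try either taking another coin of the current denomination or moving on to
    # the next denomination; every combination that reaches 0 appends its coin count to count_list.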
global count_list
if amount < 0:
return 0
if amount == 0:
count_list.append(num_denom)
return 1
if denom_index > len(denominations) - 1 and amount > 0:
return 0
return count_money_combinations(amount - denominations[denom_index], denominations, denom_index, num_denom + 1) \
+ count_money_combinations(amount, denominations, denom_index + 1, num_denom)
if __name__ == '__main__':
denominations = [1, 5, 10, 20, 50]
amount = 79
count_list = []
_ = count_money_combinations(amount, denominations)
print(count_list)
assert min(count_list) == 7
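# A minimal dynamic-programming sketch (added for illustration, not part of the original):
# it finds the same minimum coin count without enumerating every combination.
def min_coins(amount, denominations):
    # best[a] = fewest coins that sum to a (unbounded coin change).
    best = [0] + [float('inf')] * amount
    for a in range(1, amount + 1):
        for d in denominations:
            if d <= a and best[a - d] + 1 < best[a]:
                best[a] = best[a - d] + 1
    return best[amount]

# min_coins(79, [1, 5, 10, 20, 50]) == 7, e.g. 50 + 20 + 5 + 1 + 1 + 1 + 1.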
|
[
"def count_money_combinations(amount, denominations, denom_index=0, num_denom=0):\n global count_list\n\n if amount < 0:\n return 0\n\n if amount == 0:\n count_list.append(num_denom)\n return 1\n\n if denom_index > len(denominations) - 1 and amount > 0:\n return 0\n\n return count_money_combinations(amount - denominations[denom_index], denominations, denom_index, num_denom + 1) \\\n + count_money_combinations(amount, denominations, denom_index + 1, num_denom)\n\n\nif __name__ == '__main__':\n denominations = [1, 5, 10, 20, 50]\n amount = 79\n count_list = []\n _ = count_money_combinations(amount, denominations)\n print(count_list)\n assert min(count_list) == 7",
"def count_money_combinations(amount, denominations, denom_index=0, num_denom=0\n ):\n global count_list\n if amount < 0:\n return 0\n if amount == 0:\n count_list.append(num_denom)\n return 1\n if denom_index > len(denominations) - 1 and amount > 0:\n return 0\n return count_money_combinations(amount - denominations[denom_index],\n denominations, denom_index, num_denom + 1) + count_money_combinations(\n amount, denominations, denom_index + 1, num_denom)\n\n\nif __name__ == '__main__':\n denominations = [1, 5, 10, 20, 50]\n amount = 79\n count_list = []\n _ = count_money_combinations(amount, denominations)\n print(count_list)\n assert min(count_list) == 7\n",
"def count_money_combinations(amount, denominations, denom_index=0, num_denom=0\n ):\n global count_list\n if amount < 0:\n return 0\n if amount == 0:\n count_list.append(num_denom)\n return 1\n if denom_index > len(denominations) - 1 and amount > 0:\n return 0\n return count_money_combinations(amount - denominations[denom_index],\n denominations, denom_index, num_denom + 1) + count_money_combinations(\n amount, denominations, denom_index + 1, num_denom)\n\n\n<code token>\n",
"<function token>\n<code token>\n"
] | false |
98,892 |
a7593e0f21cbdbc84ba6d55dcc55f527744c420f
|
# Write object-oriented code that reads Korean, English, and math scores and prints their total,
# as in the expected result below.
# The Student object stores the Korean, English, and math scores when it is created
# and provides a method that returns the total.
# Input
# 89, 90, 100
# Output
# 국어, 영어, 수학의 총점: 279  (i.e. "Total of Korean, English, and math: 279")
class Student:
def __init__(self, kor, eng, math):
self.__kor = kor
self.__eng = eng
self.__math = math
@property
def kor(self):
return self.__kor
@property
def eng(self):
return self.__eng
@property
def math(self):
return self.__math
def scores(self):
return f"국어, 영어, 수학의 총점: {self.kor + self.eng + self.math}"
students = list(map(int, input().split(", ")))
students_list = Student(students[0], students[1], students[2])
print(students_list.scores())
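# With the sample input "89, 90, 100" this prints: 국어, 영어, 수학의 총점: 279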
|
[
"# 다음의 결과와 같이 국어, 영어, 수학 점수를 입력받아 합계를 구하는 객체지향 코드를 작성하십시오.\n# 이 때 학생 클래스의 객체는 객체 생성 시 국어, 영어, 수학 점수를 저장하며, 총점을 구하는 메서드를 제공합니다.\n# 입력\n# 89, 90, 100\n# 출력\n# 국어, 영어, 수학의 총점: 279\n\n\nclass Student:\n def __init__(self, kor, eng, math):\n self.__kor = kor\n self.__eng = eng\n self.__math = math\n\n @property\n def kor(self):\n return self.__kor\n\n @property\n def eng(self):\n return self.__eng\n\n @property\n def math(self):\n return self.__math\n\n def scores(self):\n return f\"국어, 영어, 수학의 총점: {self.kor + self.eng + self.math}\"\n\n\nstudents = list(map(int, input().split(\", \")))\nstudents_list = Student(students[0], students[1], students[2])\nprint(students_list.scores())\n",
"class Student:\n\n def __init__(self, kor, eng, math):\n self.__kor = kor\n self.__eng = eng\n self.__math = math\n\n @property\n def kor(self):\n return self.__kor\n\n @property\n def eng(self):\n return self.__eng\n\n @property\n def math(self):\n return self.__math\n\n def scores(self):\n return f'국어, 영어, 수학의 총점: {self.kor + self.eng + self.math}'\n\n\nstudents = list(map(int, input().split(', ')))\nstudents_list = Student(students[0], students[1], students[2])\nprint(students_list.scores())\n",
"class Student:\n\n def __init__(self, kor, eng, math):\n self.__kor = kor\n self.__eng = eng\n self.__math = math\n\n @property\n def kor(self):\n return self.__kor\n\n @property\n def eng(self):\n return self.__eng\n\n @property\n def math(self):\n return self.__math\n\n def scores(self):\n return f'국어, 영어, 수학의 총점: {self.kor + self.eng + self.math}'\n\n\n<assignment token>\nprint(students_list.scores())\n",
"class Student:\n\n def __init__(self, kor, eng, math):\n self.__kor = kor\n self.__eng = eng\n self.__math = math\n\n @property\n def kor(self):\n return self.__kor\n\n @property\n def eng(self):\n return self.__eng\n\n @property\n def math(self):\n return self.__math\n\n def scores(self):\n return f'국어, 영어, 수학의 총점: {self.kor + self.eng + self.math}'\n\n\n<assignment token>\n<code token>\n",
"class Student:\n\n def __init__(self, kor, eng, math):\n self.__kor = kor\n self.__eng = eng\n self.__math = math\n\n @property\n def kor(self):\n return self.__kor\n <function token>\n\n @property\n def math(self):\n return self.__math\n\n def scores(self):\n return f'국어, 영어, 수학의 총점: {self.kor + self.eng + self.math}'\n\n\n<assignment token>\n<code token>\n",
"class Student:\n\n def __init__(self, kor, eng, math):\n self.__kor = kor\n self.__eng = eng\n self.__math = math\n\n @property\n def kor(self):\n return self.__kor\n <function token>\n\n @property\n def math(self):\n return self.__math\n <function token>\n\n\n<assignment token>\n<code token>\n",
"class Student:\n\n def __init__(self, kor, eng, math):\n self.__kor = kor\n self.__eng = eng\n self.__math = math\n\n @property\n def kor(self):\n return self.__kor\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"class Student:\n <function token>\n\n @property\n def kor(self):\n return self.__kor\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"class Student:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<class token>\n<assignment token>\n<code token>\n"
] | false |
98,893 |
b34cabe9f84b661ea006be6a1f19f50ef24d570b
|
# Generated by Django 3.1.3 on 2020-11-04 01:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stocks', '0003_portfoliosummary'),
]
operations = [
migrations.AddField(
model_name='portfoliosummary',
name='average_cost',
field=models.DecimalField(decimal_places=2, max_digits=12, null=True),
),
migrations.AddField(
model_name='portfoliosummary',
name='current_market_price',
field=models.DecimalField(decimal_places=2, max_digits=12, null=True),
),
migrations.AddField(
model_name='portfoliosummary',
name='total_gain_loss',
field=models.DecimalField(decimal_places=2, max_digits=20, null=True),
),
migrations.AlterField(
model_name='portfoliosummary',
name='book_value',
field=models.DecimalField(decimal_places=2, max_digits=20, null=True),
),
migrations.AlterField(
model_name='portfoliosummary',
name='market_value',
field=models.DecimalField(decimal_places=2, max_digits=20, null=True),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
migrations.AlterModelTable(
name='portfoliosummary',
table='portfolio_summary',
),
]
|
[
"# Generated by Django 3.1.3 on 2020-11-04 01:49\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('stocks', '0003_portfoliosummary'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='portfoliosummary',\n name='average_cost',\n field=models.DecimalField(decimal_places=2, max_digits=12, null=True),\n ),\n migrations.AddField(\n model_name='portfoliosummary',\n name='current_market_price',\n field=models.DecimalField(decimal_places=2, max_digits=12, null=True),\n ),\n migrations.AddField(\n model_name='portfoliosummary',\n name='total_gain_loss',\n field=models.DecimalField(decimal_places=2, max_digits=20, null=True),\n ),\n migrations.AlterField(\n model_name='portfoliosummary',\n name='book_value',\n field=models.DecimalField(decimal_places=2, max_digits=20, null=True),\n ),\n migrations.AlterField(\n model_name='portfoliosummary',\n name='market_value',\n field=models.DecimalField(decimal_places=2, max_digits=20, null=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='first_name',\n field=models.CharField(blank=True, max_length=150, verbose_name='first name'),\n ),\n migrations.AlterModelTable(\n name='portfoliosummary',\n table='portfolio_summary',\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('stocks', '0003_portfoliosummary')]\n operations = [migrations.AddField(model_name='portfoliosummary', name=\n 'average_cost', field=models.DecimalField(decimal_places=2,\n max_digits=12, null=True)), migrations.AddField(model_name=\n 'portfoliosummary', name='current_market_price', field=models.\n DecimalField(decimal_places=2, max_digits=12, null=True)),\n migrations.AddField(model_name='portfoliosummary', name=\n 'total_gain_loss', field=models.DecimalField(decimal_places=2,\n max_digits=20, null=True)), migrations.AlterField(model_name=\n 'portfoliosummary', name='book_value', field=models.DecimalField(\n decimal_places=2, max_digits=20, null=True)), migrations.AlterField\n (model_name='portfoliosummary', name='market_value', field=models.\n DecimalField(decimal_places=2, max_digits=20, null=True)),\n migrations.AlterField(model_name='user', name='first_name', field=\n models.CharField(blank=True, max_length=150, verbose_name=\n 'first name')), migrations.AlterModelTable(name='portfoliosummary',\n table='portfolio_summary')]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('stocks', '0003_portfoliosummary')]\n operations = [migrations.AddField(model_name='portfoliosummary', name=\n 'average_cost', field=models.DecimalField(decimal_places=2,\n max_digits=12, null=True)), migrations.AddField(model_name=\n 'portfoliosummary', name='current_market_price', field=models.\n DecimalField(decimal_places=2, max_digits=12, null=True)),\n migrations.AddField(model_name='portfoliosummary', name=\n 'total_gain_loss', field=models.DecimalField(decimal_places=2,\n max_digits=20, null=True)), migrations.AlterField(model_name=\n 'portfoliosummary', name='book_value', field=models.DecimalField(\n decimal_places=2, max_digits=20, null=True)), migrations.AlterField\n (model_name='portfoliosummary', name='market_value', field=models.\n DecimalField(decimal_places=2, max_digits=20, null=True)),\n migrations.AlterField(model_name='user', name='first_name', field=\n models.CharField(blank=True, max_length=150, verbose_name=\n 'first name')), migrations.AlterModelTable(name='portfoliosummary',\n table='portfolio_summary')]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
98,894 |
fa783aa1ec7adb8a5f4c2c8c5e8994484492d9c3
|
import uuid
import datetime
import random
import json
from azure.servicebus import ServiceBusService
sbs = ServiceBusService(service_namespace='brucewaynetolltooth', shared_access_key_name='RootManageSharedAccessKey', shared_access_key_value='m6mWS29LUMIh2ZH9gh4KjmoNPiXBxeMCaq6eMxojBDc=')
devices = []
for x in range(0, 10):
devices.append(str(uuid.uuid4()))
for y in range(0,20):
for dev in devices:
reading = {'id': dev, 'timestamp': str(datetime.datetime.utcnow()), 'uv': random.random(), 'temperature': random.randint(70, 100), 'humidity': random.randint(70, 100)}
s = json.dumps(reading)
sbs.send_event('entrysignals', s)
print(y)
|
[
"import uuid\nimport datetime\nimport random\nimport json\nfrom azure.servicebus import ServiceBusService\n\nsbs = ServiceBusService(service_namespace='brucewaynetolltooth', shared_access_key_name='RootManageSharedAccessKey', shared_access_key_value='m6mWS29LUMIh2ZH9gh4KjmoNPiXBxeMCaq6eMxojBDc=')\ndevices = []\nfor x in range(0, 10):\n devices.append(str(uuid.uuid4()))\n\nfor y in range(0,20):\n for dev in devices:\n reading = {'id': dev, 'timestamp': str(datetime.datetime.utcnow()), 'uv': random.random(), 'temperature': random.randint(70, 100), 'humidity': random.randint(70, 100)}\n s = json.dumps(reading)\n sbs.send_event('entrysignals', s)\n print(y)",
"import uuid\nimport datetime\nimport random\nimport json\nfrom azure.servicebus import ServiceBusService\nsbs = ServiceBusService(service_namespace='brucewaynetolltooth',\n shared_access_key_name='RootManageSharedAccessKey',\n shared_access_key_value='m6mWS29LUMIh2ZH9gh4KjmoNPiXBxeMCaq6eMxojBDc=')\ndevices = []\nfor x in range(0, 10):\n devices.append(str(uuid.uuid4()))\nfor y in range(0, 20):\n for dev in devices:\n reading = {'id': dev, 'timestamp': str(datetime.datetime.utcnow()),\n 'uv': random.random(), 'temperature': random.randint(70, 100),\n 'humidity': random.randint(70, 100)}\n s = json.dumps(reading)\n sbs.send_event('entrysignals', s)\n print(y)\n",
"<import token>\nsbs = ServiceBusService(service_namespace='brucewaynetolltooth',\n shared_access_key_name='RootManageSharedAccessKey',\n shared_access_key_value='m6mWS29LUMIh2ZH9gh4KjmoNPiXBxeMCaq6eMxojBDc=')\ndevices = []\nfor x in range(0, 10):\n devices.append(str(uuid.uuid4()))\nfor y in range(0, 20):\n for dev in devices:\n reading = {'id': dev, 'timestamp': str(datetime.datetime.utcnow()),\n 'uv': random.random(), 'temperature': random.randint(70, 100),\n 'humidity': random.randint(70, 100)}\n s = json.dumps(reading)\n sbs.send_event('entrysignals', s)\n print(y)\n",
"<import token>\n<assignment token>\nfor x in range(0, 10):\n devices.append(str(uuid.uuid4()))\nfor y in range(0, 20):\n for dev in devices:\n reading = {'id': dev, 'timestamp': str(datetime.datetime.utcnow()),\n 'uv': random.random(), 'temperature': random.randint(70, 100),\n 'humidity': random.randint(70, 100)}\n s = json.dumps(reading)\n sbs.send_event('entrysignals', s)\n print(y)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
98,895 |
f8f68a06303712f28be07fc91589356a760e2311
|
from __future__ import division
import numpy as np
import pickle
file = open("syntax_avg_lang_knn-2",'r')
sys_1 = pickle.load(file)
preds_1 = [item for sublist in sys_1 for item in sublist]
file = open("syntax_avg_lang_preds",'r')
sys_2 = pickle.load(file)
preds_2 = [item for sublist in sys_2 for item in sublist]
file = open("syntax_avg_lang_refs",'r')
references = pickle.load(file)
refs = [item for sublist in references for item in sublist]
assert len(preds_1)==len(refs)
assert len(preds_2)==len(refs)
bootstrap_number=10000
count_win_1 = 0
count_win_2 = 0
count_ties = 0
for k in range(bootstrap_number):
# Make random subset of half sentences in test data
subset = np.random.choice(len(refs),size=int(0.5*len(refs)))
#print([preds_1[idx] for idx in subset])
#print([preds_2[idx] for idx in subset])
#print([refs[idx] for idx in subset])
b_1 = sum([1 for idx in subset if preds_1[idx]==refs[idx]])
b_2 = sum([1 for idx in subset if preds_2[idx]==refs[idx]])
#print(b_1,b_2)
if b_1 > b_2:
count_win_1 += 1
elif b_1 < b_2:
count_win_2 += 1
else:
count_ties += 1
print('Win probabilities: %.3f , %.3f , Tie Probability: %.3f' % ((count_win_1/bootstrap_number)*100.0,(count_win_2/bootstrap_number)*100.0, (count_ties/bootstrap_number)*100.0))
|
[
"from __future__ import division\nimport numpy as np\nimport pickle\n\nfile = open(\"syntax_avg_lang_knn-2\",'r') \nsys_1 = pickle.load(file)\npreds_1 = [item for sublist in sys_1 for item in sublist]\n\nfile = open(\"syntax_avg_lang_preds\",'r')\nsys_2 = pickle.load(file)\npreds_2 = [item for sublist in sys_2 for item in sublist]\n\nfile = open(\"syntax_avg_lang_refs\",'r')\nreferences = pickle.load(file)\nrefs = [item for sublist in references for item in sublist]\n\n\nassert len(preds_1)==len(refs)\nassert len(preds_2)==len(refs)\n\nbootstrap_number=10000\ncount_win_1 = 0\ncount_win_2 = 0\ncount_ties = 0\n\n\nfor k in range(bootstrap_number):\n # Make random subset of half sentences in test data\n subset = np.random.choice(len(refs),size=int(0.5*len(refs)))\n #print([preds_1[idx] for idx in subset])\n #print([preds_2[idx] for idx in subset])\n #print([refs[idx] for idx in subset])\n b_1 = sum([1 for idx in subset if preds_1[idx]==refs[idx]])\n b_2 = sum([1 for idx in subset if preds_2[idx]==refs[idx]])\n #print(b_1,b_2) \n if b_1 > b_2:\n\tcount_win_1 += 1\n elif b_1 < b_2:\n\tcount_win_2 += 1\n else:\n\tcount_ties += 1\n\nprint('Win probabilities: %.3f , %.3f , Tie Probability: %.3f' % ((count_win_1/bootstrap_number)*100.0,(count_win_2/bootstrap_number)*100.0, (count_ties/bootstrap_number)*100.0))\n"
] | true |
98,896 |
6c6009eec2fb24d57802dbe10336363abbc77e34
|
# trying to make a one list and add links as per needs
#adithya prabhu
#developed jun 2021
#latest open input in webpage
import webbrowser
import time
import datetime
import pyautogui
from tabulate import tabulate
screenWidth,screenHeight = pyautogui.size()
now = datetime.datetime.now()
day=(now.strftime('%A'))
print(day)
Current_time = time.strftime("%H.%M")
all="https://meet.google.com/ghfgffc"
comp="https://meet.google.com/ghfghgcf"
rest="https://please-take-a-break.adithyarprabhu.repl.co/" #please take a break website
# monday_url=[all,comp,all,all,rest,rest,rest,rest]
# tuesday_url=[comp,all,all,all,rest,rest,rest,rest]
# wednesday_url=[all,comp,all,all,rest,rest,rest,rest]
# thursday_url=[all,comp,all,all,rest,rest,rest,rest]
# friday_url=[all,all,all,comp,rest,rest,rest,rest]
# saturday_url=[all,all,all,all,rest,rest,rest,rest]
# while n<len(urls):
# print(urls[n],times[n])
# n=n+1
# while n<len(urls):
# print(urls[n],times[n],sep="-----at-->")
# n=n+1
urls=[]
if day=='Monday':
urls.append(all)
urls.append(comp)
urls.append(all)
urls.append(all)
urls.append(rest)
urls.append(rest)
urls.append(rest)
urls.append(rest)
elif day=='Tuesday':
urls.append(comp)
urls.append(all)
urls.append(all)
urls.append(all)
urls.append(rest)
urls.append(rest)
urls.append(rest)
urls.append(rest)
elif day=='Wednesday':
urls.append(all)
urls.append(comp)
urls.append(all)
urls.append(all)
urls.append(rest)
urls.append(rest)
urls.append(rest)
urls.append(rest)
elif day=='Thursday':
urls.append(all)
urls.append(comp)
urls.append(all)
urls.append(all)
urls.append(rest)
urls.append(rest)
urls.append(rest)
urls.append(rest)
elif day=='Friday':
urls.append(all)
urls.append(all)
urls.append(all)
urls.append(comp)
urls.append(rest)
urls.append(rest)
urls.append(rest)
urls.append(rest)
elif day=='Saturday':
urls.append(all)
urls.append(all)
urls.append(all)
urls.append(all)
urls.append(rest)
urls.append(rest)
urls.append(rest)
urls.append(rest)
else:#Sunday
print("Sunday is a Holiday 😁😀😀 you fool")
time.sleep(10)
times=[]
times.append("07.12")
times.append("08.17")
times.append("09.27")
times.append("10.27")
times.append("12.17")
times.append("14.43")
times.append("15.51")
times.append("19.31")
#definitions
def add():
y_n=input("do you have any extra sessions today ?(y/n) -->")
while "y" in y_n.lower():
exttime=input("enter the time to shedule it in format HH.MM-->")
times.append(exttime)
times.sort()
pos_in_times=times.index(exttime)
ext=input("Enter the link here(must include https and all that)-->")
urls.insert(pos_in_times,ext)
y_n=input("do you have any more extra sessions today ?(y/n) -->")
def next():
while Current_time >(times[0]) :
urls.remove(urls[0])
times.remove(times[0])
def remove():
re_y_n=input("do you want to remove any session (y/n)-->")
task_table()
while "y" in re_y_n.lower():
which=int(input("which session do you want to remove(1,2..)--"))
which=which-1
urls.remove(urls[which])#use numbers from 0 as 0 is first element of the list
times.remove(times[which])
task_table()
re_y_n=input("now do you want to remove any session (y/n)-->")
def opengmeet_or_site():
next()
for i in range(100):
link = (urls[0])
alarm = (times[0])
Current_time = time.strftime("%H.%M")
while (Current_time != alarm):
print ("Waiting, the current time is " + Current_time+" :-( " )
Current_time = time.strftime("%H.%M")
time.sleep(1)
if (Current_time == alarm):
print ("WEBSITE IS OPENING :D")
if "meet.google.com" in (urls[0]) :
webbrowser.open(link)
pyautogui.press('enter')
time.sleep(2)
pyautogui.click(100*screenWidth/1680,410*screenHeight/1050) #Join now
time.sleep(10)
pyautogui.hotkey('ctrl','d')
time.sleep(1)
pyautogui.hotkey('ctrl','e')
time.sleep(1)
pyautogui.click(1150*screenWidth/1680,620*screenHeight/1050) #Join now
urls.remove(urls[0])
times.remove(times[0])
else:
webbrowser.open(link)
urls.remove(urls[0])
times.remove(times[0])
def task_table():
from tabulate import tabulate
next()
n=0
l = []
numbers=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,100]
while n<len(urls):
a= [numbers[n],urls[n],times[n]]
l.append(a)
n=n+1
table = tabulate(l, headers=['num','url', 'time'], tablefmt='orgtbl')
print(table)
def start_table():
start_list = [["1","VIEW TODAY'S SCHEDULE"],[],["2","ADD TASK TO TODAY'S SCHEDULE"],[]
,["3","REMOVE TASK FROM TODAY'S SCHEDULE"],[],["4","RUN THE PROGRAMME"],[],["5","EXIT"]]
start_table = tabulate(start_list, headers=['OPTION','PURPOSE'], tablefmt='orgtbl')
print(start_table)
next()
print("**********************************************************")
print("WELCOME TO AUTOMATIC WEBSITE OPENER AND GOOGLE MEET JOINER")
print("**********************************************************")
start_table()
k=0
while k==k:
o=input("Enter your option to proceed(1/2/3/4/5)-->")
if "1" in o:
task_table()
elif "2" in o:
add()
elif "3" in o:
remove()
elif "4" in o:
opengmeet_or_site()
elif "5" in o:
print("Thanks")
break
else:
print("Error Man try again!")
|
[
"# trying to make a one list and add links as per needs\r\n\r\n#adithya prabhu\r\n#developed jun 2021\r\n#latest open input in webpage\r\n\r\n\r\nimport webbrowser\r\nimport time \r\nimport datetime\r\nimport pyautogui\r\nfrom tabulate import tabulate\r\n\r\nscreenWidth,screenHeight = pyautogui.size()\r\nnow = datetime.datetime.now()\r\nday=(now.strftime('%A'))\r\nprint(day)\r\nCurrent_time = time.strftime(\"%H.%M\")\r\n \r\n\r\nall=\"https://meet.google.com/ghfgffc\"\r\ncomp=\"https://meet.google.com/ghfghgcf\"\r\nrest=\"https://please-take-a-break.adithyarprabhu.repl.co/\" #please take a break website\r\n\r\n# monday_url=[all,comp,all,all,rest,rest,rest,rest]\r\n# tuesday_url=[comp,all,all,all,rest,rest,rest,rest]\r\n# wednesday_url=[all,comp,all,all,rest,rest,rest,rest]\r\n# thursday_url=[all,comp,all,all,rest,rest,rest,rest]\r\n# friday_url=[all,all,all,comp,rest,rest,rest,rest]\r\n# saturday_url=[all,all,all,all,rest,rest,rest,rest]\r\n# while n<len(urls):\r\n# print(urls[n],times[n])\r\n# n=n+1\r\n# while n<len(urls):\r\n# print(urls[n],times[n],sep=\"-----at-->\")\r\n# n=n+1\r\n\r\n\r\nurls=[]\r\nif day=='Monday':\r\n urls.append(all)\r\n urls.append(comp)\r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\nelif day=='Tuesday': \r\n urls.append(comp)\r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\nelif day=='Wednesday': \r\n urls.append(all)\r\n urls.append(comp)\r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\nelif day=='Thursday': \r\n urls.append(all)\r\n urls.append(comp)\r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\nelif day=='Friday': \r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(comp)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\nelif day=='Saturday': \r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(all)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\n urls.append(rest)\r\nelse:#Sunday\r\n print(\"Sunday is a Holiday 😁😀😀 you fool\")\r\n time.sleep(10)\r\n\r\n\r\n\r\n\r\n\r\ntimes=[]\r\ntimes.append(\"07.12\")\r\ntimes.append(\"08.17\")\r\ntimes.append(\"09.27\")\r\ntimes.append(\"10.27\")\r\ntimes.append(\"12.17\")\r\ntimes.append(\"14.43\")\r\ntimes.append(\"15.51\")\r\ntimes.append(\"19.31\")\r\n\r\n\r\n\r\n\r\n\r\n#definitions\r\ndef add():\r\n y_n=input(\"do you have any extra sessions today ?(y/n) -->\")\r\n while \"y\" in y_n.lower():\r\n exttime=input(\"enter the time to shedule it in format HH.MM-->\")\r\n times.append(exttime)\r\n times.sort()\r\n pos_in_times=times.index(exttime)\r\n ext=input(\"Enter the link here(must include https and all that)-->\")\r\n urls.insert(pos_in_times,ext)\r\n y_n=input(\"do you have any more extra sessions today ?(y/n) -->\")\r\ndef next():\r\n while Current_time >(times[0]) :\r\n urls.remove(urls[0])\r\n times.remove(times[0])\r\ndef remove():\r\n re_y_n=input(\"do you want to remove any session (y/n)-->\")\r\n task_table()\r\n while \"y\" in re_y_n.lower():\r\n which=int(input(\"which session do you want to remove(1,2..)--\"))\r\n which=which-1\r\n urls.remove(urls[which])#use numbers from 0 as 0 is first element of the list\r\n 
times.remove(times[which])\r\n task_table()\r\n re_y_n=input(\"now do you want to remove any session (y/n)-->\")\r\ndef opengmeet_or_site():\r\n next()\r\n for i in range(100):\r\n link = (urls[0]) \r\n alarm = (times[0])\r\n Current_time = time.strftime(\"%H.%M\")\r\n while (Current_time != alarm): \r\n print (\"Waiting, the current time is \" + Current_time+\" :-( \" )\r\n Current_time = time.strftime(\"%H.%M\") \r\n time.sleep(1) \r\n if (Current_time == alarm): \r\n print (\"WEBSITE IS OPENING :D\")\r\n if \"meet.google.com\" in (urls[0]) : \r\n webbrowser.open(link)\r\n pyautogui.press('enter')\r\n time.sleep(2)\r\n pyautogui.click(100*screenWidth/1680,410*screenHeight/1050) #Join now\r\n time.sleep(10)\r\n pyautogui.hotkey('ctrl','d')\r\n time.sleep(1)\r\n pyautogui.hotkey('ctrl','e')\r\n time.sleep(1)\r\n pyautogui.click(1150*screenWidth/1680,620*screenHeight/1050) #Join now\r\n urls.remove(urls[0])\r\n times.remove(times[0])\r\n else:\r\n webbrowser.open(link)\r\n urls.remove(urls[0])\r\n times.remove(times[0]) \r\ndef task_table():\r\n from tabulate import tabulate\r\n next()\r\n n=0\r\n l = []\r\n numbers=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,100]\r\n while n<len(urls):\r\n a= [numbers[n],urls[n],times[n]]\r\n l.append(a)\r\n n=n+1\r\n table = tabulate(l, headers=['num','url', 'time'], tablefmt='orgtbl')\r\n print(table)\r\ndef start_table():\r\n start_list = [[\"1\",\"VIEW TODAY'S SCHEDULE\"],[],[\"2\",\"ADD TASK TO TODAY'S SCHEDULE\"],[]\r\n ,[\"3\",\"REMOVE TASK FROM TODAY'S SCHEDULE\"],[],[\"4\",\"RUN THE PROGRAMME\"],[],[\"5\",\"EXIT\"]]\r\n start_table = tabulate(start_list, headers=['OPTION','PURPOSE'], tablefmt='orgtbl')\r\n\r\n print(start_table)\r\n\r\nnext()\r\n\r\nprint(\"**********************************************************\")\r\nprint(\"WELCOME TO AUTOMATIC WEBSITE OPENER AND GOOGLE MEET JOINER\")\r\nprint(\"**********************************************************\")\r\n\r\nstart_table()\r\nk=0\r\nwhile k==k:\r\n o=input(\"Enter your option to proceed(1/2/3/4/5)-->\")\r\n if \"1\" in o:\r\n task_table()\r\n elif \"2\" in o:\r\n add()\r\n elif \"3\" in o:\r\n remove()\r\n elif \"4\" in o:\r\n opengmeet_or_site()\r\n elif \"5\" in o:\r\n print(\"Thanks\")\r\n break\r\n else:\r\n print(\"Error Man try again!\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"import webbrowser\nimport time\nimport datetime\nimport pyautogui\nfrom tabulate import tabulate\nscreenWidth, screenHeight = pyautogui.size()\nnow = datetime.datetime.now()\nday = now.strftime('%A')\nprint(day)\nCurrent_time = time.strftime('%H.%M')\nall = 'https://meet.google.com/ghfgffc'\ncomp = 'https://meet.google.com/ghfghgcf'\nrest = 'https://please-take-a-break.adithyarprabhu.repl.co/'\nurls = []\nif day == 'Monday':\n urls.append(all)\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Tuesday':\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Wednesday':\n urls.append(all)\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Thursday':\n urls.append(all)\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Friday':\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(comp)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Saturday':\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelse:\n print('Sunday is a Holiday 😁😀😀 you fool')\n time.sleep(10)\ntimes = []\ntimes.append('07.12')\ntimes.append('08.17')\ntimes.append('09.27')\ntimes.append('10.27')\ntimes.append('12.17')\ntimes.append('14.43')\ntimes.append('15.51')\ntimes.append('19.31')\n\n\ndef add():\n y_n = input('do you have any extra sessions today ?(y/n) -->')\n while 'y' in y_n.lower():\n exttime = input('enter the time to shedule it in format HH.MM-->')\n times.append(exttime)\n times.sort()\n pos_in_times = times.index(exttime)\n ext = input('Enter the link here(must include https and all that)-->')\n urls.insert(pos_in_times, ext)\n y_n = input('do you have any more extra sessions today ?(y/n) -->')\n\n\ndef next():\n while Current_time > times[0]:\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef remove():\n re_y_n = input('do you want to remove any session (y/n)-->')\n task_table()\n while 'y' in re_y_n.lower():\n which = int(input('which session do you want to remove(1,2..)--'))\n which = which - 1\n urls.remove(urls[which])\n times.remove(times[which])\n task_table()\n re_y_n = input('now do you want to remove any session (y/n)-->')\n\n\ndef opengmeet_or_site():\n next()\n for i in range(100):\n link = urls[0]\n alarm = times[0]\n Current_time = time.strftime('%H.%M')\n while Current_time != alarm:\n print('Waiting, the current time is ' + Current_time + ' :-( ')\n Current_time = time.strftime('%H.%M')\n time.sleep(1)\n if Current_time == alarm:\n print('WEBSITE IS OPENING :D')\n if 'meet.google.com' in urls[0]:\n webbrowser.open(link)\n pyautogui.press('enter')\n time.sleep(2)\n pyautogui.click(100 * screenWidth / 1680, 410 *\n screenHeight / 1050)\n time.sleep(10)\n pyautogui.hotkey('ctrl', 'd')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'e')\n time.sleep(1)\n pyautogui.click(1150 * screenWidth / 1680, 620 *\n screenHeight / 1050)\n urls.remove(urls[0])\n times.remove(times[0])\n else:\n webbrowser.open(link)\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef task_table():\n from tabulate import tabulate\n next()\n n = 
0\n l = []\n numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, \n 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,\n 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,\n 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]\n while n < len(urls):\n a = [numbers[n], urls[n], times[n]]\n l.append(a)\n n = n + 1\n table = tabulate(l, headers=['num', 'url', 'time'], tablefmt='orgtbl')\n print(table)\n\n\ndef start_table():\n start_list = [['1', \"VIEW TODAY'S SCHEDULE\"], [], ['2',\n \"ADD TASK TO TODAY'S SCHEDULE\"], [], ['3',\n \"REMOVE TASK FROM TODAY'S SCHEDULE\"], [], ['4', 'RUN THE PROGRAMME'\n ], [], ['5', 'EXIT']]\n start_table = tabulate(start_list, headers=['OPTION', 'PURPOSE'],\n tablefmt='orgtbl')\n print(start_table)\n\n\nnext()\nprint('**********************************************************')\nprint('WELCOME TO AUTOMATIC WEBSITE OPENER AND GOOGLE MEET JOINER')\nprint('**********************************************************')\nstart_table()\nk = 0\nwhile k == k:\n o = input('Enter your option to proceed(1/2/3/4/5)-->')\n if '1' in o:\n task_table()\n elif '2' in o:\n add()\n elif '3' in o:\n remove()\n elif '4' in o:\n opengmeet_or_site()\n elif '5' in o:\n print('Thanks')\n break\n else:\n print('Error Man try again!')\n",
"<import token>\nscreenWidth, screenHeight = pyautogui.size()\nnow = datetime.datetime.now()\nday = now.strftime('%A')\nprint(day)\nCurrent_time = time.strftime('%H.%M')\nall = 'https://meet.google.com/ghfgffc'\ncomp = 'https://meet.google.com/ghfghgcf'\nrest = 'https://please-take-a-break.adithyarprabhu.repl.co/'\nurls = []\nif day == 'Monday':\n urls.append(all)\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Tuesday':\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Wednesday':\n urls.append(all)\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Thursday':\n urls.append(all)\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Friday':\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(comp)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Saturday':\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelse:\n print('Sunday is a Holiday 😁😀😀 you fool')\n time.sleep(10)\ntimes = []\ntimes.append('07.12')\ntimes.append('08.17')\ntimes.append('09.27')\ntimes.append('10.27')\ntimes.append('12.17')\ntimes.append('14.43')\ntimes.append('15.51')\ntimes.append('19.31')\n\n\ndef add():\n y_n = input('do you have any extra sessions today ?(y/n) -->')\n while 'y' in y_n.lower():\n exttime = input('enter the time to shedule it in format HH.MM-->')\n times.append(exttime)\n times.sort()\n pos_in_times = times.index(exttime)\n ext = input('Enter the link here(must include https and all that)-->')\n urls.insert(pos_in_times, ext)\n y_n = input('do you have any more extra sessions today ?(y/n) -->')\n\n\ndef next():\n while Current_time > times[0]:\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef remove():\n re_y_n = input('do you want to remove any session (y/n)-->')\n task_table()\n while 'y' in re_y_n.lower():\n which = int(input('which session do you want to remove(1,2..)--'))\n which = which - 1\n urls.remove(urls[which])\n times.remove(times[which])\n task_table()\n re_y_n = input('now do you want to remove any session (y/n)-->')\n\n\ndef opengmeet_or_site():\n next()\n for i in range(100):\n link = urls[0]\n alarm = times[0]\n Current_time = time.strftime('%H.%M')\n while Current_time != alarm:\n print('Waiting, the current time is ' + Current_time + ' :-( ')\n Current_time = time.strftime('%H.%M')\n time.sleep(1)\n if Current_time == alarm:\n print('WEBSITE IS OPENING :D')\n if 'meet.google.com' in urls[0]:\n webbrowser.open(link)\n pyautogui.press('enter')\n time.sleep(2)\n pyautogui.click(100 * screenWidth / 1680, 410 *\n screenHeight / 1050)\n time.sleep(10)\n pyautogui.hotkey('ctrl', 'd')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'e')\n time.sleep(1)\n pyautogui.click(1150 * screenWidth / 1680, 620 *\n screenHeight / 1050)\n urls.remove(urls[0])\n times.remove(times[0])\n else:\n webbrowser.open(link)\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef task_table():\n from tabulate import tabulate\n next()\n n = 0\n l = []\n numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 
\n 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,\n 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,\n 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]\n while n < len(urls):\n a = [numbers[n], urls[n], times[n]]\n l.append(a)\n n = n + 1\n table = tabulate(l, headers=['num', 'url', 'time'], tablefmt='orgtbl')\n print(table)\n\n\ndef start_table():\n start_list = [['1', \"VIEW TODAY'S SCHEDULE\"], [], ['2',\n \"ADD TASK TO TODAY'S SCHEDULE\"], [], ['3',\n \"REMOVE TASK FROM TODAY'S SCHEDULE\"], [], ['4', 'RUN THE PROGRAMME'\n ], [], ['5', 'EXIT']]\n start_table = tabulate(start_list, headers=['OPTION', 'PURPOSE'],\n tablefmt='orgtbl')\n print(start_table)\n\n\nnext()\nprint('**********************************************************')\nprint('WELCOME TO AUTOMATIC WEBSITE OPENER AND GOOGLE MEET JOINER')\nprint('**********************************************************')\nstart_table()\nk = 0\nwhile k == k:\n o = input('Enter your option to proceed(1/2/3/4/5)-->')\n if '1' in o:\n task_table()\n elif '2' in o:\n add()\n elif '3' in o:\n remove()\n elif '4' in o:\n opengmeet_or_site()\n elif '5' in o:\n print('Thanks')\n break\n else:\n print('Error Man try again!')\n",
"<import token>\n<assignment token>\nprint(day)\n<assignment token>\nif day == 'Monday':\n urls.append(all)\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Tuesday':\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Wednesday':\n urls.append(all)\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Thursday':\n urls.append(all)\n urls.append(comp)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Friday':\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(comp)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelif day == 'Saturday':\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(all)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\n urls.append(rest)\nelse:\n print('Sunday is a Holiday 😁😀😀 you fool')\n time.sleep(10)\n<assignment token>\ntimes.append('07.12')\ntimes.append('08.17')\ntimes.append('09.27')\ntimes.append('10.27')\ntimes.append('12.17')\ntimes.append('14.43')\ntimes.append('15.51')\ntimes.append('19.31')\n\n\ndef add():\n y_n = input('do you have any extra sessions today ?(y/n) -->')\n while 'y' in y_n.lower():\n exttime = input('enter the time to shedule it in format HH.MM-->')\n times.append(exttime)\n times.sort()\n pos_in_times = times.index(exttime)\n ext = input('Enter the link here(must include https and all that)-->')\n urls.insert(pos_in_times, ext)\n y_n = input('do you have any more extra sessions today ?(y/n) -->')\n\n\ndef next():\n while Current_time > times[0]:\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef remove():\n re_y_n = input('do you want to remove any session (y/n)-->')\n task_table()\n while 'y' in re_y_n.lower():\n which = int(input('which session do you want to remove(1,2..)--'))\n which = which - 1\n urls.remove(urls[which])\n times.remove(times[which])\n task_table()\n re_y_n = input('now do you want to remove any session (y/n)-->')\n\n\ndef opengmeet_or_site():\n next()\n for i in range(100):\n link = urls[0]\n alarm = times[0]\n Current_time = time.strftime('%H.%M')\n while Current_time != alarm:\n print('Waiting, the current time is ' + Current_time + ' :-( ')\n Current_time = time.strftime('%H.%M')\n time.sleep(1)\n if Current_time == alarm:\n print('WEBSITE IS OPENING :D')\n if 'meet.google.com' in urls[0]:\n webbrowser.open(link)\n pyautogui.press('enter')\n time.sleep(2)\n pyautogui.click(100 * screenWidth / 1680, 410 *\n screenHeight / 1050)\n time.sleep(10)\n pyautogui.hotkey('ctrl', 'd')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'e')\n time.sleep(1)\n pyautogui.click(1150 * screenWidth / 1680, 620 *\n screenHeight / 1050)\n urls.remove(urls[0])\n times.remove(times[0])\n else:\n webbrowser.open(link)\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef task_table():\n from tabulate import tabulate\n next()\n n = 0\n l = []\n numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, \n 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,\n 69, 70, 71, 72, 73, 74, 75, 76, 77, 
78, 79, 80, 81, 82, 83, 84, 85,\n 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]\n while n < len(urls):\n a = [numbers[n], urls[n], times[n]]\n l.append(a)\n n = n + 1\n table = tabulate(l, headers=['num', 'url', 'time'], tablefmt='orgtbl')\n print(table)\n\n\ndef start_table():\n start_list = [['1', \"VIEW TODAY'S SCHEDULE\"], [], ['2',\n \"ADD TASK TO TODAY'S SCHEDULE\"], [], ['3',\n \"REMOVE TASK FROM TODAY'S SCHEDULE\"], [], ['4', 'RUN THE PROGRAMME'\n ], [], ['5', 'EXIT']]\n start_table = tabulate(start_list, headers=['OPTION', 'PURPOSE'],\n tablefmt='orgtbl')\n print(start_table)\n\n\nnext()\nprint('**********************************************************')\nprint('WELCOME TO AUTOMATIC WEBSITE OPENER AND GOOGLE MEET JOINER')\nprint('**********************************************************')\nstart_table()\n<assignment token>\nwhile k == k:\n o = input('Enter your option to proceed(1/2/3/4/5)-->')\n if '1' in o:\n task_table()\n elif '2' in o:\n add()\n elif '3' in o:\n remove()\n elif '4' in o:\n opengmeet_or_site()\n elif '5' in o:\n print('Thanks')\n break\n else:\n print('Error Man try again!')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef add():\n y_n = input('do you have any extra sessions today ?(y/n) -->')\n while 'y' in y_n.lower():\n exttime = input('enter the time to shedule it in format HH.MM-->')\n times.append(exttime)\n times.sort()\n pos_in_times = times.index(exttime)\n ext = input('Enter the link here(must include https and all that)-->')\n urls.insert(pos_in_times, ext)\n y_n = input('do you have any more extra sessions today ?(y/n) -->')\n\n\ndef next():\n while Current_time > times[0]:\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef remove():\n re_y_n = input('do you want to remove any session (y/n)-->')\n task_table()\n while 'y' in re_y_n.lower():\n which = int(input('which session do you want to remove(1,2..)--'))\n which = which - 1\n urls.remove(urls[which])\n times.remove(times[which])\n task_table()\n re_y_n = input('now do you want to remove any session (y/n)-->')\n\n\ndef opengmeet_or_site():\n next()\n for i in range(100):\n link = urls[0]\n alarm = times[0]\n Current_time = time.strftime('%H.%M')\n while Current_time != alarm:\n print('Waiting, the current time is ' + Current_time + ' :-( ')\n Current_time = time.strftime('%H.%M')\n time.sleep(1)\n if Current_time == alarm:\n print('WEBSITE IS OPENING :D')\n if 'meet.google.com' in urls[0]:\n webbrowser.open(link)\n pyautogui.press('enter')\n time.sleep(2)\n pyautogui.click(100 * screenWidth / 1680, 410 *\n screenHeight / 1050)\n time.sleep(10)\n pyautogui.hotkey('ctrl', 'd')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'e')\n time.sleep(1)\n pyautogui.click(1150 * screenWidth / 1680, 620 *\n screenHeight / 1050)\n urls.remove(urls[0])\n times.remove(times[0])\n else:\n webbrowser.open(link)\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef task_table():\n from tabulate import tabulate\n next()\n n = 0\n l = []\n numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, \n 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,\n 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,\n 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]\n while n < len(urls):\n a = [numbers[n], urls[n], times[n]]\n l.append(a)\n n = n + 1\n table = tabulate(l, headers=['num', 'url', 'time'], tablefmt='orgtbl')\n print(table)\n\n\ndef start_table():\n start_list = [['1', \"VIEW TODAY'S SCHEDULE\"], [], ['2',\n \"ADD TASK TO TODAY'S SCHEDULE\"], [], ['3',\n \"REMOVE TASK FROM TODAY'S SCHEDULE\"], [], ['4', 'RUN THE PROGRAMME'\n ], [], ['5', 'EXIT']]\n start_table = tabulate(start_list, headers=['OPTION', 'PURPOSE'],\n tablefmt='orgtbl')\n print(start_table)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef add():\n y_n = input('do you have any extra sessions today ?(y/n) -->')\n while 'y' in y_n.lower():\n exttime = input('enter the time to shedule it in format HH.MM-->')\n times.append(exttime)\n times.sort()\n pos_in_times = times.index(exttime)\n ext = input('Enter the link here(must include https and all that)-->')\n urls.insert(pos_in_times, ext)\n y_n = input('do you have any more extra sessions today ?(y/n) -->')\n\n\n<function token>\n\n\ndef remove():\n re_y_n = input('do you want to remove any session (y/n)-->')\n task_table()\n while 'y' in re_y_n.lower():\n which = int(input('which session do you want to remove(1,2..)--'))\n which = which - 1\n urls.remove(urls[which])\n times.remove(times[which])\n task_table()\n re_y_n = input('now do you want to remove any session (y/n)-->')\n\n\ndef opengmeet_or_site():\n next()\n for i in range(100):\n link = urls[0]\n alarm = times[0]\n Current_time = time.strftime('%H.%M')\n while Current_time != alarm:\n print('Waiting, the current time is ' + Current_time + ' :-( ')\n Current_time = time.strftime('%H.%M')\n time.sleep(1)\n if Current_time == alarm:\n print('WEBSITE IS OPENING :D')\n if 'meet.google.com' in urls[0]:\n webbrowser.open(link)\n pyautogui.press('enter')\n time.sleep(2)\n pyautogui.click(100 * screenWidth / 1680, 410 *\n screenHeight / 1050)\n time.sleep(10)\n pyautogui.hotkey('ctrl', 'd')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'e')\n time.sleep(1)\n pyautogui.click(1150 * screenWidth / 1680, 620 *\n screenHeight / 1050)\n urls.remove(urls[0])\n times.remove(times[0])\n else:\n webbrowser.open(link)\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef task_table():\n from tabulate import tabulate\n next()\n n = 0\n l = []\n numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, \n 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,\n 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,\n 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]\n while n < len(urls):\n a = [numbers[n], urls[n], times[n]]\n l.append(a)\n n = n + 1\n table = tabulate(l, headers=['num', 'url', 'time'], tablefmt='orgtbl')\n print(table)\n\n\ndef start_table():\n start_list = [['1', \"VIEW TODAY'S SCHEDULE\"], [], ['2',\n \"ADD TASK TO TODAY'S SCHEDULE\"], [], ['3',\n \"REMOVE TASK FROM TODAY'S SCHEDULE\"], [], ['4', 'RUN THE PROGRAMME'\n ], [], ['5', 'EXIT']]\n start_table = tabulate(start_list, headers=['OPTION', 'PURPOSE'],\n tablefmt='orgtbl')\n print(start_table)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\ndef add():\n y_n = input('do you have any extra sessions today ?(y/n) -->')\n while 'y' in y_n.lower():\n exttime = input('enter the time to shedule it in format HH.MM-->')\n times.append(exttime)\n times.sort()\n pos_in_times = times.index(exttime)\n ext = input('Enter the link here(must include https and all that)-->')\n urls.insert(pos_in_times, ext)\n y_n = input('do you have any more extra sessions today ?(y/n) -->')\n\n\n<function token>\n<function token>\n\n\ndef opengmeet_or_site():\n next()\n for i in range(100):\n link = urls[0]\n alarm = times[0]\n Current_time = time.strftime('%H.%M')\n while Current_time != alarm:\n print('Waiting, the current time is ' + Current_time + ' :-( ')\n Current_time = time.strftime('%H.%M')\n time.sleep(1)\n if Current_time == alarm:\n print('WEBSITE IS OPENING :D')\n if 'meet.google.com' in urls[0]:\n webbrowser.open(link)\n pyautogui.press('enter')\n time.sleep(2)\n pyautogui.click(100 * screenWidth / 1680, 410 *\n screenHeight / 1050)\n time.sleep(10)\n pyautogui.hotkey('ctrl', 'd')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'e')\n time.sleep(1)\n pyautogui.click(1150 * screenWidth / 1680, 620 *\n screenHeight / 1050)\n urls.remove(urls[0])\n times.remove(times[0])\n else:\n webbrowser.open(link)\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef task_table():\n from tabulate import tabulate\n next()\n n = 0\n l = []\n numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, \n 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,\n 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,\n 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]\n while n < len(urls):\n a = [numbers[n], urls[n], times[n]]\n l.append(a)\n n = n + 1\n table = tabulate(l, headers=['num', 'url', 'time'], tablefmt='orgtbl')\n print(table)\n\n\ndef start_table():\n start_list = [['1', \"VIEW TODAY'S SCHEDULE\"], [], ['2',\n \"ADD TASK TO TODAY'S SCHEDULE\"], [], ['3',\n \"REMOVE TASK FROM TODAY'S SCHEDULE\"], [], ['4', 'RUN THE PROGRAMME'\n ], [], ['5', 'EXIT']]\n start_table = tabulate(start_list, headers=['OPTION', 'PURPOSE'],\n tablefmt='orgtbl')\n print(start_table)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\ndef opengmeet_or_site():\n next()\n for i in range(100):\n link = urls[0]\n alarm = times[0]\n Current_time = time.strftime('%H.%M')\n while Current_time != alarm:\n print('Waiting, the current time is ' + Current_time + ' :-( ')\n Current_time = time.strftime('%H.%M')\n time.sleep(1)\n if Current_time == alarm:\n print('WEBSITE IS OPENING :D')\n if 'meet.google.com' in urls[0]:\n webbrowser.open(link)\n pyautogui.press('enter')\n time.sleep(2)\n pyautogui.click(100 * screenWidth / 1680, 410 *\n screenHeight / 1050)\n time.sleep(10)\n pyautogui.hotkey('ctrl', 'd')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'e')\n time.sleep(1)\n pyautogui.click(1150 * screenWidth / 1680, 620 *\n screenHeight / 1050)\n urls.remove(urls[0])\n times.remove(times[0])\n else:\n webbrowser.open(link)\n urls.remove(urls[0])\n times.remove(times[0])\n\n\ndef task_table():\n from tabulate import tabulate\n next()\n n = 0\n l = []\n numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, \n 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,\n 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85,\n 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]\n while n < len(urls):\n a = [numbers[n], urls[n], times[n]]\n l.append(a)\n n = n + 1\n table = tabulate(l, headers=['num', 'url', 'time'], tablefmt='orgtbl')\n print(table)\n\n\ndef start_table():\n start_list = [['1', \"VIEW TODAY'S SCHEDULE\"], [], ['2',\n \"ADD TASK TO TODAY'S SCHEDULE\"], [], ['3',\n \"REMOVE TASK FROM TODAY'S SCHEDULE\"], [], ['4', 'RUN THE PROGRAMME'\n ], [], ['5', 'EXIT']]\n start_table = tabulate(start_list, headers=['OPTION', 'PURPOSE'],\n tablefmt='orgtbl')\n print(start_table)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\ndef opengmeet_or_site():\n next()\n for i in range(100):\n link = urls[0]\n alarm = times[0]\n Current_time = time.strftime('%H.%M')\n while Current_time != alarm:\n print('Waiting, the current time is ' + Current_time + ' :-( ')\n Current_time = time.strftime('%H.%M')\n time.sleep(1)\n if Current_time == alarm:\n print('WEBSITE IS OPENING :D')\n if 'meet.google.com' in urls[0]:\n webbrowser.open(link)\n pyautogui.press('enter')\n time.sleep(2)\n pyautogui.click(100 * screenWidth / 1680, 410 *\n screenHeight / 1050)\n time.sleep(10)\n pyautogui.hotkey('ctrl', 'd')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'e')\n time.sleep(1)\n pyautogui.click(1150 * screenWidth / 1680, 620 *\n screenHeight / 1050)\n urls.remove(urls[0])\n times.remove(times[0])\n else:\n webbrowser.open(link)\n urls.remove(urls[0])\n times.remove(times[0])\n\n\n<function token>\n\n\ndef start_table():\n start_list = [['1', \"VIEW TODAY'S SCHEDULE\"], [], ['2',\n \"ADD TASK TO TODAY'S SCHEDULE\"], [], ['3',\n \"REMOVE TASK FROM TODAY'S SCHEDULE\"], [], ['4', 'RUN THE PROGRAMME'\n ], [], ['5', 'EXIT']]\n start_table = tabulate(start_list, headers=['OPTION', 'PURPOSE'],\n tablefmt='orgtbl')\n print(start_table)\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\ndef opengmeet_or_site():\n next()\n for i in range(100):\n link = urls[0]\n alarm = times[0]\n Current_time = time.strftime('%H.%M')\n while Current_time != alarm:\n print('Waiting, the current time is ' + Current_time + ' :-( ')\n Current_time = time.strftime('%H.%M')\n time.sleep(1)\n if Current_time == alarm:\n print('WEBSITE IS OPENING :D')\n if 'meet.google.com' in urls[0]:\n webbrowser.open(link)\n pyautogui.press('enter')\n time.sleep(2)\n pyautogui.click(100 * screenWidth / 1680, 410 *\n screenHeight / 1050)\n time.sleep(10)\n pyautogui.hotkey('ctrl', 'd')\n time.sleep(1)\n pyautogui.hotkey('ctrl', 'e')\n time.sleep(1)\n pyautogui.click(1150 * screenWidth / 1680, 620 *\n screenHeight / 1050)\n urls.remove(urls[0])\n times.remove(times[0])\n else:\n webbrowser.open(link)\n urls.remove(urls[0])\n times.remove(times[0])\n\n\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
98,897 |
11a09fae8c9ff81b561eb538f095e36df6f47842
|
def letras_maiusculas(função):
def maiuscula():
return função().upper()
return maiuscula
@letras_maiusculas # Decorator
def meu_nome():
return 'Fernando'
nome = meu_nome()
print(nome)
|
[
"def letras_maiusculas(função):\r\n def maiuscula():\r\n return função().upper()\r\n\r\n return maiuscula\r\n\r\n\r\n@letras_maiusculas # Decorator\r\ndef meu_nome():\r\n return 'Fernando'\r\n\r\n\r\nnome = meu_nome()\r\nprint(nome)\r\n",
"def letras_maiusculas(função):\n\n def maiuscula():\n return função().upper()\n return maiuscula\n\n\n@letras_maiusculas\ndef meu_nome():\n return 'Fernando'\n\n\nnome = meu_nome()\nprint(nome)\n",
"def letras_maiusculas(função):\n\n def maiuscula():\n return função().upper()\n return maiuscula\n\n\n@letras_maiusculas\ndef meu_nome():\n return 'Fernando'\n\n\n<assignment token>\nprint(nome)\n",
"def letras_maiusculas(função):\n\n def maiuscula():\n return função().upper()\n return maiuscula\n\n\n@letras_maiusculas\ndef meu_nome():\n return 'Fernando'\n\n\n<assignment token>\n<code token>\n",
"def letras_maiusculas(função):\n\n def maiuscula():\n return função().upper()\n return maiuscula\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
98,898 |
6ac6d608990831c153da0befb60329ce1f02ebfb
|
from rest_framework import routers
from musiclib.views import CategoryViewSet, ArtistViewSet, DiscographyViewSet, SongViewSet
router = routers.SimpleRouter()
router.register(r'categories', CategoryViewSet)
router.register(r'artists', ArtistViewSet)
router.register(r'discographies', DiscographyViewSet)
router.register(r'songs', SongViewSet)
urlpatterns = router.urls
|
[
"from rest_framework import routers\nfrom musiclib.views import CategoryViewSet, ArtistViewSet, DiscographyViewSet, SongViewSet\n\nrouter = routers.SimpleRouter()\nrouter.register(r'categories', CategoryViewSet)\nrouter.register(r'artists', ArtistViewSet)\nrouter.register(r'discographies', DiscographyViewSet)\nrouter.register(r'songs', SongViewSet)\nurlpatterns = router.urls\n",
"from rest_framework import routers\nfrom musiclib.views import CategoryViewSet, ArtistViewSet, DiscographyViewSet, SongViewSet\nrouter = routers.SimpleRouter()\nrouter.register('categories', CategoryViewSet)\nrouter.register('artists', ArtistViewSet)\nrouter.register('discographies', DiscographyViewSet)\nrouter.register('songs', SongViewSet)\nurlpatterns = router.urls\n",
"<import token>\nrouter = routers.SimpleRouter()\nrouter.register('categories', CategoryViewSet)\nrouter.register('artists', ArtistViewSet)\nrouter.register('discographies', DiscographyViewSet)\nrouter.register('songs', SongViewSet)\nurlpatterns = router.urls\n",
"<import token>\n<assignment token>\nrouter.register('categories', CategoryViewSet)\nrouter.register('artists', ArtistViewSet)\nrouter.register('discographies', DiscographyViewSet)\nrouter.register('songs', SongViewSet)\n<assignment token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n"
] | false |
98,899 |
8179b83dadfa4bb62feac793150daaedc47a2cec
|
#Uses python2
import sys
#import queue
def distance(adj, cost, s, t):
#write your code here
dist = [float("inf") for _ in range(len(adj))]
prev = [None for _ in range(len(adj))]
dist[s] = 0
H = dict()
for i in range(len(adj)):
H[i] = dist[i]
while len(H) > 0:
minimum_distance = min(H.itervalues())
U = [key for key, value in H.iteritems() if value == minimum_distance]
u = U[0]
H.pop(u)
i = 0
for v in adj[u]:
if dist[v] > dist[u] + cost[u][i]:
dist[v] = dist[u] + cost[u][i]
H[v] = dist[v]
prev[v] = u
i += 1
return -1 if dist[t] == float("inf") else dist[t]
n, m = list(map(int, raw_input().split()))
edges = []
for i in range(m):
a, b, w = map(int, raw_input().split())
edges.append(((a, b), w))
adj = [[] for _ in range(n)]
cost = [[] for _ in range(n)]
for ((a, b), w) in edges:
adj[a - 1].append(b - 1)
cost[a - 1].append(w)
s, t = list(map(int, raw_input().split()))
s-=1
t-=1
print(distance(adj, cost, s, t))
|
[
"#Uses python2\n\nimport sys\n#import queue\n\n\ndef distance(adj, cost, s, t):\n #write your code here\n dist = [float(\"inf\") for _ in range(len(adj))]\n prev = [None for _ in range(len(adj))]\n dist[s] = 0\n \n H = dict()\n for i in range(len(adj)):\n H[i] = dist[i]\n \n while len(H) > 0:\n minimum_distance = min(H.itervalues())\n U = [key for key, value in H.iteritems() if value == minimum_distance]\n u = U[0]\n H.pop(u)\n i = 0\n for v in adj[u]:\n if dist[v] > dist[u] + cost[u][i]:\n dist[v] = dist[u] + cost[u][i]\n H[v] = dist[v]\n prev[v] = u\n i += 1\n return -1 if dist[t] == float(\"inf\") else dist[t]\n\n\nn, m = list(map(int, raw_input().split()))\n\nedges = []\nfor i in range(m):\n a, b, w = map(int, raw_input().split())\n edges.append(((a, b), w))\n\nadj = [[] for _ in range(n)]\ncost = [[] for _ in range(n)]\n\nfor ((a, b), w) in edges:\n adj[a - 1].append(b - 1)\n cost[a - 1].append(w)\n \ns, t = list(map(int, raw_input().split()))\ns-=1\nt-=1\nprint(distance(adj, cost, s, t))\n\n",
"import sys\n\n\ndef distance(adj, cost, s, t):\n dist = [float('inf') for _ in range(len(adj))]\n prev = [None for _ in range(len(adj))]\n dist[s] = 0\n H = dict()\n for i in range(len(adj)):\n H[i] = dist[i]\n while len(H) > 0:\n minimum_distance = min(H.itervalues())\n U = [key for key, value in H.iteritems() if value == minimum_distance]\n u = U[0]\n H.pop(u)\n i = 0\n for v in adj[u]:\n if dist[v] > dist[u] + cost[u][i]:\n dist[v] = dist[u] + cost[u][i]\n H[v] = dist[v]\n prev[v] = u\n i += 1\n return -1 if dist[t] == float('inf') else dist[t]\n\n\nn, m = list(map(int, raw_input().split()))\nedges = []\nfor i in range(m):\n a, b, w = map(int, raw_input().split())\n edges.append(((a, b), w))\nadj = [[] for _ in range(n)]\ncost = [[] for _ in range(n)]\nfor (a, b), w in edges:\n adj[a - 1].append(b - 1)\n cost[a - 1].append(w)\ns, t = list(map(int, raw_input().split()))\ns -= 1\nt -= 1\nprint(distance(adj, cost, s, t))\n",
"<import token>\n\n\ndef distance(adj, cost, s, t):\n dist = [float('inf') for _ in range(len(adj))]\n prev = [None for _ in range(len(adj))]\n dist[s] = 0\n H = dict()\n for i in range(len(adj)):\n H[i] = dist[i]\n while len(H) > 0:\n minimum_distance = min(H.itervalues())\n U = [key for key, value in H.iteritems() if value == minimum_distance]\n u = U[0]\n H.pop(u)\n i = 0\n for v in adj[u]:\n if dist[v] > dist[u] + cost[u][i]:\n dist[v] = dist[u] + cost[u][i]\n H[v] = dist[v]\n prev[v] = u\n i += 1\n return -1 if dist[t] == float('inf') else dist[t]\n\n\nn, m = list(map(int, raw_input().split()))\nedges = []\nfor i in range(m):\n a, b, w = map(int, raw_input().split())\n edges.append(((a, b), w))\nadj = [[] for _ in range(n)]\ncost = [[] for _ in range(n)]\nfor (a, b), w in edges:\n adj[a - 1].append(b - 1)\n cost[a - 1].append(w)\ns, t = list(map(int, raw_input().split()))\ns -= 1\nt -= 1\nprint(distance(adj, cost, s, t))\n",
"<import token>\n\n\ndef distance(adj, cost, s, t):\n dist = [float('inf') for _ in range(len(adj))]\n prev = [None for _ in range(len(adj))]\n dist[s] = 0\n H = dict()\n for i in range(len(adj)):\n H[i] = dist[i]\n while len(H) > 0:\n minimum_distance = min(H.itervalues())\n U = [key for key, value in H.iteritems() if value == minimum_distance]\n u = U[0]\n H.pop(u)\n i = 0\n for v in adj[u]:\n if dist[v] > dist[u] + cost[u][i]:\n dist[v] = dist[u] + cost[u][i]\n H[v] = dist[v]\n prev[v] = u\n i += 1\n return -1 if dist[t] == float('inf') else dist[t]\n\n\n<assignment token>\nfor i in range(m):\n a, b, w = map(int, raw_input().split())\n edges.append(((a, b), w))\n<assignment token>\nfor (a, b), w in edges:\n adj[a - 1].append(b - 1)\n cost[a - 1].append(w)\n<assignment token>\ns -= 1\nt -= 1\nprint(distance(adj, cost, s, t))\n",
"<import token>\n\n\ndef distance(adj, cost, s, t):\n dist = [float('inf') for _ in range(len(adj))]\n prev = [None for _ in range(len(adj))]\n dist[s] = 0\n H = dict()\n for i in range(len(adj)):\n H[i] = dist[i]\n while len(H) > 0:\n minimum_distance = min(H.itervalues())\n U = [key for key, value in H.iteritems() if value == minimum_distance]\n u = U[0]\n H.pop(u)\n i = 0\n for v in adj[u]:\n if dist[v] > dist[u] + cost[u][i]:\n dist[v] = dist[u] + cost[u][i]\n H[v] = dist[v]\n prev[v] = u\n i += 1\n return -1 if dist[t] == float('inf') else dist[t]\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |