code (string, lengths 13–1.2M) | order_type (string, 1 class) | original_example (dict) | step_ids (list, lengths 1–5) |
---|---|---|---|
from flask import request, Flask
import ldap3
app = Flask(__name__)

@app.route("/normal")
def normal():
    """
    A RemoteFlowSource is used directly as DN and search filter
    """

    unsafe_dc = request.args['dc']
    unsafe_filter = request.args['username']

    dn = "dc={}".format(unsafe_dc)
    search_filter = "(user={})".format(unsafe_filter)

    srv = ldap3.Server('ldap://127.0.0.1')
    conn = ldap3.Connection(srv, user=dn, auto_bind=True)
    conn.search(dn, search_filter)


@app.route("/direct")
def direct():
    """
    A RemoteFlowSource is used directly as DN and search filter using a one-line call to .search
    """

    unsafe_dc = request.args['dc']
    unsafe_filter = request.args['username']

    dn = "dc={}".format(unsafe_dc)
    search_filter = "(user={})".format(unsafe_filter)

    srv = ldap3.Server('ldap://127.0.0.1')
    conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(
        dn, search_filter)


# if __name__ == "__main__":
#     app.run(debug=True)
|
normal
|
{
"blob_id": "b51591de921f6e153c1dd478cec7fad42ff4251a",
"index": 749,
"step-1": "<mask token>\n\n\[email protected]('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-2": "<mask token>\n\n\[email protected]('/normal')\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\[email protected]('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/normal')\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\[email protected]('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-4": "from flask import request, Flask\nimport ldap3\napp = Flask(__name__)\n\n\[email protected]('/normal')\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\[email protected]('/direct')\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n dn = 'dc={}'.format(unsafe_dc)\n search_filter = '(user={})'.format(unsafe_filter)\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(dn,\n search_filter)\n",
"step-5": "from flask import request, Flask\nimport ldap3\n\napp = Flask(__name__)\n\n\[email protected](\"/normal\")\ndef normal():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter\n \"\"\"\n\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n\n dn = \"dc={}\".format(unsafe_dc)\n search_filter = \"(user={})\".format(unsafe_filter)\n\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True)\n conn.search(dn, search_filter)\n\n\[email protected](\"/direct\")\ndef direct():\n \"\"\"\n A RemoteFlowSource is used directly as DN and search filter using a oneline call to .search\n \"\"\"\n\n unsafe_dc = request.args['dc']\n unsafe_filter = request.args['username']\n\n dn = \"dc={}\".format(unsafe_dc)\n search_filter = \"(user={})\".format(unsafe_filter)\n\n srv = ldap3.Server('ldap://127.0.0.1')\n conn = ldap3.Connection(srv, user=dn, auto_bind=True).search(\n dn, search_filter)\n\n# if __name__ == \"__main__\":\n# app.run(debug=True)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
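
The two views in the row above pass `request.args['dc']` and `request.args['username']` straight into the bind DN and the search filter, which is the LDAP-injection pattern the example illustrates. As a hedged sketch only (not part of the dataset row), the same lookup could escape the remote values first; it assumes the installed ldap3 version provides `escape_rdn` in `ldap3.utils.dn` and `escape_filter_chars` in `ldap3.utils.conv`:

from flask import request, Flask
import ldap3
from ldap3.utils.dn import escape_rdn             # assumed available in this ldap3 version
from ldap3.utils.conv import escape_filter_chars  # assumed available in this ldap3 version

app = Flask(__name__)


@app.route("/escaped")
def escaped():
    """Same lookup as /normal, but the remote values are escaped before use."""
    safe_dc = escape_rdn(request.args['dc'])
    safe_filter = escape_filter_chars(request.args['username'])
    dn = "dc={}".format(safe_dc)
    search_filter = "(user={})".format(safe_filter)
    srv = ldap3.Server('ldap://127.0.0.1')
    conn = ldap3.Connection(srv, user=dn, auto_bind=True)
    conn.search(dn, search_filter)
    return "ok"
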
from .. import CURRENT_NAME
from ..cmd import call_cmd
from .config import Configurator
from .config import USER_INI
from icemac.install.addressbook._compat import Path
import argparse
import os
import pdb # noqa: T002
import sys

def update(stdin=None):
    """Update the current address book installation."""
    curr_path = Path.cwd() / CURRENT_NAME
    if not curr_path.exists():
        print("ERROR: There is no symlink named {!r} in the current"
              " directory.".format(CURRENT_NAME))
        print("This script cannot be called here.")
        sys.exit(-1)

    if (curr_path / 'buildout.cfg').exists():
        print("ERROR: '{}/buildout.cfg' already exists please (re-) move"
              " it.".format(CURRENT_NAME))
        sys.exit(-2)

    cwd = os.getcwd()
    os.chdir(str(curr_path))  # PY2: in PY3 `str` is no longer needed
    configurator = Configurator(
        curr_path / USER_INI, install_new_version=False, stdin=stdin)
    try:
        configurator()
        call_cmd('running bin/buildout', '../bin/buildout')
        if configurator.restart_server == 'yes':
            call_cmd('Restarting instance', 'bin/svctl', 'restart', 'all')
    finally:
        os.chdir(str(cwd))  # PY2: in PY3 `str` is no longer needed
    print('Done.')


def main(args=None):
    """Entry point for `bin/change-addressbook-config`."""
    parser = argparse.ArgumentParser(
        description='Update the current address book installation.')
    parser.add_argument(
        '--debug', action="store_true",
        help='Enter debugger on errors.')

    args = parser.parse_args(args)
    try:
        update()
    except Exception:
        if args.debug:
            pdb.post_mortem()
        else:
            raise
|
normal
|
{
"blob_id": "f5274f5d838d484ca0c1cc5a5192a2fd698cf827",
"index": 9432,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef update(stdin=None):\n \"\"\"Update the current address book installation.\"\"\"\n curr_path = Path.cwd() / CURRENT_NAME\n if not curr_path.exists():\n print('ERROR: There is no symlink named {!r} in the current directory.'\n .format(CURRENT_NAME))\n print('This script cannot be called here.')\n sys.exit(-1)\n if (curr_path / 'buildout.cfg').exists():\n print(\"ERROR: '{}/buildout.cfg' already exists please (re-) move it.\"\n .format(CURRENT_NAME))\n sys.exit(-2)\n cwd = os.getcwd()\n os.chdir(str(curr_path))\n configurator = Configurator(curr_path / USER_INI, install_new_version=\n False, stdin=stdin)\n try:\n configurator()\n call_cmd('running bin/buildout', '../bin/buildout')\n if configurator.restart_server == 'yes':\n call_cmd('Restarting instance', 'bin/svctl', 'restart', 'all')\n finally:\n os.chdir(str(cwd))\n print('Done.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef update(stdin=None):\n \"\"\"Update the current address book installation.\"\"\"\n curr_path = Path.cwd() / CURRENT_NAME\n if not curr_path.exists():\n print('ERROR: There is no symlink named {!r} in the current directory.'\n .format(CURRENT_NAME))\n print('This script cannot be called here.')\n sys.exit(-1)\n if (curr_path / 'buildout.cfg').exists():\n print(\"ERROR: '{}/buildout.cfg' already exists please (re-) move it.\"\n .format(CURRENT_NAME))\n sys.exit(-2)\n cwd = os.getcwd()\n os.chdir(str(curr_path))\n configurator = Configurator(curr_path / USER_INI, install_new_version=\n False, stdin=stdin)\n try:\n configurator()\n call_cmd('running bin/buildout', '../bin/buildout')\n if configurator.restart_server == 'yes':\n call_cmd('Restarting instance', 'bin/svctl', 'restart', 'all')\n finally:\n os.chdir(str(cwd))\n print('Done.')\n\n\ndef main(args=None):\n \"\"\"Entry point for `bin/change-addressbook-config`.\"\"\"\n parser = argparse.ArgumentParser(description=\n 'Update the current address book installation.')\n parser.add_argument('--debug', action='store_true', help=\n 'Enter debugger on errors.')\n args = parser.parse_args(args)\n try:\n update()\n except Exception:\n if args.debug:\n pdb.post_mortem()\n else:\n raise\n",
"step-4": "from .. import CURRENT_NAME\nfrom ..cmd import call_cmd\nfrom .config import Configurator\nfrom .config import USER_INI\nfrom icemac.install.addressbook._compat import Path\nimport argparse\nimport os\nimport pdb\nimport sys\n\n\ndef update(stdin=None):\n \"\"\"Update the current address book installation.\"\"\"\n curr_path = Path.cwd() / CURRENT_NAME\n if not curr_path.exists():\n print('ERROR: There is no symlink named {!r} in the current directory.'\n .format(CURRENT_NAME))\n print('This script cannot be called here.')\n sys.exit(-1)\n if (curr_path / 'buildout.cfg').exists():\n print(\"ERROR: '{}/buildout.cfg' already exists please (re-) move it.\"\n .format(CURRENT_NAME))\n sys.exit(-2)\n cwd = os.getcwd()\n os.chdir(str(curr_path))\n configurator = Configurator(curr_path / USER_INI, install_new_version=\n False, stdin=stdin)\n try:\n configurator()\n call_cmd('running bin/buildout', '../bin/buildout')\n if configurator.restart_server == 'yes':\n call_cmd('Restarting instance', 'bin/svctl', 'restart', 'all')\n finally:\n os.chdir(str(cwd))\n print('Done.')\n\n\ndef main(args=None):\n \"\"\"Entry point for `bin/change-addressbook-config`.\"\"\"\n parser = argparse.ArgumentParser(description=\n 'Update the current address book installation.')\n parser.add_argument('--debug', action='store_true', help=\n 'Enter debugger on errors.')\n args = parser.parse_args(args)\n try:\n update()\n except Exception:\n if args.debug:\n pdb.post_mortem()\n else:\n raise\n",
"step-5": "from .. import CURRENT_NAME\nfrom ..cmd import call_cmd\nfrom .config import Configurator\nfrom .config import USER_INI\nfrom icemac.install.addressbook._compat import Path\nimport argparse\nimport os\nimport pdb # noqa: T002\nimport sys\n\n\ndef update(stdin=None):\n \"\"\"Update the current address book installation.\"\"\"\n curr_path = Path.cwd() / CURRENT_NAME\n if not curr_path.exists():\n print(\"ERROR: There is no symlink named {!r} in the current\"\n \" directory.\".format(CURRENT_NAME))\n print(\"This script cannot be called here.\")\n sys.exit(-1)\n\n if (curr_path / 'buildout.cfg').exists():\n print(\"ERROR: '{}/buildout.cfg' already exists please (re-) move\"\n \" it.\".format(CURRENT_NAME))\n sys.exit(-2)\n\n cwd = os.getcwd()\n os.chdir(str(curr_path)) # PY2: in PY3 `str` is no longer needed\n configurator = Configurator(\n curr_path / USER_INI, install_new_version=False, stdin=stdin)\n try:\n configurator()\n call_cmd('running bin/buildout', '../bin/buildout')\n if configurator.restart_server == 'yes':\n call_cmd('Restarting instance', 'bin/svctl', 'restart', 'all')\n finally:\n os.chdir(str(cwd)) # PY2: in PY3 `str` is no longer needed\n print('Done.')\n\n\ndef main(args=None):\n \"\"\"Entry point for `bin/change-addressbook-config`.\"\"\"\n parser = argparse.ArgumentParser(\n description='Update the current address book installation.')\n parser.add_argument(\n '--debug', action=\"store_true\",\n help='Enter debugger on errors.')\n\n args = parser.parse_args(args)\n try:\n update()\n except Exception:\n if args.debug:\n pdb.post_mortem()\n else:\n raise\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
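
`update()` in the row above changes into the `CURRENT_NAME` symlink, runs buildout, and restores the previous working directory in a `finally` block. Below is a minimal sketch of that chdir-and-restore pattern as a reusable context manager; the name `working_directory` is hypothetical and not part of the icemac package.

import contextlib
import os


@contextlib.contextmanager
def working_directory(path):
    """Temporarily chdir into `path` and restore the old cwd even on errors."""
    old_cwd = os.getcwd()
    os.chdir(str(path))  # str() mirrors the PY2 note in the original code
    try:
        yield
    finally:
        os.chdir(old_cwd)

# Usage sketch (hypothetical rewrite of part of update()):
# with working_directory(curr_path):
#     configurator()
#     call_cmd('running bin/buildout', '../bin/buildout')
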
import types
import numpy as np  # np.array is used further down; assumed not pre-loaded by the qt shell
import qt
cfg = qt.cfgman
cfg.remove_cfg('protocols')
cfg.remove_cfg('samples')
cfg.remove_cfg('setup')
cfg.add_cfg('protocols')
cfg.add_cfg('samples')
cfg.add_cfg('setup')
cfg['samples']['current'] = 'hans-sil13'
cfg['protocols']['current'] = 'hans-sil13-default'
print 'updating msmt params for {}'.format(cfg['samples']['current'])
##############################################################################
##############################################################################
### Protocols
###
### To make sure everything works fine, only leave the current sample
### un-commented here (we use local variables here for convenience,
### which WILL lead to trouble because they get overwritten in each sample
### section)
##############################################################################
##############################################################################
##############################################################################
### The111/2
##############################################################################
# branch='samples/sil2/'
# f_msm1_cntr = 2.826961e9
# N_frq = 7.13429e6
# N_HF_frq = 2.19290e6
# cfg.set(branch+'ms-1_cntr_frq', f_msm1_cntr)
# cfg.set(branch+'N_0-1_splitting_ms-1', N_frq)
# cfg.set(branch+'N_HF_frq', N_HF_frq)
# mw0 = 2.8e9
# f0 = f_msm1_cntr - mw0
# Nsplit = N_HF_frq
# finit = f0 - Nsplit
# fmIp1 = f_msm1_cntr - mw0 + N_HF_frq
# cfg.set(branch+'mIm1_mod_frq', f_msm1_cntr - mw0 - N_HF_frq)
# cfg.set(branch+'mI0_mod_frq', f_msm1_cntr - mw0)
# cfg.set(branch+'mIp1_mod_frq', f_msm1_cntr - mw0 + N_HF_frq)
##############################################################################
### HANS/4
##############################################################################
# branch='samples/hans-sil4/'
# f_msm1_cntr = 2.826455e9
# N_frq = 7.13377e6
# N_HF_frq = 2.19290e6
# cfg.set(branch+'ms-1_cntr_frq', f_msm1_cntr)
# cfg.set(branch+'N_0-1_splitting_ms-1', N_frq)
# cfg.set(branch+'N_HF_frq', N_HF_frq)
# mw0 = 2.8e9
# f0 = f_msm1_cntr - mw0
# Nsplit = N_HF_frq
# finit = f0 - Nsplit
# fmIp1 = f_msm1_cntr - mw0 + N_HF_frq
# cfg.set(branch+'mIm1_mod_frq', f_msm1_cntr - mw0 - N_HF_frq)
# cfg.set(branch+'mI0_mod_frq', f_msm1_cntr - mw0)
# cfg.set(branch+'mIp1_mod_frq', f_msm1_cntr - mw0 + N_HF_frq)
branch='samples/hans-sil4/'
f_msm1_cntr = 2.827962e9
N_frq = 7.13456e6
N_HF_frq = 2.19290e6
cfg.set(branch+'ms-1_cntr_frq', f_msm1_cntr)
cfg.set(branch+'N_0-1_splitting_ms-1', N_frq)
cfg.set(branch+'N_HF_frq', N_HF_frq)
mw0 = 2.8e9
f0 = f_msm1_cntr - mw0
Nsplit = N_HF_frq
finit = f0 - Nsplit
fmIp1 = f_msm1_cntr - mw0 + N_HF_frq
cfg.set(branch+'mIm1_mod_frq', f_msm1_cntr - mw0 - N_HF_frq)
cfg.set(branch+'mI0_mod_frq', f_msm1_cntr - mw0)
cfg.set(branch+'mIp1_mod_frq', f_msm1_cntr - mw0 + N_HF_frq)
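# For reference, the hans-sil4 assignments above evaluate to approximately:
#   f0    = f_msm1_cntr - mw0  = 2.827962e9 - 2.8e9    ~ 27.962 MHz  (= mI0_mod_frq)
#   finit = f0 - N_HF_frq      = 27.962e6 - 2.19290e6  ~ 25.769 MHz  (= mIm1_mod_frq)
#   fmIp1 = f0 + N_HF_frq      = 27.962e6 + 2.19290e6  ~ 30.155 MHz  (= mIp1_mod_frq)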
##############################################################################
##############################################################################
### Protocols
##############################################################################
##############################################################################
### General settings for AdwinSSRO
branch='protocols/AdwinSSRO/'
cfg.set(branch+ 'AWG_done_DI_channel', 16)
cfg.set(branch+ 'AWG_event_jump_DO_channel', 14)
cfg.set(branch+ 'AWG_start_DO_channel', 10)
cfg.set(branch+ 'A_laser_DAC_channel', 6)
cfg.set(branch+ 'Ex_laser_DAC_channel', 7)
cfg.set(branch+ 'counter_channel', 1)
cfg.set(branch+ 'cycle_duration', 300)
cfg.set(branch+ 'green_laser_DAC_channel', 4)
cfg.set(branch+ 'green_off_amplitude', 0.0)
cfg.set(branch+ 'green_repump_amplitude', 50e-6)
cfg.set(branch+ 'green_repump_duration', 10)
cfg.set(branch+ 'send_AWG_start', 0)
cfg.set(branch+ 'sequence_wait_time', 1)
cfg.set(branch+ 'wait_after_RO_pulse_duration', 3)
cfg.set(branch+ 'wait_after_pulse_duration', 3)
cfg.set(branch+ 'cr_wait_after_pulse_duration', 1)
cfg.set(branch+ 'wait_for_AWG_done', 0)
cfg.set(branch+ 'green_off_voltage', 0)
cfg.set(branch+ 'repump_off_voltage', 0)
cfg.set(branch+ 'yellow_repump_amplitude', 50e-9)
cfg.set(branch+ 'yellow_repump_duration', 500)
cfg.set(branch+ 'yellow_repump_after_repetitions',100)
cfg.set(branch+ 'yellow_CR_repump', 1)
cfg.set(branch+ 'green_repump_after_repetitions',1)
cfg.set(branch+ 'green_CR_repump', 1000)
cfg.set(branch+ 'CR_probe_max_time', 1000000)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 50)
cfg.set(branch+ 'SSRO_repetitions', 1000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
yellow=True
cfg.set(branch + 'yellow', yellow)
if yellow:
    cfg.set(branch + 'repump_duration', cfg.get(branch+ 'yellow_repump_duration'))
    cfg.set(branch + 'repump_amplitude', cfg.get(branch+ 'yellow_repump_amplitude'))
    cfg.set(branch + 'CR_repump', cfg.get(branch+ 'yellow_CR_repump'))
    cfg.set(branch + 'repump_after_repetitions', cfg.get(branch+ 'yellow_repump_after_repetitions'))
else:
    cfg.set(branch + 'repump_duration', cfg.get(branch+ 'green_repump_duration'))
    cfg.set(branch + 'repump_amplitude', cfg.get(branch+ 'green_repump_amplitude'))
    cfg.set(branch + 'CR_repump', cfg.get(branch+ 'green_CR_repump'))
    cfg.set(branch + 'repump_after_repetitions', cfg.get(branch+ 'green_repump_after_repetitions'))
### General settings for AdwinSSRO+espin
branch='protocols/AdwinSSRO+espin/'
cfg.set(branch+ 'send_AWG_start', 1)
cfg.set(branch+ 'MW_pulse_mod_risetime', 10e-9)
cfg.set(branch+ 'mw_frq', mw0)
cfg.set(branch+ 'mw_power', 20)
### General settings for AdwinSSRO+MBI
branch='protocols/AdwinSSRO+MBI/'
cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 1e-6)
cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration',
np.array([15e-6]).tolist())
cfg.set(branch+ 'AWG_wait_duration_before_shelving_pulse', 100e-9)
cfg.set(branch+ 'nr_of_ROsequences', 1)
cfg.set(branch+ 'MW_pulse_mod_risetime', 10e-9)
cfg.set(branch+ 'AWG_to_adwin_ttl_trigger_duration', 5e-6)
cfg.set(branch+ 'max_MBI_attempts', 1)
cfg.set(branch+ 'N_randomize_duration', 50)
cfg.set(branch+ 'Ex_N_randomize_amplitude', 0e-9)
cfg.set(branch+ 'A_N_randomize_amplitude', 0e-9)
cfg.set(branch+ 'yellow_N_randomize_amplitude', 0e-9)
##############################################################################
##############################################################################
### Specific sample settings for protocols
##############################################################################
##############################################################################
##############################################################################
### HANS/13 --- SSRO
##############################################################################
branch='protocols/hans-sil13-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 10e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 10e-9)
cfg.set(branch+ 'CR_duration', 50)
cfg.set(branch+ 'CR_preselect', 1000)
cfg.set(branch+ 'CR_probe', 20)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 10e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 100)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 25)
cfg.set(branch+ 'SSRO_repetitions', 5000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
cfg.set('protocols/hans-sil1-default/AdwinSSRO-integrated/SSRO_duration', 25)
###############################################################################
#### HANS/1 --- SSRO
###############################################################################
#
#branch='protocols/hans-sil1-default/AdwinSSRO/'
#cfg.set(branch+ 'A_CR_amplitude', 15e-9)
#cfg.set(branch+ 'A_RO_amplitude', 0.)
#cfg.set(branch+ 'A_SP_amplitude', 15e-9)
#cfg.set(branch+ 'CR_duration', 50)
#cfg.set(branch+ 'CR_preselect', 1000)
#cfg.set(branch+ 'CR_probe', 20)
#cfg.set(branch+ 'CR_repump', 1000)
#cfg.set(branch+ 'Ex_CR_amplitude', 4e-9)
#cfg.set(branch+ 'Ex_RO_amplitude', 4e-9)
#cfg.set(branch+ 'Ex_SP_amplitude', 0.)
#cfg.set(branch+ 'SP_duration', 250)
#cfg.set(branch+ 'SP_filter_duration', 0)
#cfg.set(branch+ 'SSRO_duration', 50)
#cfg.set(branch+ 'SSRO_repetitions', 5000)
#cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
#
#cfg.set('protocols/hans-sil1-default/AdwinSSRO-integrated/SSRO_duration', 11)
#
###############################################################################
#### HANS/1 --- MBI
###############################################################################
#
#branch='protocols/hans-sil1-default/AdwinSSRO+MBI/'
#cfg.set(branch+ 'mw_frq', mw0)
#cfg.set(branch+ 'mw_power', 20)
#cfg.set(branch+ 'Ex_MBI_amplitude', 4e-9)
#cfg.set(branch+ 'Ex_SP_amplitude', 10e-9)
#cfg.set(branch+ 'MBI_duration', 4) #put back to 4 with gate
#cfg.set(branch+ 'max_MBI_attempts', 1)
#cfg.set(branch+ 'MBI_threshold', 1)
#cfg.set(branch+ 'SP_E_duration', 60)
#cfg.set(branch+ 'repump_after_MBI_duration', 15)
#cfg.set(branch+ 'repump_after_MBI_amplitude', 15e-9)
#cfg.set(branch+ 'repump_after_E_RO_duration', 15)
#cfg.set(branch+ 'repump_after_E_RO_amplitude', 15e-9)
#
## MBI pulse
#cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)
#cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)
#
###############################################################################
#### HANS/1 --- Pulses
###############################################################################
#
#branch='protocols/hans-sil1-default/pulses/'
#
#cfg.set(branch+ 'selective_pi_duration', 2500e-9)
#cfg.set(branch+ 'selective_pi_amp', 0.0166)
#cfg.set(branch+ 'selective_pi_mod_frq', finit)
#cfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq',
# finit)
#cfg.set(branch+ 'AWG_MBI_MW_pulse_ssbmod_frq',
# finit)
#cfg.set(branch+ 'AWG_MBI_MW_pulse_amp',
# cfg.get(branch+ 'selective_pi_amp'))
#cfg.set(branch+ 'AWG_MBI_MW_pulse_duration',
# cfg.get(branch+ 'selective_pi_duration'))
#
#cfg.set(branch+ 'fast_pi_duration', 80e-9)
#cfg.set(branch+ 'fast_pi_amp', 0.816)
#cfg.set(branch+ 'fast_pi_mod_frq', finit)
#
#cfg.set(branch+ 'fast_pi2_duration', 40e-9)
#cfg.set(branch+ 'fast_pi2_amp', 0.816)
#cfg.set(branch+ 'fast_pi2_mod_frq', finit)
#
#### CNOTs
#cfg.set(branch+ 'pi2pi_mIm1_duration', 396e-9)
#cfg.set(branch+ 'pi2pi_mIm1_amp', 0.109166)
#cfg.set(branch+ 'pi2pi_mIm1_mod_frq', finit)
#
#### CORPSE used in the BSM
#CORPSE_frq = 5e6
#cfg.set(branch+ 'CORPSE_pi_60_duration', 1./CORPSE_frq/6.)
#cfg.set(branch+ 'CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
#cfg.set(branch+ 'CORPSE_pi_420_duration', 7./CORPSE_frq/6.)
#cfg.set(branch+ 'CORPSE_pi_mod_frq', finit + Nsplit/2.)
#cfg.set(branch+ 'CORPSE_pi_amp', 0.529)
#
## ### TODO
#cfg.set(branch+ 'CORPSE_pi_phase_shift', 104.0)
#
## ### TODO
#cfg.set(branch+ 'CORPSE_pi_center_shift', 0.e-9)
#
#### CORPSE for the full ms=-1 manifold, driven in the center
#### (resonant with mI = 0)
#CORPSE_frq = 6.5e6
#cfg.set(branch+ 'msm1_CORPSE_pi_60_duration', 1./CORPSE_frq/6.)
#cfg.set(branch+ 'msm1_CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
#cfg.set(branch+ 'msm1_CORPSE_pi_420_duration', 7./CORPSE_frq/6.)
#cfg.set(branch+ 'msm1_CORPSE_pi_mod_frq', f_msm1_cntr - mw0)
#cfg.set(branch+ 'msm1_CORPSE_pi_amp', 0.797641)
#
## cfg.set(branch+ 'msm1_CORPSE_pi2_24p3_duration', 24.3/CORPSE_frq/360.)
## cfg.set(branch+ 'msm1_CORPSE_pi2_m318p6_duration', 318.6/CORPSE_frq/360.)
## cfg.set(branch+ 'msm1_CORPSE_pi2_384p3_duration', 384.3/CORPSE_frq/360.)
## cfg.set(branch+ 'msm1_CORPSE_pi2_mod_frq', f_msm1_cntr - mw0)
## cfg.set(branch+ 'msm1_CORPSE_pi2_amp', 0.818) ###not calibrated
#
#cfg.set(branch+ 'first_C_revival', 50.90e-6)
#
#### Nitrogen pulses
#cfg.set(branch+ 'N_pi_duration', 47.3e-6)
#cfg.set(branch+ 'N_pi_amp', 1)
#
#cfg.set(branch+ 'N_pi2_duration', 47.3e-6/2.)
#cfg.set(branch+ 'N_pi2_amp', 1)
#
#
"""
branch='protocols/sil2-default/pulses/'
tof = 0#11e-9
cfg.set(branch+ 't_offset', tof)
cfg.set(branch+ '4MHz_pi_duration', tof + 125e-9)
cfg.set(branch+ '4MHz_pi_amp', 0.599)
cfg.set(branch+ '4MHz_pi_mod_frq', finit)
cfg.set(branch+ '4MHz_pi2_duration', tof + 62e-9)
cfg.set(branch+ '4MHz_pi2_amp', 0.599)
cfg.set(branch+ '4MHz_pi2_mod_frq', finit)
cfg.set(branch+ 'selective_pi_duration', 2600e-9)
cfg.set(branch+ 'selective_pi_amp', 0.02)
cfg.set(branch+ 'selective_pi_mod_frq', finit)
CORPSE_frq = 3.991e6
cfg.set(branch+ 'CORPSE_pi_60_duration', tof/2. + 1./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_420_duration', tof/2. + 7./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_mod_frq', finit + Nsplit/2.)
cfg.set(branch+ 'CORPSE_pi_amp', 0.703)
cfg.set(branch+ 'CORPSE_pi_phase_shift', 74.0)
cfg.set(branch+ 'CORPSE_pi_center_shift', 13e-9)
cfg.set(branch+ 'pi2pi_mIm1_duration', tof + 395e-9)
cfg.set(branch+ 'pi2pi_mIm1_amp', 0.164)
cfg.set(branch+ 'pi2pi_mIm1_mod_frq', finit)
cfg.set(branch+ 'pi2pi_mI0_duration', tof + 395e-9)
cfg.set(branch+ 'pi2pi_mI0_amp', 0.170)
cfg.set(branch+ 'pi2pi_mI0_mod_frq', f0)
cfg.set(branch+ 'pi2pi_mIp1_duration', tof + 395e-9)
cfg.set(branch+ 'pi2pi_mIp1_amp', 0.185)
cfg.set(branch+ 'pi2pi_mIp1_mod_frq', fmIp1)
### set some other pulses that determine their values from the ones above
cfg.set(branch+ 'AWG_N_CNOT_pulse_duration',
cfg.get(branch+ 'pi2pi_mIm1_duration'))
cfg.set(branch+ 'AWG_N_CNOT_pulse_amp',
cfg.get(branch+ 'pi2pi_mIm1_amp'))
cfg.set(branch+ 'AWG_N_CNOT_pulse_mod_frq',
cfg.get(branch+ 'pi2pi_mIm1_mod_frq'))
cfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_amp',
cfg.get(branch+ 'selective_pi_amp'))
cfg.set(branch+ 'AWG_MBI_MW_pulse_duration',
cfg.get(branch+ 'selective_pi_duration'))
cfg.set(branch+ 'AWG_shelving_pulse_duration',
cfg.get(branch+ '4MHz_pi_duration'))
cfg.set(branch+ 'AWG_shelving_pulse_amp',
cfg.get(branch+ '4MHz_pi_amp'))
### Nitrogen pulses
cfg.set(branch+ 'N_pi_duration', 91.1e-6)
cfg.set(branch+ 'N_pi_amp', 1)
cfg.set(branch+ 'N_pi2_duration', 91.1e-6/2.)
cfg.set(branch+ 'N_pi2_amp', 1)
##############################################################################
### The111/2 - SSRO
##############################################################################
branch='protocols/sil2-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 10e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 10e-9)
cfg.set(branch+ 'CR_duration', 100)
cfg.set(branch+ 'CR_preselect', 40)
cfg.set(branch+ 'CR_probe', 40)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 5e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 250)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 50)
cfg.set(branch+ 'SSRO_repetitions', 1000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
### integrated SSRO
cfg.set('protocols/sil2-default/AdwinSSRO-integrated/SSRO_duration', 15)
##############################################################################
### The111/2 - MBI
##############################################################################
branch='protocols/sil2-default/AdwinSSRO+MBI/'
cfg.set(branch+ 'mw_frq', mw0)
cfg.set(branch+ 'mw_power', 20)
cfg.set(branch+ 'Ex_MBI_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 10e-9)
cfg.set(branch+ 'MBI_duration', 4)
cfg.set(branch+ 'max_MBI_attempts', 1)
cfg.set(branch+ 'MBI_threshold', 1)
cfg.set(branch+ 'SP_E_duration', 100)
cfg.set(branch+ 'repump_after_MBI_duration', 100)
cfg.set(branch+ 'repump_after_MBI_amplitude', 25e-9)
cfg.set(branch+ 'repump_after_E_RO_duration', 100)
cfg.set(branch+ 'repump_after_E_RO_amplitude', 25e-9)
# MBI pulse
cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)
cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)
### sil2, BSM
cfg.set('protocols/sil2-default/BSM/N_ref_frq', N_frq)
cfg.set('protocols/sil2-default/BSM/e_ref_frq', finit)
##############################################################################
### HANS/1 --- Pulses
##############################################################################
branch='protocols/hans-sil1-default/pulses/'
cfg.set(branch+ 'selective_pi_duration', 2500e-9)
cfg.set(branch+ 'selective_pi_amp', 0.015)
cfg.set(branch+ 'selective_pi_mod_frq', finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_ssbmod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_amp',
cfg.get(branch+ 'selective_pi_amp'))
cfg.set(branch+ 'AWG_MBI_MW_pulse_duration',
cfg.get(branch+ 'selective_pi_duration'))
##############################################################################
### HANS/1 --- SSRO
##############################################################################
branch='protocols/hans-sil1-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 15e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 15e-9)
cfg.set(branch+ 'CR_duration', 50)
cfg.set(branch+ 'CR_preselect', 1000)
cfg.set(branch+ 'CR_probe', 20)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 5e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 250)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 50)
cfg.set(branch+ 'SSRO_repetitions', 5000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
cfg.set('protocols/hans-sil1-default/AdwinSSRO-integrated/SSRO_duration', 15)
##############################################################################
### HANS/1 --- MBI
##############################################################################
branch='protocols/hans-sil1-default/AdwinSSRO+MBI/'
cfg.set(branch+ 'mw_frq', mw0)
cfg.set(branch+ 'mw_power', 20)
cfg.set(branch+ 'Ex_MBI_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 15e-9)
cfg.set(branch+ 'MBI_duration', 4)
cfg.set(branch+ 'max_MBI_attempts', 1)
cfg.set(branch+ 'MBI_threshold', 1)
cfg.set(branch+ 'SP_E_duration', 60)
cfg.set(branch+ 'repump_after_MBI_duration', 15)
cfg.set(branch+ 'repump_after_MBI_amplitude', 15e-9)
cfg.set(branch+ 'repump_after_E_RO_duration', 15)
cfg.set(branch+ 'repump_after_E_RO_amplitude', 15e-9)
# MBI pulse
cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)
cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)
##############################################################################
### HANS/7
##############################################################################
branch='protocols/hans-sil7-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 10e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 10e-9)
cfg.set(branch+ 'CR_duration', 100)
cfg.set(branch+ 'CR_preselect', 15)
cfg.set(branch+ 'CR_probe', 5)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 5e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 250)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 50)
cfg.set(branch+ 'SSRO_repetitions', 5000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
cfg.set('protocols/hans-sil7-default/AdwinSSRO-integrated/SSRO_duration', 50)
"""
##############################################################################
### HANS/4 --- SSRO
##############################################################################
branch='protocols/hans-sil4-default/AdwinSSRO/'
cfg.set(branch+ 'A_CR_amplitude', 5e-9)
cfg.set(branch+ 'A_RO_amplitude', 0.)
cfg.set(branch+ 'A_SP_amplitude', 60e-9)
cfg.set(branch+ 'CR_duration', 50)
cfg.set(branch+ 'CR_preselect', 1000)
cfg.set(branch+ 'CR_probe', 20)
cfg.set(branch+ 'CR_repump', 1000)
cfg.set(branch+ 'Ex_CR_amplitude', 5e-9)
cfg.set(branch+ 'Ex_RO_amplitude', 10e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 0.)
cfg.set(branch+ 'SP_duration', 9)
cfg.set(branch+ 'SP_filter_duration', 0)
cfg.set(branch+ 'SSRO_duration', 10)
cfg.set(branch+ 'SSRO_repetitions', 5000)
cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)
cfg.set('protocols/hans-sil4-default/AdwinSSRO-integrated/SSRO_duration', 10)
cfg.set('protocols/hans-sil4-default/AdwinSSRO+espin/mw_frq', mw0)
cfg.set('protocols/hans-sil4-default/AdwinSSRO+espin/mw_power', 20)
##############################################################################
### HANS/4 --- MBI
##############################################################################
branch='protocols/hans-sil4-default/AdwinSSRO+MBI/'
cfg.set(branch+ 'mw_frq', mw0)
cfg.set(branch+ 'mw_power', 20)
cfg.set(branch+ 'Ex_MBI_amplitude', 5e-9)
cfg.set(branch+ 'Ex_SP_amplitude', 10e-9)
cfg.set(branch+ 'MBI_duration', 4)
cfg.set(branch+ 'max_MBI_attempts', 1)
cfg.set(branch+ 'MBI_threshold', 1)
cfg.set(branch+ 'SP_E_duration', 100)
cfg.set(branch+ 'repump_after_MBI_duration', 15)
cfg.set(branch+ 'repump_after_MBI_A_amplitude', [5e-9])
cfg.set(branch+ 'repump_after_MBI_E_amplitude', [0e-9])
cfg.set(branch+ 'repump_after_E_RO_duration', 15)
cfg.set(branch+ 'repump_after_E_RO_amplitude', 5e-9)
# MBI pulse
cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)
cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)
### BSM
cfg.set('protocols/hans-sil4-default/BSM/N_ref_frq', N_frq)
cfg.set('protocols/hans-sil4-default/BSM/e_ref_frq', finit)
cfg.set('protocols/hans-sil4-default/BSM/pi2_evolution_time', 51.086e-6)
cfg.set('protocols/hans-sil4-default/BSM/H_evolution_time', 50.746e-6)
cfg.set('protocols/hans-sil4-default/BSM/H_phase', 46)
##############################################################################
### HANS/4 --- Pulses
##############################################################################
branch='protocols/hans-sil4-default/pulses/'
cfg.set(branch+ 'selective_pi_duration', 2500e-9)
cfg.set(branch+ 'selective_pi_amp', 0.011)
cfg.set(branch+ 'selective_pi_mod_frq', finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_ssbmod_frq',
finit)
cfg.set(branch+ 'AWG_MBI_MW_pulse_amp',
cfg.get(branch+ 'selective_pi_amp'))
cfg.set(branch+ 'AWG_MBI_MW_pulse_duration',
cfg.get(branch+ 'selective_pi_duration'))
cfg.set(branch+ 'fast_pi_duration', 62e-9)
cfg.set(branch+ 'fast_pi_amp', 0.844)
cfg.set(branch+ 'fast_pi_mod_frq', finit)
cfg.set(branch+ 'fast_pi2_duration', 33e-9)
cfg.set(branch+ 'fast_pi2_amp', 0.812)
cfg.set(branch+ 'fast_pi2_mod_frq', finit)
### CNOTs
cfg.set(branch+ 'pi2pi_mIm1_duration', 396e-9)
cfg.set(branch+ 'pi2pi_mIm1_amp', 0.083)
cfg.set(branch+ 'pi2pi_mIm1_mod_frq', finit)
### CORPSE used in the BSM
CORPSE_frq = 7.5e6
cfg.set(branch+ 'CORPSE_pi_60_duration', 1./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_420_duration', 7./CORPSE_frq/6.)
cfg.set(branch+ 'CORPSE_pi_mod_frq', finit + Nsplit/2.)
cfg.set(branch+ 'CORPSE_pi_amp', 0.363)
cfg.set(branch+ 'CORPSE_pi2_24p3_duration', 24.3/CORPSE_frq/360.)
cfg.set(branch+ 'CORPSE_pi2_m318p6_duration', 318.6/CORPSE_frq/360.)
cfg.set(branch+ 'CORPSE_pi2_384p3_duration', 384.3/CORPSE_frq/360.)
cfg.set(branch+ 'CORPSE_pi2_mod_frq', f_msm1_cntr - mw0)
cfg.set(branch+ 'CORPSE_pi2_amp', 0.55) ###not calibrated
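# For CORPSE_frq = 7.5 MHz the durations above evaluate to roughly:
#   CORPSE_pi_60_duration   = 1/(6*7.5e6) ~ 22.2 ns
#   CORPSE_pi_m300_duration = 5/(6*7.5e6) ~ 111.1 ns
#   CORPSE_pi_420_duration  = 7/(6*7.5e6) ~ 155.6 ns
#   CORPSE_pi2 segments: 24.3/360, 318.6/360 and 384.3/360 of a period ~ 9.0 ns, 118.0 ns and 142.3 ns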
"""
# ### TODO
cfg.set(branch+ 'CORPSE_pi_phase_shift', 104.0)
# ### TODO
cfg.set(branch+ 'CORPSE_pi_center_shift', 0.e-9)
### CORPSE for the full ms=-1 manifold, driven in the center
### (resonant with mI = 0)
CORPSE_frq = 8e6
cfg.set(branch+ 'msm1_CORPSE_pi_60_duration', 1./CORPSE_frq/6.)
cfg.set(branch+ 'msm1_CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)
cfg.set(branch+ 'msm1_CORPSE_pi_420_duration', 7./CORPSE_frq/6.)
cfg.set(branch+ 'msm1_CORPSE_pi_mod_frq', f_msm1_cntr - mw0)
cfg.set(branch+ 'msm1_CORPSE_pi_amp', 0.782)
cfg.set(branch+ 'msm1_CORPSE_pi2_24p3_duration', 24.3/CORPSE_frq/360.)
cfg.set(branch+ 'msm1_CORPSE_pi2_m318p6_duration', 318.6/CORPSE_frq/360.)
cfg.set(branch+ 'msm1_CORPSE_pi2_384p3_duration', 384.3/CORPSE_frq/360.)
cfg.set(branch+ 'msm1_CORPSE_pi2_mod_frq', f_msm1_cntr - mw0)
cfg.set(branch+ 'msm1_CORPSE_pi2_amp', 0.818) ###not calibrated
cfg.set(branch+ 'first_C_revival', 50.90e-6)
### Nitrogen pulses
cfg.set(branch+ 'N_pi_duration', 47.3e-6)
cfg.set(branch+ 'N_pi_amp', 1)
cfg.set(branch+ 'N_pi2_duration', 47.3e-6/2.)
cfg.set(branch+ 'N_pi2_amp', 1)
"""
##############################################################################
##############################################################################
##############################################################################
### DONE
##############################################################################
##############################################################################
##############################################################################
cfg.save_all()
|
normal
|
{
"blob_id": "3f20438b0dd2ae8de470e5456dbb764eabf69645",
"index": 8092,
"step-1": "import types\nimport qt\ncfg = qt.cfgman\n\ncfg.remove_cfg('protocols')\ncfg.remove_cfg('samples')\ncfg.remove_cfg('setup')\ncfg.add_cfg('protocols')\ncfg.add_cfg('samples')\ncfg.add_cfg('setup')\n\ncfg['samples']['current'] = 'hans-sil13'\ncfg['protocols']['current'] = 'hans-sil13-default'\n\nprint 'updating msmt params for {}'.format(cfg['samples']['current'])\n\n##############################################################################\n##############################################################################\n### Protocols\n###\n### To make sure everything works fine, only leave the current sample\n### un-commented here (we use local variables here for convenience,\n### that WILL lead to trouble because the get overwritten in each sample \n### section)\n##############################################################################\n##############################################################################\n\n\n##############################################################################\n### The111/2\n##############################################################################\n\n# branch='samples/sil2/'\n\n# f_msm1_cntr = 2.826961e9\n# N_frq = 7.13429e6\n# N_HF_frq = 2.19290e6\n# cfg.set(branch+'ms-1_cntr_frq', f_msm1_cntr)\n# cfg.set(branch+'N_0-1_splitting_ms-1', N_frq)\n# cfg.set(branch+'N_HF_frq', N_HF_frq)\n# mw0 = 2.8e9\n# f0 = f_msm1_cntr - mw0\n# Nsplit = N_HF_frq\n# finit = f0 - Nsplit\n# fmIp1 = f_msm1_cntr - mw0 + N_HF_frq\n# cfg.set(branch+'mIm1_mod_frq', f_msm1_cntr - mw0 - N_HF_frq)\n# cfg.set(branch+'mI0_mod_frq', f_msm1_cntr - mw0)\n# cfg.set(branch+'mIp1_mod_frq', f_msm1_cntr - mw0 + N_HF_frq)\n\n\n##############################################################################\n### HANS/4\n##############################################################################\n\n# branch='samples/hans-sil4/'\n\n# f_msm1_cntr = 2.826455e9\n# N_frq = 7.13377e6\n# N_HF_frq = 2.19290e6\n# cfg.set(branch+'ms-1_cntr_frq', f_msm1_cntr)\n# cfg.set(branch+'N_0-1_splitting_ms-1', N_frq)\n# cfg.set(branch+'N_HF_frq', N_HF_frq)\n# mw0 = 2.8e9\n# f0 = f_msm1_cntr - mw0\n# Nsplit = N_HF_frq\n# finit = f0 - Nsplit\n# fmIp1 = f_msm1_cntr - mw0 + N_HF_frq\n# cfg.set(branch+'mIm1_mod_frq', f_msm1_cntr - mw0 - N_HF_frq)\n# cfg.set(branch+'mI0_mod_frq', f_msm1_cntr - mw0)\n# cfg.set(branch+'mIp1_mod_frq', f_msm1_cntr - mw0 + N_HF_frq)\n\nbranch='samples/hans-sil4/'\n\nf_msm1_cntr = 2.827962e9\nN_frq = 7.13456e6\nN_HF_frq = 2.19290e6\ncfg.set(branch+'ms-1_cntr_frq', f_msm1_cntr)\ncfg.set(branch+'N_0-1_splitting_ms-1', N_frq)\ncfg.set(branch+'N_HF_frq', N_HF_frq)\nmw0 = 2.8e9\nf0 = f_msm1_cntr - mw0\nNsplit = N_HF_frq\nfinit = f0 - Nsplit\nfmIp1 = f_msm1_cntr - mw0 + N_HF_frq\ncfg.set(branch+'mIm1_mod_frq', f_msm1_cntr - mw0 - N_HF_frq)\ncfg.set(branch+'mI0_mod_frq', f_msm1_cntr - mw0)\ncfg.set(branch+'mIp1_mod_frq', f_msm1_cntr - mw0 + N_HF_frq)\n\n##############################################################################\n##############################################################################\n### Protocols\n##############################################################################\n##############################################################################\n\n### General settings for AdwinSSRO\nbranch='protocols/AdwinSSRO/'\ncfg.set(branch+ 'AWG_done_DI_channel', 16)\ncfg.set(branch+ 'AWG_event_jump_DO_channel', 14)\ncfg.set(branch+ 'AWG_start_DO_channel', 10)\ncfg.set(branch+ 'A_laser_DAC_channel', 6)\ncfg.set(branch+ 'Ex_laser_DAC_channel', 
7)\ncfg.set(branch+ 'counter_channel', 1)\ncfg.set(branch+ 'cycle_duration', 300)\ncfg.set(branch+ 'green_laser_DAC_channel', 4)\ncfg.set(branch+ 'green_off_amplitude', 0.0)\ncfg.set(branch+ 'green_repump_amplitude', 50e-6)\ncfg.set(branch+ 'green_repump_duration', 10)\ncfg.set(branch+ 'send_AWG_start', 0)\ncfg.set(branch+ 'sequence_wait_time', 1)\ncfg.set(branch+ 'wait_after_RO_pulse_duration', 3)\ncfg.set(branch+ 'wait_after_pulse_duration', 3)\ncfg.set(branch+ 'cr_wait_after_pulse_duration', 1)\ncfg.set(branch+ 'wait_for_AWG_done', 0)\ncfg.set(branch+ 'green_off_voltage', 0)\ncfg.set(branch+ 'repump_off_voltage', 0)\ncfg.set(branch+ 'yellow_repump_amplitude', 50e-9)\ncfg.set(branch+ 'yellow_repump_duration', 500)\ncfg.set(branch+ 'yellow_repump_after_repetitions',100)\ncfg.set(branch+ 'yellow_CR_repump', 1)\ncfg.set(branch+ 'green_repump_after_repetitions',1)\ncfg.set(branch+ 'green_CR_repump', 1000)\ncfg.set(branch+ 'CR_probe_max_time', 1000000)\n\ncfg.set(branch+ 'SP_filter_duration', 0)\ncfg.set(branch+ 'SSRO_duration', 50)\ncfg.set(branch+ 'SSRO_repetitions', 1000)\ncfg.set(branch+ 'SSRO_stop_after_first_photon', 0)\n\nyellow=True\ncfg.set(branch + 'yellow', yellow)\nif yellow:\n cfg.set(branch + 'repump_duration', cfg.get(branch+ 'yellow_repump_duration'))\n cfg.set(branch + 'repump_amplitude', cfg.get(branch+ 'yellow_repump_amplitude')) \n cfg.set(branch + 'CR_repump', cfg.get(branch+ 'yellow_CR_repump'))\n cfg.set(branch + 'repump_after_repetitions', cfg.get(branch+ 'yellow_repump_after_repetitions'))\nelse:\n cfg.set(branch + 'repump_duration', cfg.get(branch+ 'green_repump_duration'))\n cfg.set(branch + 'repump_amplitude', cfg.get(branch+ 'green_repump_amplitude')) \n cfg.set(branch + 'CR_repump', cfg.get(branch+ 'green_CR_repump'))\n cfg.set(branch + 'repump_after_repetitions', cfg.get(branch+ 'green_repump_after_repetitions'))\n\n### General settings for AdwinSSRO+espin\nbranch='protocols/AdwinSSRO+espin/'\ncfg.set(branch+ 'send_AWG_start', 1)\ncfg.set(branch+ 'MW_pulse_mod_risetime', 10e-9)\ncfg.set(branch+ 'mw_frq', mw0)\ncfg.set(branch+ 'mw_power', 20)\n\n### General settings for AdwinSSRO+MBI\nbranch='protocols/AdwinSSRO+MBI/'\ncfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 1e-6)\ncfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', \n np.array([15e-6]).tolist())\ncfg.set(branch+ 'AWG_wait_duration_before_shelving_pulse', 100e-9)\ncfg.set(branch+ 'nr_of_ROsequences', 1)\ncfg.set(branch+ 'MW_pulse_mod_risetime', 10e-9)\ncfg.set(branch+ 'AWG_to_adwin_ttl_trigger_duration', 5e-6)\ncfg.set(branch+ 'max_MBI_attempts', 1)\ncfg.set(branch+ 'N_randomize_duration', 50)\ncfg.set(branch+ 'Ex_N_randomize_amplitude', 0e-9)\ncfg.set(branch+ 'A_N_randomize_amplitude', 0e-9)\ncfg.set(branch+ 'yellow_N_randomize_amplitude', 0e-9)\n\n##############################################################################\n##############################################################################\n### Specific sample settings for protocols\n##############################################################################\n##############################################################################\n\n##############################################################################\n### HANS/13 --- SSRO\n##############################################################################\n\nbranch='protocols/hans-sil13-default/AdwinSSRO/' \ncfg.set(branch+ 'A_CR_amplitude', 10e-9)\ncfg.set(branch+ 'A_RO_amplitude', 0.)\ncfg.set(branch+ 'A_SP_amplitude', 10e-9)\ncfg.set(branch+ 'CR_duration', 
50)\ncfg.set(branch+ 'CR_preselect', 1000)\ncfg.set(branch+ 'CR_probe', 20)\ncfg.set(branch+ 'CR_repump', 1000)\ncfg.set(branch+ 'Ex_CR_amplitude', 10e-9)\ncfg.set(branch+ 'Ex_RO_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_SP_amplitude', 0.)\ncfg.set(branch+ 'SP_duration', 100)\ncfg.set(branch+ 'SP_filter_duration', 0)\ncfg.set(branch+ 'SSRO_duration', 25)\ncfg.set(branch+ 'SSRO_repetitions', 5000)\ncfg.set(branch+ 'SSRO_stop_after_first_photon', 0)\n\ncfg.set('protocols/hans-sil1-default/AdwinSSRO-integrated/SSRO_duration', 25)\n\n\n###############################################################################\n#### HANS/1 --- SSRO\n###############################################################################\n#\n#branch='protocols/hans-sil1-default/AdwinSSRO/' \n#cfg.set(branch+ 'A_CR_amplitude', 15e-9)\n#cfg.set(branch+ 'A_RO_amplitude', 0.)\n#cfg.set(branch+ 'A_SP_amplitude', 15e-9)\n#cfg.set(branch+ 'CR_duration', 50)\n#cfg.set(branch+ 'CR_preselect', 1000)\n#cfg.set(branch+ 'CR_probe', 20)\n#cfg.set(branch+ 'CR_repump', 1000)\n#cfg.set(branch+ 'Ex_CR_amplitude', 4e-9)\n#cfg.set(branch+ 'Ex_RO_amplitude', 4e-9)\n#cfg.set(branch+ 'Ex_SP_amplitude', 0.)\n#cfg.set(branch+ 'SP_duration', 250)\n#cfg.set(branch+ 'SP_filter_duration', 0)\n#cfg.set(branch+ 'SSRO_duration', 50)\n#cfg.set(branch+ 'SSRO_repetitions', 5000)\n#cfg.set(branch+ 'SSRO_stop_after_first_photon', 0)\n#\n#cfg.set('protocols/hans-sil1-default/AdwinSSRO-integrated/SSRO_duration', 11)\n#\n###############################################################################\n#### HANS/1 --- MBI\n###############################################################################\n#\n#branch='protocols/hans-sil1-default/AdwinSSRO+MBI/'\n#cfg.set(branch+ 'mw_frq', mw0)\n#cfg.set(branch+ 'mw_power', 20)\n#cfg.set(branch+ 'Ex_MBI_amplitude', 4e-9)\n#cfg.set(branch+ 'Ex_SP_amplitude', 10e-9)\n#cfg.set(branch+ 'MBI_duration', 4) #put back to 4 with gate\n#cfg.set(branch+ 'max_MBI_attempts', 1)\n#cfg.set(branch+ 'MBI_threshold', 1)\n#cfg.set(branch+ 'SP_E_duration', 60)\n#cfg.set(branch+ 'repump_after_MBI_duration', 15)\n#cfg.set(branch+ 'repump_after_MBI_amplitude', 15e-9)\n#cfg.set(branch+ 'repump_after_E_RO_duration', 15)\n#cfg.set(branch+ 'repump_after_E_RO_amplitude', 15e-9)\n#\n## MBI pulse\n#cfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)\n#cfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)\n#\n###############################################################################\n#### HANS/1 --- Pulses\n###############################################################################\n#\n#branch='protocols/hans-sil1-default/pulses/'\n#\n#cfg.set(branch+ 'selective_pi_duration', 2500e-9)\n#cfg.set(branch+ 'selective_pi_amp', 0.0166)\n#cfg.set(branch+ 'selective_pi_mod_frq', finit)\n#cfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq', \n# finit)\n#cfg.set(branch+ 'AWG_MBI_MW_pulse_ssbmod_frq',\n# finit)\n#cfg.set(branch+ 'AWG_MBI_MW_pulse_amp', \n# cfg.get(branch+ 'selective_pi_amp'))\n#cfg.set(branch+ 'AWG_MBI_MW_pulse_duration', \n# cfg.get(branch+ 'selective_pi_duration'))\n#\n#cfg.set(branch+ 'fast_pi_duration', 80e-9)\n#cfg.set(branch+ 'fast_pi_amp', 0.816)\n#cfg.set(branch+ 'fast_pi_mod_frq', finit)\n#\n#cfg.set(branch+ 'fast_pi2_duration', 40e-9)\n#cfg.set(branch+ 'fast_pi2_amp', 0.816)\n#cfg.set(branch+ 'fast_pi2_mod_frq', finit)\n#\n#### CNOTs\n#cfg.set(branch+ 'pi2pi_mIm1_duration', 396e-9)\n#cfg.set(branch+ 'pi2pi_mIm1_amp', 0.109166)\n#cfg.set(branch+ 'pi2pi_mIm1_mod_frq', finit)\n#\n#### CORPSE used in the 
BSM\n#CORPSE_frq = 5e6\n#cfg.set(branch+ 'CORPSE_pi_60_duration', 1./CORPSE_frq/6.)\n#cfg.set(branch+ 'CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)\n#cfg.set(branch+ 'CORPSE_pi_420_duration', 7./CORPSE_frq/6.)\n#cfg.set(branch+ 'CORPSE_pi_mod_frq', finit + Nsplit/2.)\n#cfg.set(branch+ 'CORPSE_pi_amp', 0.529)\n#\n## ### TODO\n#cfg.set(branch+ 'CORPSE_pi_phase_shift', 104.0)\n#\n## ### TODO\n#cfg.set(branch+ 'CORPSE_pi_center_shift', 0.e-9)\n#\n#### CORPSE for the full ms=-1 manifold, driven in the center \n#### (resonant with mI = 0)\n#CORPSE_frq = 6.5e6\n#cfg.set(branch+ 'msm1_CORPSE_pi_60_duration', 1./CORPSE_frq/6.)\n#cfg.set(branch+ 'msm1_CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)\n#cfg.set(branch+ 'msm1_CORPSE_pi_420_duration', 7./CORPSE_frq/6.)\n#cfg.set(branch+ 'msm1_CORPSE_pi_mod_frq', f_msm1_cntr - mw0)\n#cfg.set(branch+ 'msm1_CORPSE_pi_amp', 0.797641)\n#\n## cfg.set(branch+ 'msm1_CORPSE_pi2_24p3_duration', 24.3/CORPSE_frq/360.)\n## cfg.set(branch+ 'msm1_CORPSE_pi2_m318p6_duration', 318.6/CORPSE_frq/360.)\n## cfg.set(branch+ 'msm1_CORPSE_pi2_384p3_duration', 384.3/CORPSE_frq/360.)\n## cfg.set(branch+ 'msm1_CORPSE_pi2_mod_frq', f_msm1_cntr - mw0)\n## cfg.set(branch+ 'msm1_CORPSE_pi2_amp', 0.818) ###not calibrated\n#\n#cfg.set(branch+ 'first_C_revival', 50.90e-6)\n#\n#### Nitrogen pulses\n#cfg.set(branch+ 'N_pi_duration', 47.3e-6)\n#cfg.set(branch+ 'N_pi_amp', 1)\n#\n#cfg.set(branch+ 'N_pi2_duration', 47.3e-6/2.)\n#cfg.set(branch+ 'N_pi2_amp', 1)\n#\n#\n\n\n\n\n\"\"\"\n\nbranch='protocols/sil2-default/pulses/'\n\ntof = 0#11e-9\ncfg.set(branch+ 't_offset', tof)\n\ncfg.set(branch+ '4MHz_pi_duration', tof + 125e-9)\ncfg.set(branch+ '4MHz_pi_amp', 0.599)\ncfg.set(branch+ '4MHz_pi_mod_frq', finit)\n\ncfg.set(branch+ '4MHz_pi2_duration', tof + 62e-9)\ncfg.set(branch+ '4MHz_pi2_amp', 0.599)\ncfg.set(branch+ '4MHz_pi2_mod_frq', finit)\n\ncfg.set(branch+ 'selective_pi_duration', 2600e-9)\ncfg.set(branch+ 'selective_pi_amp', 0.02)\ncfg.set(branch+ 'selective_pi_mod_frq', finit)\n\nCORPSE_frq = 3.991e6\ncfg.set(branch+ 'CORPSE_pi_60_duration', tof/2. + 1./CORPSE_frq/6.)\ncfg.set(branch+ 'CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)\ncfg.set(branch+ 'CORPSE_pi_420_duration', tof/2. 
+ 7./CORPSE_frq/6.)\ncfg.set(branch+ 'CORPSE_pi_mod_frq', finit + Nsplit/2.)\ncfg.set(branch+ 'CORPSE_pi_amp', 0.703)\ncfg.set(branch+ 'CORPSE_pi_phase_shift', 74.0)\ncfg.set(branch+ 'CORPSE_pi_center_shift', 13e-9)\n\ncfg.set(branch+ 'pi2pi_mIm1_duration', tof + 395e-9)\ncfg.set(branch+ 'pi2pi_mIm1_amp', 0.164)\ncfg.set(branch+ 'pi2pi_mIm1_mod_frq', finit)\n\ncfg.set(branch+ 'pi2pi_mI0_duration', tof + 395e-9)\ncfg.set(branch+ 'pi2pi_mI0_amp', 0.170)\ncfg.set(branch+ 'pi2pi_mI0_mod_frq', f0)\n\ncfg.set(branch+ 'pi2pi_mIp1_duration', tof + 395e-9)\ncfg.set(branch+ 'pi2pi_mIp1_amp', 0.185)\ncfg.set(branch+ 'pi2pi_mIp1_mod_frq', fmIp1)\n\n\n### set some other pulses that determinine their values from the ones above\ncfg.set(branch+ 'AWG_N_CNOT_pulse_duration', \n cfg.get(branch+ 'pi2pi_mIm1_duration'))\ncfg.set(branch+ 'AWG_N_CNOT_pulse_amp', \n cfg.get(branch+ 'pi2pi_mIm1_amp'))\ncfg.set(branch+ 'AWG_N_CNOT_pulse_mod_frq', \n cfg.get(branch+ 'pi2pi_mIm1_mod_frq'))\n\ncfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq', \n finit)\ncfg.set(branch+ 'AWG_MBI_MW_pulse_amp', \n cfg.get(branch+ 'selective_pi_amp'))\ncfg.set(branch+ 'AWG_MBI_MW_pulse_duration', \n cfg.get(branch+ 'selective_pi_duration'))\n\ncfg.set(branch+ 'AWG_shelving_pulse_duration',\n cfg.get(branch+ '4MHz_pi_duration'))\ncfg.set(branch+ 'AWG_shelving_pulse_amp', \n cfg.get(branch+ '4MHz_pi_amp'))\n\n### Nitrogen pulses\ncfg.set(branch+ 'N_pi_duration', 91.1e-6)\ncfg.set(branch+ 'N_pi_amp', 1)\n\ncfg.set(branch+ 'N_pi2_duration', 91.1e-6/2.)\ncfg.set(branch+ 'N_pi2_amp', 1)\n\n##############################################################################\n### The111/2 - SSRO\n##############################################################################\n\nbranch='protocols/sil2-default/AdwinSSRO/' \ncfg.set(branch+ 'A_CR_amplitude', 10e-9)\ncfg.set(branch+ 'A_RO_amplitude', 0.)\ncfg.set(branch+ 'A_SP_amplitude', 10e-9)\ncfg.set(branch+ 'CR_duration', 100)\ncfg.set(branch+ 'CR_preselect', 40)\ncfg.set(branch+ 'CR_probe', 40)\ncfg.set(branch+ 'CR_repump', 1000)\ncfg.set(branch+ 'Ex_CR_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_RO_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_SP_amplitude', 0.)\ncfg.set(branch+ 'SP_duration', 250)\ncfg.set(branch+ 'SP_filter_duration', 0)\ncfg.set(branch+ 'SSRO_duration', 50)\ncfg.set(branch+ 'SSRO_repetitions', 1000)\ncfg.set(branch+ 'SSRO_stop_after_first_photon', 0)\n\n### integrated SSRO\ncfg.set('protocols/sil2-default/AdwinSSRO-integrated/SSRO_duration', 15)\n\n##############################################################################\n### The111/2 - MBI\n##############################################################################\n\nbranch='protocols/sil2-default/AdwinSSRO+MBI/'\ncfg.set(branch+ 'mw_frq', mw0)\ncfg.set(branch+ 'mw_power', 20)\ncfg.set(branch+ 'Ex_MBI_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_SP_amplitude', 10e-9)\ncfg.set(branch+ 'MBI_duration', 4)\ncfg.set(branch+ 'max_MBI_attempts', 1)\ncfg.set(branch+ 'MBI_threshold', 1)\ncfg.set(branch+ 'SP_E_duration', 100)\ncfg.set(branch+ 'repump_after_MBI_duration', 100)\ncfg.set(branch+ 'repump_after_MBI_amplitude', 25e-9)\ncfg.set(branch+ 'repump_after_E_RO_duration', 100)\ncfg.set(branch+ 'repump_after_E_RO_amplitude', 25e-9)\n\n# MBI pulse\ncfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)\ncfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)\n\n### sil2, BSM\ncfg.set('protocols/sil2-default/BSM/N_ref_frq', N_frq)\ncfg.set('protocols/sil2-default/BSM/e_ref_frq', 
finit)\n\n##############################################################################\n### HANS/1 --- Pulses\n##############################################################################\n\nbranch='protocols/hans-sil1-default/pulses/'\n\ncfg.set(branch+ 'selective_pi_duration', 2500e-9)\ncfg.set(branch+ 'selective_pi_amp', 0.015)\ncfg.set(branch+ 'selective_pi_mod_frq', finit)\ncfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq', \n finit)\ncfg.set(branch+ 'AWG_MBI_MW_pulse_ssbmod_frq',\n finit)\ncfg.set(branch+ 'AWG_MBI_MW_pulse_amp', \n cfg.get(branch+ 'selective_pi_amp'))\ncfg.set(branch+ 'AWG_MBI_MW_pulse_duration', \n cfg.get(branch+ 'selective_pi_duration'))\n\n##############################################################################\n### HANS/1 --- SSRO\n##############################################################################\n\nbranch='protocols/hans-sil1-default/AdwinSSRO/' \ncfg.set(branch+ 'A_CR_amplitude', 15e-9)\ncfg.set(branch+ 'A_RO_amplitude', 0.)\ncfg.set(branch+ 'A_SP_amplitude', 15e-9)\ncfg.set(branch+ 'CR_duration', 50)\ncfg.set(branch+ 'CR_preselect', 1000)\ncfg.set(branch+ 'CR_probe', 20)\ncfg.set(branch+ 'CR_repump', 1000)\ncfg.set(branch+ 'Ex_CR_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_RO_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_SP_amplitude', 0.)\ncfg.set(branch+ 'SP_duration', 250)\ncfg.set(branch+ 'SP_filter_duration', 0)\ncfg.set(branch+ 'SSRO_duration', 50)\ncfg.set(branch+ 'SSRO_repetitions', 5000)\ncfg.set(branch+ 'SSRO_stop_after_first_photon', 0)\n\ncfg.set('protocols/hans-sil1-default/AdwinSSRO-integrated/SSRO_duration', 15)\n\n##############################################################################\n### HANS/1 --- MBI\n##############################################################################\n\nbranch='protocols/hans-sil1-default/AdwinSSRO+MBI/'\ncfg.set(branch+ 'mw_frq', mw0)\ncfg.set(branch+ 'mw_power', 20)\ncfg.set(branch+ 'Ex_MBI_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_SP_amplitude', 15e-9)\ncfg.set(branch+ 'MBI_duration', 4)\ncfg.set(branch+ 'max_MBI_attempts', 1)\ncfg.set(branch+ 'MBI_threshold', 1)\ncfg.set(branch+ 'SP_E_duration', 60)\ncfg.set(branch+ 'repump_after_MBI_duration', 15)\ncfg.set(branch+ 'repump_after_MBI_amplitude', 15e-9)\ncfg.set(branch+ 'repump_after_E_RO_duration', 15)\ncfg.set(branch+ 'repump_after_E_RO_amplitude', 15e-9)\n\n# MBI pulse\ncfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)\ncfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)\n\n##############################################################################\n### HANS/7\n##############################################################################\n\nbranch='protocols/hans-sil7-default/AdwinSSRO/' \ncfg.set(branch+ 'A_CR_amplitude', 10e-9)\ncfg.set(branch+ 'A_RO_amplitude', 0.)\ncfg.set(branch+ 'A_SP_amplitude', 10e-9)\ncfg.set(branch+ 'CR_duration', 100)\ncfg.set(branch+ 'CR_preselect', 15)\ncfg.set(branch+ 'CR_probe', 5)\ncfg.set(branch+ 'CR_repump', 1000)\ncfg.set(branch+ 'Ex_CR_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_RO_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_SP_amplitude', 0.)\ncfg.set(branch+ 'SP_duration', 250)\ncfg.set(branch+ 'SP_filter_duration', 0)\ncfg.set(branch+ 'SSRO_duration', 50)\ncfg.set(branch+ 'SSRO_repetitions', 5000)\ncfg.set(branch+ 'SSRO_stop_after_first_photon', 0)\n\ncfg.set('protocols/hans-sil7-default/AdwinSSRO-integrated/SSRO_duration', 50)\n\"\"\"\n##############################################################################\n### HANS/4 --- 
SSRO\n##############################################################################\n\nbranch='protocols/hans-sil4-default/AdwinSSRO/' \ncfg.set(branch+ 'A_CR_amplitude', 5e-9)\ncfg.set(branch+ 'A_RO_amplitude', 0.)\ncfg.set(branch+ 'A_SP_amplitude', 60e-9)\ncfg.set(branch+ 'CR_duration', 50)\ncfg.set(branch+ 'CR_preselect', 1000)\ncfg.set(branch+ 'CR_probe', 20)\ncfg.set(branch+ 'CR_repump', 1000)\ncfg.set(branch+ 'Ex_CR_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_RO_amplitude', 10e-9)\ncfg.set(branch+ 'Ex_SP_amplitude', 0.)\ncfg.set(branch+ 'SP_duration', 9)\ncfg.set(branch+ 'SP_filter_duration', 0)\ncfg.set(branch+ 'SSRO_duration', 10)\ncfg.set(branch+ 'SSRO_repetitions', 5000)\ncfg.set(branch+ 'SSRO_stop_after_first_photon', 0)\ncfg.set('protocols/hans-sil4-default/AdwinSSRO-integrated/SSRO_duration', 10)\n\ncfg.set('protocols/hans-sil4-default/AdwinSSRO+espin/mw_frq', mw0)\ncfg.set('protocols/hans-sil4-default/AdwinSSRO+espin/mw_power', 20)\n\n##############################################################################\n### HANS/4 --- MBI\n##############################################################################\n\nbranch='protocols/hans-sil4-default/AdwinSSRO+MBI/'\ncfg.set(branch+ 'mw_frq', mw0)\ncfg.set(branch+ 'mw_power', 20)\ncfg.set(branch+ 'Ex_MBI_amplitude', 5e-9)\ncfg.set(branch+ 'Ex_SP_amplitude', 10e-9)\ncfg.set(branch+ 'MBI_duration', 4)\ncfg.set(branch+ 'max_MBI_attempts', 1)\ncfg.set(branch+ 'MBI_threshold', 1)\ncfg.set(branch+ 'SP_E_duration', 100)\ncfg.set(branch+ 'repump_after_MBI_duration', 15)\ncfg.set(branch+ 'repump_after_MBI_A_amplitude', [5e-9])\ncfg.set(branch+ 'repump_after_MBI_E_amplitude', [0e-9])\ncfg.set(branch+ 'repump_after_E_RO_duration', 15)\ncfg.set(branch+ 'repump_after_E_RO_amplitude', 5e-9)\n\n# MBI pulse\ncfg.set(branch+ 'AWG_wait_duration_before_MBI_MW_pulse', 50e-9)\ncfg.set(branch+ 'AWG_wait_for_adwin_MBI_duration', 15e-6)\n\n### BSM\ncfg.set('protocols/hans-sil4-default/BSM/N_ref_frq', N_frq)\ncfg.set('protocols/hans-sil4-default/BSM/e_ref_frq', finit)\ncfg.set('protocols/hans-sil4-default/BSM/pi2_evolution_time', 51.086e-6)\ncfg.set('protocols/hans-sil4-default/BSM/H_evolution_time', 50.746e-6)\ncfg.set('protocols/hans-sil4-default/BSM/H_phase', 46)\n\n##############################################################################\n### HANS/4 --- Pulses\n##############################################################################\n\nbranch='protocols/hans-sil4-default/pulses/'\n\ncfg.set(branch+ 'selective_pi_duration', 2500e-9)\ncfg.set(branch+ 'selective_pi_amp', 0.011)\ncfg.set(branch+ 'selective_pi_mod_frq', finit)\ncfg.set(branch+ 'AWG_MBI_MW_pulse_mod_frq', \n finit)\ncfg.set(branch+ 'AWG_MBI_MW_pulse_ssbmod_frq',\n finit)\ncfg.set(branch+ 'AWG_MBI_MW_pulse_amp', \n cfg.get(branch+ 'selective_pi_amp'))\ncfg.set(branch+ 'AWG_MBI_MW_pulse_duration', \n cfg.get(branch+ 'selective_pi_duration'))\n\ncfg.set(branch+ 'fast_pi_duration', 62e-9)\ncfg.set(branch+ 'fast_pi_amp', 0.844)\ncfg.set(branch+ 'fast_pi_mod_frq', finit)\n\ncfg.set(branch+ 'fast_pi2_duration', 33e-9)\ncfg.set(branch+ 'fast_pi2_amp', 0.812)\ncfg.set(branch+ 'fast_pi2_mod_frq', finit)\n\n### CNOTs\ncfg.set(branch+ 'pi2pi_mIm1_duration', 396e-9)\ncfg.set(branch+ 'pi2pi_mIm1_amp', 0.083)\ncfg.set(branch+ 'pi2pi_mIm1_mod_frq', finit)\n\n### CORPSE used in the BSM\nCORPSE_frq = 7.5e6\ncfg.set(branch+ 'CORPSE_pi_60_duration', 1./CORPSE_frq/6.)\ncfg.set(branch+ 'CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)\ncfg.set(branch+ 'CORPSE_pi_420_duration', 
7./CORPSE_frq/6.)\ncfg.set(branch+ 'CORPSE_pi_mod_frq', finit + Nsplit/2.)\ncfg.set(branch+ 'CORPSE_pi_amp', 0.363)\ncfg.set(branch+ 'CORPSE_pi2_24p3_duration', 24.3/CORPSE_frq/360.)\ncfg.set(branch+ 'CORPSE_pi2_m318p6_duration', 318.6/CORPSE_frq/360.)\ncfg.set(branch+ 'CORPSE_pi2_384p3_duration', 384.3/CORPSE_frq/360.)\ncfg.set(branch+ 'CORPSE_pi2_mod_frq', f_msm1_cntr - mw0)\ncfg.set(branch+ 'CORPSE_pi2_amp', 0.55) ###not calibrated\n\"\"\"\n# ### TODO\ncfg.set(branch+ 'CORPSE_pi_phase_shift', 104.0)\n\n# ### TODO\ncfg.set(branch+ 'CORPSE_pi_center_shift', 0.e-9)\n\n### CORPSE for the full ms=-1 manifold, driven in the center \n### (resonant with mI = 0)\nCORPSE_frq = 8e6\ncfg.set(branch+ 'msm1_CORPSE_pi_60_duration', 1./CORPSE_frq/6.)\ncfg.set(branch+ 'msm1_CORPSE_pi_m300_duration', 5./CORPSE_frq/6.)\ncfg.set(branch+ 'msm1_CORPSE_pi_420_duration', 7./CORPSE_frq/6.)\ncfg.set(branch+ 'msm1_CORPSE_pi_mod_frq', f_msm1_cntr - mw0)\ncfg.set(branch+ 'msm1_CORPSE_pi_amp', 0.782)\n\ncfg.set(branch+ 'msm1_CORPSE_pi2_24p3_duration', 24.3/CORPSE_frq/360.)\ncfg.set(branch+ 'msm1_CORPSE_pi2_m318p6_duration', 318.6/CORPSE_frq/360.)\ncfg.set(branch+ 'msm1_CORPSE_pi2_384p3_duration', 384.3/CORPSE_frq/360.)\ncfg.set(branch+ 'msm1_CORPSE_pi2_mod_frq', f_msm1_cntr - mw0)\ncfg.set(branch+ 'msm1_CORPSE_pi2_amp', 0.818) ###not calibrated\n\ncfg.set(branch+ 'first_C_revival', 50.90e-6)\n\n### Nitrogen pulses\ncfg.set(branch+ 'N_pi_duration', 47.3e-6)\ncfg.set(branch+ 'N_pi_amp', 1)\n\ncfg.set(branch+ 'N_pi2_duration', 47.3e-6/2.)\ncfg.set(branch+ 'N_pi2_amp', 1)\n\n\"\"\"\n\n##############################################################################\n##############################################################################\n##############################################################################\n### DONE\n##############################################################################\n##############################################################################\n##############################################################################\n\ncfg.save_all()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from project import db
from project.models import User, Recipe, Association, Ingre, Recipe_ingre
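# Link the recipe "Jerry" to user "xiaofan" unless it is already associated with them.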
user = User.query.filter_by(username="xiaofan").first()
recipe = Recipe.query.filter_by(recipename="Jerry").first()
recipes = Recipe.query.filter(Recipe.users.any(username="xiaofan")).all()
if recipe not in recipes:
user.add_recipes([recipe])
# commit the changes
db.session.commit()
|
normal
|
{
"blob_id": "07f8fd305e2311c0e37a785da0a826b8ea4e78ba",
"index": 4154,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif recipe not in recipes:\n user.add_recipes([recipe])\n db.session.commit()\n",
"step-3": "<mask token>\nuser = User.query.filter_by(username='xiaofan').first()\nrecipe = Recipe.query.filter_by(recipename='Jerry').first()\nrecipes = Recipe.query.filter(Recipe.users.any(username='xiaofan')).all()\nif recipe not in recipes:\n user.add_recipes([recipe])\n db.session.commit()\n",
"step-4": "from project import db\nfrom project.models import User, Recipe, Association, Ingre, Recipe_ingre\nuser = User.query.filter_by(username='xiaofan').first()\nrecipe = Recipe.query.filter_by(recipename='Jerry').first()\nrecipes = Recipe.query.filter(Recipe.users.any(username='xiaofan')).all()\nif recipe not in recipes:\n user.add_recipes([recipe])\n db.session.commit()\n",
"step-5": "from project import db\nfrom project.models import User, Recipe, Association, Ingre, Recipe_ingre\n\n\n\n\nuser=User.query.filter_by(username=\"xiaofan\").first()\nrecipe=Recipe.query.filter_by(recipename=\"Jerry\").first()\nrecipes = Recipe.query.filter(Recipe.users.any(username=\"xiaofan\")).all()\n\nif recipe not in recipes:\n user.add_recipes([recipe])\n\n # commit the changes\n db.session.commit()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import weakref
from soma.controller import Controller
from soma.functiontools import SomaPartial
from traits.api import File, Undefined, Instance
class MatlabConfig(Controller):
executable = File(Undefined, output=False,
desc='Full path of the matlab executable')
def load_module(capsul_engine, module_name):
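    # Attach a MatlabConfig controller to the engine and propagate any change
    # of its traits into the execution context.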
capsul_engine.add_trait('matlab', Instance(MatlabConfig))
capsul_engine.matlab = MatlabConfig()
capsul_engine.matlab.on_trait_change(SomaPartial(update_execution_context,
weakref.proxy(capsul_engine)))
def init_module(capsul_engine, module_name, loaded_module):
pass
def update_execution_context(capsul_engine):
if capsul_engine.matlab.executable is not Undefined:
capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'] \
= capsul_engine.matlab.executable
|
normal
|
{
"blob_id": "4a8e8994ec8734664a5965b81da9d146d8504f8d",
"index": 6096,
"step-1": "<mask token>\n\n\nclass MatlabConfig(Controller):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MatlabConfig(Controller):\n executable = File(Undefined, output=False, desc=\n 'Full path of the matlab executable')\n\n\ndef load_module(capsul_engine, module_name):\n capsul_engine.add_trait('matlab', Instance(MatlabConfig))\n capsul_engine.matlab = MatlabConfig()\n capsul_engine.matlab.on_trait_change(SomaPartial(\n update_execution_context, weakref.proxy(capsul_engine)))\n\n\n<mask token>\n\n\ndef update_execution_context(capsul_engine):\n if capsul_engine.matlab.executable is not Undefined:\n capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'\n ] = capsul_engine.matlab.executable\n",
"step-3": "<mask token>\n\n\nclass MatlabConfig(Controller):\n executable = File(Undefined, output=False, desc=\n 'Full path of the matlab executable')\n\n\ndef load_module(capsul_engine, module_name):\n capsul_engine.add_trait('matlab', Instance(MatlabConfig))\n capsul_engine.matlab = MatlabConfig()\n capsul_engine.matlab.on_trait_change(SomaPartial(\n update_execution_context, weakref.proxy(capsul_engine)))\n\n\ndef init_module(capul_engine, module_name, loaded_module):\n pass\n\n\ndef update_execution_context(capsul_engine):\n if capsul_engine.matlab.executable is not Undefined:\n capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'\n ] = capsul_engine.matlab.executable\n",
"step-4": "import weakref\nfrom soma.controller import Controller\nfrom soma.functiontools import SomaPartial\nfrom traits.api import File, Undefined, Instance\n\n\nclass MatlabConfig(Controller):\n executable = File(Undefined, output=False, desc=\n 'Full path of the matlab executable')\n\n\ndef load_module(capsul_engine, module_name):\n capsul_engine.add_trait('matlab', Instance(MatlabConfig))\n capsul_engine.matlab = MatlabConfig()\n capsul_engine.matlab.on_trait_change(SomaPartial(\n update_execution_context, weakref.proxy(capsul_engine)))\n\n\ndef init_module(capul_engine, module_name, loaded_module):\n pass\n\n\ndef update_execution_context(capsul_engine):\n if capsul_engine.matlab.executable is not Undefined:\n capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'\n ] = capsul_engine.matlab.executable\n",
"step-5": "import weakref\n\nfrom soma.controller import Controller\nfrom soma.functiontools import SomaPartial\nfrom traits.api import File, Undefined, Instance\n\nclass MatlabConfig(Controller):\n executable = File(Undefined, output=False,\n desc='Full path of the matlab executable')\n \ndef load_module(capsul_engine, module_name):\n capsul_engine.add_trait('matlab', Instance(MatlabConfig))\n capsul_engine.matlab = MatlabConfig()\n capsul_engine.matlab.on_trait_change(SomaPartial(update_execution_context, \n weakref.proxy(capsul_engine)))\n\ndef init_module(capul_engine, module_name, loaded_module):\n pass\n\n\ndef update_execution_context(capsul_engine):\n if capsul_engine.matlab.executable is not Undefined:\n capsul_engine.execution_context.environ['MATLAB_EXECUTABLE'] \\\n = capsul_engine.matlab.executable\n\n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
#!/bin/python
import numpy as np
import os
from sklearn.svm import SVC
import pickle
import sys
# Apply the SVM model to the testing videos; Output the score for each video
if __name__ == '__main__':
if len(sys.argv) != 5:
print("Usage: {0} model_file feat_dir feat_dim output_file".format(sys.argv[0]))
print("model_file -- path of the trained svm file")
print("feat_dir -- dir of feature files")
print("file_list_path -- path of list file (val.lst or test.lst)")
print("output_file -- path to save the prediction score")
exit(1)
model_file = sys.argv[1]
feat_dir = sys.argv[2]
file_list_path = sys.argv[3]
output_file = sys.argv[4]
file_list = []
with open(file_list_path) as f:
for line in f.readlines():
L = line.replace('\n', ' ').split()
file_list.append(L[0])
smodel = pickle.load(open(model_file,"rb"))
possible_results = ['NULL', 'P001','P002','P003']
pred = []
conf = []
print('SVM_MODEL: {}'.format(model_file))
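    # Score every video with the trained SVM; videos without a bag-of-words
    # feature file fall back to the 'NULL' label and a dummy confidence row.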
for file in file_list:
bow_file = feat_dir + 'bow' + file + '.pkl'
if os.path.isfile(bow_file):
with open(bow_file,'rb') as f:
data = pickle.load(f)
pred.extend(smodel.predict([data]))
conf.extend(smodel.decision_function([data]))
else:
pred.extend(['NULL'])
conf.extend([[1, 0, 0, 0]])
print('NUM PREDICTION TO TEST: {}'.format(len(pred)))
with open(output_file,'w') as f:
for i in range(0, len(file_list)):
video = file_list[i]
f.write(str(video) + ' ' + pred[i] + '\n')
for i in range(1,4):
# tmp = np.asarray(pred)
# template = np.zeros(np.size(tmp))
# with open(possible_results[i] +'_val','w') as f:
# ind = np.where(tmp == possible_results[i])
# for j in range(0, len(ind)):
# template[ind[j]] = 1
# for j in range(0, len(template)):
# f.write(str(int(template[j])) +'\n')
print(output_file[0:-4]+'_'+possible_results[i] +'_val_label')
with open(output_file[0:-4]+'_'+possible_results[i] +'_val_label','w') as f:
for j in range(0, len(pred)):
video = file_list[j]
                f.write(str(conf[j][i]) + ' # confidence for video ' + video + '\n')
|
normal
|
{
"blob_id": "385dccfab4d7c37d10d968658b51e231691a7b49",
"index": 1556,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n if len(sys.argv) != 5:\n print('Usage: {0} model_file feat_dir feat_dim output_file'.format(\n sys.argv[0]))\n print('model_file -- path of the trained svm file')\n print('feat_dir -- dir of feature files')\n print('file_list_path -- path of list file (val.lst or test.lst)')\n print('output_file -- path to save the prediction score')\n exit(1)\n model_file = sys.argv[1]\n feat_dir = sys.argv[2]\n file_list_path = sys.argv[3]\n output_file = sys.argv[4]\n file_list = []\n with open(file_list_path) as f:\n for line in f.readlines():\n L = line.replace('\\n', ' ').split()\n file_list.append(L[0])\n smodel = pickle.load(open(model_file, 'rb'))\n possible_results = ['NULL', 'P001', 'P002', 'P003']\n pred = []\n conf = []\n print('SVM_MODEL: {}'.format(model_file))\n for file in file_list:\n bow_file = feat_dir + 'bow' + file + '.pkl'\n if os.path.isfile(bow_file):\n with open(bow_file, 'rb') as f:\n data = pickle.load(f)\n pred.extend(smodel.predict([data]))\n conf.extend(smodel.decision_function([data]))\n else:\n pred.extend(['NULL'])\n conf.extend([[1, 0, 0, 0]])\n print('NUM PREDICTION TO TEST: {}'.format(len(pred)))\n with open(output_file, 'w') as f:\n for i in range(0, len(file_list)):\n video = file_list[i]\n f.write(str(video) + ' ' + pred[i] + '\\n')\n for i in range(1, 4):\n print(output_file[0:-4] + '_' + possible_results[i] + '_val_label')\n with open(output_file[0:-4] + '_' + possible_results[i] +\n '_val_label', 'w') as f:\n for j in range(0, len(pred)):\n video = file_list[j]\n if j < len(pred) - 1:\n f.write(str(conf[j][i]) + ' # confidence for video ' +\n video + '\\n')\n else:\n f.write(str(conf[j][i]) + ' # confidence for video ' +\n video + '\\n')\n",
"step-3": "import numpy as np\nimport os\nfrom sklearn.svm.classes import SVC\nimport pickle\nimport sys\nif __name__ == '__main__':\n if len(sys.argv) != 5:\n print('Usage: {0} model_file feat_dir feat_dim output_file'.format(\n sys.argv[0]))\n print('model_file -- path of the trained svm file')\n print('feat_dir -- dir of feature files')\n print('file_list_path -- path of list file (val.lst or test.lst)')\n print('output_file -- path to save the prediction score')\n exit(1)\n model_file = sys.argv[1]\n feat_dir = sys.argv[2]\n file_list_path = sys.argv[3]\n output_file = sys.argv[4]\n file_list = []\n with open(file_list_path) as f:\n for line in f.readlines():\n L = line.replace('\\n', ' ').split()\n file_list.append(L[0])\n smodel = pickle.load(open(model_file, 'rb'))\n possible_results = ['NULL', 'P001', 'P002', 'P003']\n pred = []\n conf = []\n print('SVM_MODEL: {}'.format(model_file))\n for file in file_list:\n bow_file = feat_dir + 'bow' + file + '.pkl'\n if os.path.isfile(bow_file):\n with open(bow_file, 'rb') as f:\n data = pickle.load(f)\n pred.extend(smodel.predict([data]))\n conf.extend(smodel.decision_function([data]))\n else:\n pred.extend(['NULL'])\n conf.extend([[1, 0, 0, 0]])\n print('NUM PREDICTION TO TEST: {}'.format(len(pred)))\n with open(output_file, 'w') as f:\n for i in range(0, len(file_list)):\n video = file_list[i]\n f.write(str(video) + ' ' + pred[i] + '\\n')\n for i in range(1, 4):\n print(output_file[0:-4] + '_' + possible_results[i] + '_val_label')\n with open(output_file[0:-4] + '_' + possible_results[i] +\n '_val_label', 'w') as f:\n for j in range(0, len(pred)):\n video = file_list[j]\n if j < len(pred) - 1:\n f.write(str(conf[j][i]) + ' # confidence for video ' +\n video + '\\n')\n else:\n f.write(str(conf[j][i]) + ' # confidence for video ' +\n video + '\\n')\n",
"step-4": "#!/bin/python \n\nimport numpy as np\nimport os\nfrom sklearn.svm.classes import SVC\nimport pickle\nimport sys\n\n# Apply the SVM model to the testing videos; Output the score for each video\n\nif __name__ == '__main__':\n if len(sys.argv) != 5:\n print(\"Usage: {0} model_file feat_dir feat_dim output_file\".format(sys.argv[0]))\n print(\"model_file -- path of the trained svm file\")\n print(\"feat_dir -- dir of feature files\")\n print(\"file_list_path -- path of list file (val.lst or test.lst)\")\n print(\"output_file -- path to save the prediction score\")\n exit(1)\n\n model_file = sys.argv[1]\n feat_dir = sys.argv[2]\n file_list_path = sys.argv[3]\n output_file = sys.argv[4]\n \n file_list = []\n with open(file_list_path) as f:\n for line in f.readlines():\n L = line.replace('\\n', ' ').split()\n file_list.append(L[0])\n \n smodel = pickle.load(open(model_file,\"rb\"))\n possible_results = ['NULL', 'P001','P002','P003'] \n \n pred = []\n conf = []\n print('SVM_MODEL: {}'.format(model_file))\n for file in file_list:\n bow_file = feat_dir + 'bow' + file + '.pkl'\n if os.path.isfile(bow_file):\n with open(bow_file,'rb') as f:\n data = pickle.load(f)\n pred.extend(smodel.predict([data]))\n conf.extend(smodel.decision_function([data]))\n else:\n pred.extend(['NULL'])\n conf.extend([[1, 0, 0, 0]])\n \n print('NUM PREDICTION TO TEST: {}'.format(len(pred)))\n\n \n with open(output_file,'w') as f:\n for i in range(0, len(file_list)):\n video = file_list[i]\n f.write(str(video) + ' ' + pred[i] + '\\n')\n \n for i in range(1,4):\n# tmp = np.asarray(pred)\n# template = np.zeros(np.size(tmp))\n# with open(possible_results[i] +'_val','w') as f:\n# ind = np.where(tmp == possible_results[i])\n# for j in range(0, len(ind)):\n# template[ind[j]] = 1\n# for j in range(0, len(template)):\n# f.write(str(int(template[j])) +'\\n')\n \n print(output_file[0:-4]+'_'+possible_results[i] +'_val_label')\n with open(output_file[0:-4]+'_'+possible_results[i] +'_val_label','w') as f:\n for j in range(0, len(pred)):\n video = file_list[j]\n if j< len(pred)-1:\n f.write(str(conf[j][i])+' # confidence for video ' + video + '\\n')\n else:\n f.write(str(conf[j][i])+' # confidence for video ' + video + '\\n')\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import TemplateView
from pos.service.sumup import API_URL, create_checkout
from pos.models.sumup import SumUpAPIKey, SumUpOnline
from pos.forms import RemotePayForm
from pos.models.user import User
class RemotePayView(TemplateView):
template_name = 'remotepay/pay.djhtml'
def pay(request):
if request.method == 'POST':
form = RemotePayForm(request.POST)
if form.is_valid():
phone = form.cleaned_data['phone']
amount = form.cleaned_data['amount']
# Check if user exists
try:
user = User.objects.get(phone=phone, is_crew=False)
except User.DoesNotExist:
return render(request, 'remotepay/pay.djhtml', {'form': form, 'error': True})
# Assuming the user exists, we proceed
t = SumUpOnline.objects.create(user=user, amount=amount)
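            # Create a SumUp checkout for the new transaction; the checkout id is
            # stored so the callback view can resolve the final payment status.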
try:
txid = create_checkout(SumUpAPIKey.objects.all().last(), t.id, t.amount, user.phone)
t.transaction_id = txid
t.status = 1
t.save()
return render(request, 'remotepay/process.djhtml', {'txid': txid, 'phone': phone, 'amount': amount})
except:
return render(request, 'remotepay/pay.djhtml', {'form': form, 'systemerror': True})
else:
form = RemotePayForm
return render(request, 'remotepay/pay.djhtml', {'form': form})
def pay_callback(request, checkoutid):
# Get the status of the transaction for the user
t = SumUpOnline.objects.get(transaction_id=checkoutid)
if (t.status == 0 or t.status == 3):
return HttpResponseRedirect('/pay/error/')
elif (t.status == 4):
return HttpResponseRedirect('/pay/success/')
elif (t.status == 1) or (t.status == 2):
return render(request, 'remotepay/hold.djhtml', {'checkoutid': checkoutid})
def pay_success(request):
return render(request, 'remotepay/success.djhtml')
def pay_error(request):
return render(request, 'remotepay/error.djhtml')
def pay_hold(request):
return render(request, 'remotepay/hold.djhtml')
|
normal
|
{
"blob_id": "731d2891bbc29879fd8900a11077c93550e4e88d",
"index": 4251,
"step-1": "<mask token>\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\n<mask token>\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\n<mask token>\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\ndef pay(request):\n if request.method == 'POST':\n form = RemotePayForm(request.POST)\n if form.is_valid():\n phone = form.cleaned_data['phone']\n amount = form.cleaned_data['amount']\n try:\n user = User.objects.get(phone=phone, is_crew=False)\n except User.DoesNotExist:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'error': True})\n t = SumUpOnline.objects.create(user=user, amount=amount)\n try:\n txid = create_checkout(SumUpAPIKey.objects.all().last(), t.\n id, t.amount, user.phone)\n t.transaction_id = txid\n t.status = 1\n t.save()\n return render(request, 'remotepay/process.djhtml', {'txid':\n txid, 'phone': phone, 'amount': amount})\n except:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'systemerror': True})\n else:\n form = RemotePayForm\n return render(request, 'remotepay/pay.djhtml', {'form': form})\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\ndef pay_error(request):\n return render(request, 'remotepay/error.djhtml')\n\n\ndef pay_hold(request):\n return render(request, 'remotepay/hold.djhtml')\n",
"step-4": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom pos.service.sumup import API_URL, create_checkout\nfrom pos.models.sumup import SumUpAPIKey, SumUpOnline\nfrom pos.forms import RemotePayForm\nfrom pos.models.user import User\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\ndef pay(request):\n if request.method == 'POST':\n form = RemotePayForm(request.POST)\n if form.is_valid():\n phone = form.cleaned_data['phone']\n amount = form.cleaned_data['amount']\n try:\n user = User.objects.get(phone=phone, is_crew=False)\n except User.DoesNotExist:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'error': True})\n t = SumUpOnline.objects.create(user=user, amount=amount)\n try:\n txid = create_checkout(SumUpAPIKey.objects.all().last(), t.\n id, t.amount, user.phone)\n t.transaction_id = txid\n t.status = 1\n t.save()\n return render(request, 'remotepay/process.djhtml', {'txid':\n txid, 'phone': phone, 'amount': amount})\n except:\n return render(request, 'remotepay/pay.djhtml', {'form':\n form, 'systemerror': True})\n else:\n form = RemotePayForm\n return render(request, 'remotepay/pay.djhtml', {'form': form})\n\n\ndef pay_callback(request, checkoutid):\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n if t.status == 0 or t.status == 3:\n return HttpResponseRedirect('/pay/error/')\n elif t.status == 4:\n return HttpResponseRedirect('/pay/success/')\n elif t.status == 1 or t.status == 2:\n return render(request, 'remotepay/hold.djhtml', {'checkoutid':\n checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\ndef pay_error(request):\n return render(request, 'remotepay/error.djhtml')\n\n\ndef pay_hold(request):\n return render(request, 'remotepay/hold.djhtml')\n",
"step-5": "from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom pos.service.sumup import API_URL, create_checkout\nfrom pos.models.sumup import SumUpAPIKey, SumUpOnline\n\nfrom pos.forms import RemotePayForm\nfrom pos.models.user import User\n\n\nclass RemotePayView(TemplateView):\n template_name = 'remotepay/pay.djhtml'\n\n\ndef pay(request):\n if request.method == 'POST':\n form = RemotePayForm(request.POST)\n\n if form.is_valid():\n phone = form.cleaned_data['phone']\n amount = form.cleaned_data['amount']\n # Check if user exists\n try:\n user = User.objects.get(phone=phone, is_crew=False)\n except User.DoesNotExist:\n return render(request, 'remotepay/pay.djhtml', {'form': form, 'error': True})\n\n # Assuming the user exists, we proceed\n t = SumUpOnline.objects.create(user=user, amount=amount)\n\n try:\n txid = create_checkout(SumUpAPIKey.objects.all().last(), t.id, t.amount, user.phone)\n t.transaction_id = txid\n t.status = 1\n t.save()\n return render(request, 'remotepay/process.djhtml', {'txid': txid, 'phone': phone, 'amount': amount})\n except:\n return render(request, 'remotepay/pay.djhtml', {'form': form, 'systemerror': True})\n\n else:\n form = RemotePayForm\n\n return render(request, 'remotepay/pay.djhtml', {'form': form})\n\ndef pay_callback(request, checkoutid):\n # Get the status of the transaction for the user\n t = SumUpOnline.objects.get(transaction_id=checkoutid)\n\n if (t.status == 0 or t.status == 3):\n return HttpResponseRedirect('/pay/error/')\n elif (t.status == 4):\n return HttpResponseRedirect('/pay/success/')\n elif (t.status == 1) or (t.status == 2):\n return render(request, 'remotepay/hold.djhtml', {'checkoutid': checkoutid})\n\n\ndef pay_success(request):\n return render(request, 'remotepay/success.djhtml')\n\n\ndef pay_error(request):\n return render(request, 'remotepay/error.djhtml')\n\n\ndef pay_hold(request):\n return render(request, 'remotepay/hold.djhtml')\n",
"step-ids": [
3,
4,
7,
8,
9
]
}
|
[
3,
4,
7,
8,
9
] |
class Circle():
def __init__(self, radius, color="white"):
self.radius = radius
self.color = color
c1 = Circle(10, "black")
print("半径:{}, 色: {}".format(c1.radius, c1.color))
|
normal
|
{
"blob_id": "6ce50552571594c7be77ac0bf3b5274f2f39e545",
"index": 5086,
"step-1": "class Circle:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Circle:\n\n def __init__(self, radius, color='white'):\n self.radius = radius\n self.color = color\n\n\n<mask token>\n",
"step-3": "class Circle:\n\n def __init__(self, radius, color='white'):\n self.radius = radius\n self.color = color\n\n\n<mask token>\nprint('半径:{}, 色: {}'.format(c1.radius, c1.color))\n",
"step-4": "class Circle:\n\n def __init__(self, radius, color='white'):\n self.radius = radius\n self.color = color\n\n\nc1 = Circle(10, 'black')\nprint('半径:{}, 色: {}'.format(c1.radius, c1.color))\n",
"step-5": "class Circle():\n def __init__(self, radius, color=\"white\"):\n self.radius = radius\n self.color = color\n \nc1 = Circle(10, \"black\")\nprint(\"半径:{}, 色: {}\".format(c1.radius, c1.color))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Generates a temperature celsius to fahrenheit conversion table
AT
11-10-2018
"""
__author__ = "Aspen Thompson"
header = "| Celsius | Fahrenheit |"
line = "-" * len(header)
print("{0}\n{1}\n{0}".format(line, header))
for i in range(-10, 31):
print("| {:^7} | {:^10.10} |".format(i, i * 1.8 + 32))
|
normal
|
{
"blob_id": "591d0a166af5b8d0bed851c2f56ecc3da4f3a5eb",
"index": 4367,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('{0}\\n{1}\\n{0}'.format(line, header))\nfor i in range(-10, 31):\n print('| {:^7} | {:^10.10} |'.format(i, i * 1.8 + 32))\n",
"step-3": "<mask token>\n__author__ = 'Aspen Thompson'\nheader = '| Celsius | Fahrenheit |'\nline = '-' * len(header)\nprint('{0}\\n{1}\\n{0}'.format(line, header))\nfor i in range(-10, 31):\n print('| {:^7} | {:^10.10} |'.format(i, i * 1.8 + 32))\n",
"step-4": "\"\"\"\nGenerates a temperature celsius to fahrenheit conversion table\n\nAT\n11-10-2018\n\"\"\"\n\n__author__ = \"Aspen Thompson\"\n\nheader = \"| Celsius | Fahrenheit |\"\nline = \"-\" * len(header)\nprint(\"{0}\\n{1}\\n{0}\".format(line, header))\n\nfor i in range(-10, 31):\n print(\"| {:^7} | {:^10.10} |\".format(i, i * 1.8 + 32))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.conf import settings
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from fish.labinterface.models import *
from registration import signals
from registration.forms import RegistrationForm
from registration.models import RegistrationProfile
from labinterface.models import StaffMember
class CustomRegistrationBackend(object):
def register(self, request, **kwargs):
username, email, password = kwargs['username'], kwargs['email'], kwargs['password1']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
new_user = RegistrationProfile.objects.create_inactive_user(username, email, password, site)
signals.user_registered.send(sender=self.__class__, user=new_user, request=request)
new_profile = StaffMember.objects.get(user=new_user)
new_profile.first_name=kwargs['first_name']
new_profile.last_name=kwargs['last_name']
new_profile.position=kwargs['position']
new_profile.save()
return new_user
def activate(self, request, activation_key):
activated = RegistrationProfile.objects.activate_user(activation_key)
if activated:
signals.user_activated.send(sender=self.__class__,
user=activated,
request=request)
return activated
def registration_allowed(self, request):
"""
Indicate whether account registration is currently permitted,
based on the value of the setting ``REGISTRATION_OPEN``. This
is determined as follows:
* If ``REGISTRATION_OPEN`` is not specified in settings, or is
set to ``True``, registration is permitted.
* If ``REGISTRATION_OPEN`` is both specified and set to
``False``, registration is not permitted.
"""
return getattr(settings, 'REGISTRATION_OPEN', True)
def get_form_class(self, request):
"""
Return the default form class used for user registration.
"""
return RegistrationForm
def post_registration_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
user registration.
"""
return ('registration_complete', (), {})
def post_activation_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
account activation.
"""
newMember = StaffMember.objects.filter(user_id__exact=user.pk).get()
labGroup = LabGroup.objects.filter(pk=1).get()
newMember.lab_group = labGroup
newMember.save()
return ('registration_activation_complete', (), {})
|
normal
|
{
"blob_id": "201279c0cba2d52b6863204bfadb6291a0065f60",
"index": 3961,
"step-1": "<mask token>\n\n\nclass CustomRegistrationBackend(object):\n <mask token>\n\n def activate(self, request, activation_key):\n activated = RegistrationProfile.objects.activate_user(activation_key)\n if activated:\n signals.user_activated.send(sender=self.__class__, user=\n activated, request=request)\n return activated\n\n def registration_allowed(self, request):\n \"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n return getattr(settings, 'REGISTRATION_OPEN', True)\n\n def get_form_class(self, request):\n \"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n return RegistrationForm\n\n def post_registration_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n return 'registration_complete', (), {}\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CustomRegistrationBackend(object):\n\n def register(self, request, **kwargs):\n username, email, password = kwargs['username'], kwargs['email'\n ], kwargs['password1']\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n new_user = RegistrationProfile.objects.create_inactive_user(username,\n email, password, site)\n signals.user_registered.send(sender=self.__class__, user=new_user,\n request=request)\n new_profile = StaffMember.objects.get(user=new_user)\n new_profile.first_name = kwargs['first_name']\n new_profile.last_name = kwargs['last_name']\n new_profile.position = kwargs['position']\n new_profile.save()\n return new_user\n\n def activate(self, request, activation_key):\n activated = RegistrationProfile.objects.activate_user(activation_key)\n if activated:\n signals.user_activated.send(sender=self.__class__, user=\n activated, request=request)\n return activated\n\n def registration_allowed(self, request):\n \"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n return getattr(settings, 'REGISTRATION_OPEN', True)\n\n def get_form_class(self, request):\n \"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n return RegistrationForm\n\n def post_registration_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n return 'registration_complete', (), {}\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CustomRegistrationBackend(object):\n\n def register(self, request, **kwargs):\n username, email, password = kwargs['username'], kwargs['email'\n ], kwargs['password1']\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n new_user = RegistrationProfile.objects.create_inactive_user(username,\n email, password, site)\n signals.user_registered.send(sender=self.__class__, user=new_user,\n request=request)\n new_profile = StaffMember.objects.get(user=new_user)\n new_profile.first_name = kwargs['first_name']\n new_profile.last_name = kwargs['last_name']\n new_profile.position = kwargs['position']\n new_profile.save()\n return new_user\n\n def activate(self, request, activation_key):\n activated = RegistrationProfile.objects.activate_user(activation_key)\n if activated:\n signals.user_activated.send(sender=self.__class__, user=\n activated, request=request)\n return activated\n\n def registration_allowed(self, request):\n \"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n return getattr(settings, 'REGISTRATION_OPEN', True)\n\n def get_form_class(self, request):\n \"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n return RegistrationForm\n\n def post_registration_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n return 'registration_complete', (), {}\n\n def post_activation_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\taccount activation.\n\t\t\n\t\t\"\"\"\n newMember = StaffMember.objects.filter(user_id__exact=user.pk).get()\n labGroup = LabGroup.objects.filter(pk=1).get()\n newMember.lab_group = labGroup\n newMember.save()\n return 'registration_activation_complete', (), {}\n",
"step-4": "from django.conf import settings\nfrom django.contrib.sites.models import RequestSite\nfrom django.contrib.sites.models import Site\nfrom fish.labinterface.models import *\nfrom registration import signals\nfrom registration.forms import RegistrationForm\nfrom registration.models import RegistrationProfile\nfrom labinterface.models import StaffMember\n\n\nclass CustomRegistrationBackend(object):\n\n def register(self, request, **kwargs):\n username, email, password = kwargs['username'], kwargs['email'\n ], kwargs['password1']\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n new_user = RegistrationProfile.objects.create_inactive_user(username,\n email, password, site)\n signals.user_registered.send(sender=self.__class__, user=new_user,\n request=request)\n new_profile = StaffMember.objects.get(user=new_user)\n new_profile.first_name = kwargs['first_name']\n new_profile.last_name = kwargs['last_name']\n new_profile.position = kwargs['position']\n new_profile.save()\n return new_user\n\n def activate(self, request, activation_key):\n activated = RegistrationProfile.objects.activate_user(activation_key)\n if activated:\n signals.user_activated.send(sender=self.__class__, user=\n activated, request=request)\n return activated\n\n def registration_allowed(self, request):\n \"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n return getattr(settings, 'REGISTRATION_OPEN', True)\n\n def get_form_class(self, request):\n \"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n return RegistrationForm\n\n def post_registration_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n return 'registration_complete', (), {}\n\n def post_activation_redirect(self, request, user):\n \"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\taccount activation.\n\t\t\n\t\t\"\"\"\n newMember = StaffMember.objects.filter(user_id__exact=user.pk).get()\n labGroup = LabGroup.objects.filter(pk=1).get()\n newMember.lab_group = labGroup\n newMember.save()\n return 'registration_activation_complete', (), {}\n",
"step-5": "from django.conf import settings\nfrom django.contrib.sites.models import RequestSite\nfrom django.contrib.sites.models import Site\n\nfrom fish.labinterface.models import *\n\nfrom registration import signals\nfrom registration.forms import RegistrationForm\nfrom registration.models import RegistrationProfile\nfrom labinterface.models import StaffMember\n\n\nclass CustomRegistrationBackend(object):\n\tdef register(self, request, **kwargs):\n\t\tusername, email, password = kwargs['username'], kwargs['email'], kwargs['password1']\n\t\tif Site._meta.installed:\n\t\t\tsite = Site.objects.get_current()\n\t\telse:\n\t\t\tsite = RequestSite(request)\n\t\tnew_user = RegistrationProfile.objects.create_inactive_user(username, email, password, site)\n\t\tsignals.user_registered.send(sender=self.__class__, user=new_user, request=request)\n\t\tnew_profile = StaffMember.objects.get(user=new_user)\n\t\tnew_profile.first_name=kwargs['first_name']\n\t\tnew_profile.last_name=kwargs['last_name']\n\t\tnew_profile.position=kwargs['position']\n\t\tnew_profile.save()\n\t\treturn new_user\n\tdef activate(self, request, activation_key):\n\t\tactivated = RegistrationProfile.objects.activate_user(activation_key)\n\t\tif activated:\n\t\t\tsignals.user_activated.send(sender=self.__class__,\n\t\t\t\t\t\t\t\t\t\tuser=activated,\n\t\t\t\t\t\t\t\t\t\trequest=request)\n\t\treturn activated\n\n\tdef registration_allowed(self, request):\n\t\t\"\"\"\n\t\tIndicate whether account registration is currently permitted,\n\t\tbased on the value of the setting ``REGISTRATION_OPEN``. This\n\t\tis determined as follows:\n\n\t\t* If ``REGISTRATION_OPEN`` is not specified in settings, or is\n\t\tset to ``True``, registration is permitted.\n\n\t\t* If ``REGISTRATION_OPEN`` is both specified and set to\n\t\t``False``, registration is not permitted.\n\t\t\n\t\t\"\"\"\n\t\treturn getattr(settings, 'REGISTRATION_OPEN', True)\n\n\tdef get_form_class(self, request):\n\t\t\"\"\"\n\t\tReturn the default form class used for user registration.\n\t\t\n\t\t\"\"\"\n\t\treturn RegistrationForm\n\n\tdef post_registration_redirect(self, request, user):\n\t\t\"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\tuser registration.\n\t\t\n\t\t\"\"\"\n\t\treturn ('registration_complete', (), {})\n\n\tdef post_activation_redirect(self, request, user):\n\t\t\"\"\"\n\t\tReturn the name of the URL to redirect to after successful\n\t\taccount activation.\n\t\t\n\t\t\"\"\"\n\t\tnewMember = StaffMember.objects.filter(user_id__exact=user.pk).get()\n\t\tlabGroup = LabGroup.objects.filter(pk=1).get()\n\t\tnewMember.lab_group = labGroup\n\t\tnewMember.save()\n\t\treturn ('registration_activation_complete', (), {})",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# inserting logical unit ids for splitting texts into logical chunks
import re
import os
splitter = "#META#Header#End#"
def logical_units(file):
ar_ra = re.compile("^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$")
with open(file, "r", encoding="utf8") as f1:
book = f1.read()
# splitter test
if splitter in book:
# logical units
        log_ids = re.findall(r"\n#\d+#", book)
if len(log_ids) > 0:
print("\tthe text already have %d logical units of this length" % len(log_ids))
pass
else:
# insert logical unit ids
new_data = []
head = book.split(splitter)[0]
text = book.split(splitter)[1]
token_count = 0
data = re.findall(r"\w+|\W+", text)
word_len = len(str(len(data)))
data_len = len(data)
for i in range(0, data_len):
if "\n#" in data[i]:
if "Page" in data[i + 1]:# or ar_token_cnt(ar_ra, data[i + 1]) <= 0:
new_data.append(data[i])
else:
last = data[i].rfind("#")
token_cnt_str = str(token_count + 1)
if len(token_cnt_str) < word_len:
tmp_cnt = token_cnt_str.zfill(word_len)
else:
tmp_cnt = token_cnt_str
tmp = data[i][:last] + "#" + tmp_cnt + data[i][last:]
new_data.append(tmp)
elif ar_token_cnt(ar_ra, data[i]):
token_count += 1
new_data.append(data[i])
else:
new_data.append(data[i])
log_text = "".join(new_data)
log_text = head + splitter + log_text
with open(file + "_logical", "w", encoding="utf8") as f:
f.write(log_text)
else:
print("The file is missing the splitter!")
print(file)
def ar_token_cnt(ar_ra, text):
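    # Count the tokens in `text` that consist entirely of Arabic characters.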
return sum(ar_ra.search(t) is not None for t in re.findall(r"\w+|\W+", text))
# process all texts in OpenITI
def process_all(folder):
exclude = (["OpenITI.github.io", "Annotation", "_maintenance", "i.mech"])
for root, dirs, files in os.walk(folder):
# print("root: ",root)
dirs[:] = [d for d in dirs if d not in exclude]
# print("dir: ",dirs)
for file in files:
            if re.search(r"^\d{4}\w+\.\w+\.\w+-ara\d$", file):
logical_units(os.path.join(root, file))
# return
# input()
# /media/rostam/Seagate Backup Plus Drive
# process_all("/home/rostam/projs/KITAB/test")
# print("Done!")
|
normal
|
{
"blob_id": "5c001303962315afe2512eb307376f6f7a883cf9",
"index": 6831,
"step-1": "<mask token>\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-2": "<mask token>\n\n\ndef logical_units(file):\n ar_ra = re.compile(\n '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'\n )\n with open(file, 'r', encoding='utf8') as f1:\n book = f1.read()\n if splitter in book:\n log_ids = re.findall('\\n#\\\\d+#', book)\n if len(log_ids) > 0:\n print(\n '\\tthe text already have %d logical units of this length' %\n len(log_ids))\n pass\n else:\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n data = re.findall('\\\\w+|\\\\W+', text)\n word_len = len(str(len(data)))\n data_len = len(data)\n for i in range(0, data_len):\n if '\\n#' in data[i]:\n if 'Page' in data[i + 1]:\n new_data.append(data[i])\n else:\n last = data[i].rfind('#')\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + '#' + tmp_cnt + data[i][last\n :]\n new_data.append(tmp)\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n log_text = ''.join(new_data)\n log_text = head + splitter + log_text\n with open(file + '_logical', 'w', encoding='utf8') as f:\n f.write(log_text)\n else:\n print('The file is missing the splitter!')\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall('\\\\w+|\\\\W+',\n text))\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-3": "<mask token>\nsplitter = '#META#Header#End#'\n\n\ndef logical_units(file):\n ar_ra = re.compile(\n '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'\n )\n with open(file, 'r', encoding='utf8') as f1:\n book = f1.read()\n if splitter in book:\n log_ids = re.findall('\\n#\\\\d+#', book)\n if len(log_ids) > 0:\n print(\n '\\tthe text already have %d logical units of this length' %\n len(log_ids))\n pass\n else:\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n data = re.findall('\\\\w+|\\\\W+', text)\n word_len = len(str(len(data)))\n data_len = len(data)\n for i in range(0, data_len):\n if '\\n#' in data[i]:\n if 'Page' in data[i + 1]:\n new_data.append(data[i])\n else:\n last = data[i].rfind('#')\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + '#' + tmp_cnt + data[i][last\n :]\n new_data.append(tmp)\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n log_text = ''.join(new_data)\n log_text = head + splitter + log_text\n with open(file + '_logical', 'w', encoding='utf8') as f:\n f.write(log_text)\n else:\n print('The file is missing the splitter!')\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall('\\\\w+|\\\\W+',\n text))\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-4": "import re\nimport os\nsplitter = '#META#Header#End#'\n\n\ndef logical_units(file):\n ar_ra = re.compile(\n '^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$'\n )\n with open(file, 'r', encoding='utf8') as f1:\n book = f1.read()\n if splitter in book:\n log_ids = re.findall('\\n#\\\\d+#', book)\n if len(log_ids) > 0:\n print(\n '\\tthe text already have %d logical units of this length' %\n len(log_ids))\n pass\n else:\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n data = re.findall('\\\\w+|\\\\W+', text)\n word_len = len(str(len(data)))\n data_len = len(data)\n for i in range(0, data_len):\n if '\\n#' in data[i]:\n if 'Page' in data[i + 1]:\n new_data.append(data[i])\n else:\n last = data[i].rfind('#')\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + '#' + tmp_cnt + data[i][last\n :]\n new_data.append(tmp)\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n log_text = ''.join(new_data)\n log_text = head + splitter + log_text\n with open(file + '_logical', 'w', encoding='utf8') as f:\n f.write(log_text)\n else:\n print('The file is missing the splitter!')\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall('\\\\w+|\\\\W+',\n text))\n\n\ndef process_all(folder):\n exclude = ['OpenITI.github.io', 'Annotation', '_maintenance', 'i.mech']\n for root, dirs, files in os.walk(folder):\n dirs[:] = [d for d in dirs if d not in exclude]\n for file in files:\n if re.search('^\\\\d{4}\\\\w+\\\\.\\\\w+\\\\.\\\\w+-ara\\\\d$', file):\n logical_units(os.path.join(root, file))\n",
"step-5": "# inserting logical unit ids for splitting texts into logical chunks\n\nimport re\nimport os\n\nsplitter = \"#META#Header#End#\"\n\n\ndef logical_units(file):\n ar_ra = re.compile(\"^[ذ١٢٣٤٥٦٧٨٩٠ّـضصثقفغعهخحجدًٌَُلإإشسيبلاتنمكطٍِلأأـئءؤرلاىةوزظْلآآ]+$\")\n\n with open(file, \"r\", encoding=\"utf8\") as f1:\n book = f1.read()\n\n # splitter test\n if splitter in book:\n # logical units\n log_ids = re.findall(\"\\n#\\d+#\", book)\n if len(log_ids) > 0:\n print(\"\\tthe text already have %d logical units of this length\" % len(log_ids))\n pass\n else:\n # insert logical unit ids\n new_data = []\n head = book.split(splitter)[0]\n text = book.split(splitter)[1]\n token_count = 0\n\n data = re.findall(r\"\\w+|\\W+\", text)\n word_len = len(str(len(data)))\n data_len = len(data)\n\n for i in range(0, data_len):\n if \"\\n#\" in data[i]:\n if \"Page\" in data[i + 1]:# or ar_token_cnt(ar_ra, data[i + 1]) <= 0:\n new_data.append(data[i])\n else:\n last = data[i].rfind(\"#\")\n token_cnt_str = str(token_count + 1)\n if len(token_cnt_str) < word_len:\n tmp_cnt = token_cnt_str.zfill(word_len)\n else:\n tmp_cnt = token_cnt_str\n tmp = data[i][:last] + \"#\" + tmp_cnt + data[i][last:]\n new_data.append(tmp)\n\n elif ar_token_cnt(ar_ra, data[i]):\n token_count += 1\n new_data.append(data[i])\n else:\n new_data.append(data[i])\n\n log_text = \"\".join(new_data)\n log_text = head + splitter + log_text\n\n with open(file + \"_logical\", \"w\", encoding=\"utf8\") as f:\n f.write(log_text)\n\n else:\n print(\"The file is missing the splitter!\")\n print(file)\n\n\ndef ar_token_cnt(ar_ra, text):\n return sum(ar_ra.search(t) is not None for t in re.findall(r\"\\w+|\\W+\", text))\n\n\n# process all texts in OpenITI\n\n\ndef process_all(folder):\n exclude = ([\"OpenITI.github.io\", \"Annotation\", \"_maintenance\", \"i.mech\"])\n for root, dirs, files in os.walk(folder):\n # print(\"root: \",root)\n dirs[:] = [d for d in dirs if d not in exclude]\n # print(\"dir: \",dirs)\n for file in files:\n if re.search(\"^\\d{4}\\w+\\.\\w+\\.\\w+-ara\\d$\", file):\n logical_units(os.path.join(root, file))\n # return\n # input()\n\n\n# /media/rostam/Seagate Backup Plus Drive\n# process_all(\"/home/rostam/projs/KITAB/test\")\n\n# print(\"Done!\")\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
def solve(bt):
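    # Backtracking: grow the sequence one digit (1-3) at a time and print the
    # first length-n sequence in which no suffix is made of two identical adjacent blocks.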
if len(bt) == n:
print(*bt, sep="")
exit()
for i in [1, 2, 3]:
if is_good(bt + [i]):
solve(bt + [i])
def is_good(arr):
for i in range(1, len(arr)//2+1):
if arr[-i:] == arr[-(i*2):-i]:
return False
return True
if __name__ == "__main__":
n = int(input())
solve([1])
|
normal
|
{
"blob_id": "65d5cee6899b0b75474e3898459bf2cfa8b3635b",
"index": 1042,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "def solve(bt):\n if len(bt) == n:\n print(*bt, sep='')\n exit()\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\n<mask token>\n",
"step-4": "def solve(bt):\n if len(bt) == n:\n print(*bt, sep='')\n exit()\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\nif __name__ == '__main__':\n n = int(input())\n solve([1])\n",
"step-5": "def solve(bt):\n if len(bt) == n:\n print(*bt, sep=\"\")\n exit()\n\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr)//2+1):\n if arr[-i:] == arr[-(i*2):-i]:\n return False\n return True\n\nif __name__ == \"__main__\":\n n = int(input())\n\n solve([1])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright (C) 2019-Antti Kärki.
# Author: Antti Kärki.
#
# You can modify it under the terms of the GNU AFFERO
# GENERAL PUBLIC LICENSE (AGPL v3), Version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE (AGPL v3) for more details.
#
# You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
# (AGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from odoo import api, fields, models
from odoo import exceptions
import logging
_logger = logging.getLogger(__name__)
class rocker_connection():
@api.multi
def create_connection(self):
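        # Build a DB-API connection for the datasource record, importing the
        # client library lazily for the selected driver.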
_database_record = self
_datasource = _database_record.name
_driver = _database_record.driver
_odbcdriver = _database_record.odbcdriver
_sid = _database_record.database
_database = _database_record.database
_host = _database_record.host
_port = _database_record.port
_user = _database_record.user
_password = _database_record.password
con = None
_logger.info('Connecting to database: ' + _database)
try:
if _driver == 'postgresql':
try:
import psycopg2
except:
raise exceptions.ValidationError('No Postgres drivers')
con = psycopg2.connect(host=_host, port=_port, database=_database, user=_user, password=_password)
elif _driver == "mysql":
try:
import mysql.connector
except:
raise exceptions.ValidationError('No MySQL drivers')
con = mysql.connector.connect(host=_host, port=_port, database=_database, user=_user,
password=_password)
elif _driver == "mariadb":
try:
import mysql.connector
except:
raise exceptions.ValidationError('No MariaDB drivers')
con = mysql.connector.connect(host=_host, port=_port, database=_database, user=_user,
password=_password)
elif _driver == "oracle":
try:
import cx_Oracle
except:
raise exceptions.ValidationError('No Oracle drivers')
con = cx_Oracle.connect(_user + '/' + _password + '@//' + _host + ':' + _port + '/' + _sid)
elif _driver == "sqlserver":
try:
import pyodbc
except:
raise exceptions.ValidationError('No SQLServer (ODBC) drivers')
_logger.debug(
'DRIVER={' + _odbcdriver + '};SERVER=' + _host + ';DATABASE=' + _database + ';UID=' + _user + ';PWD=' + _password)
con = pyodbc.connect(
'DRIVER={' + _odbcdriver + '};SERVER=' + _host + ';DATABASE=' + _database + ';UID=' + _user + ';PWD=' + _password)
self._sqldriver = 'sqlserver'
elif _driver == "odbc":
try:
import pyodbc
except:
raise exceptions.ValidationError('No ODBC drivers')
_logger.debug(
'DRIVER={' + _odbcdriver + '};SERVER=' + _host + ';DATABASE=' + _database + ';UID=' + _user + ';PWD=' + _password)
con = pyodbc.connect(
'DRIVER={' + _odbcdriver + '};SERVER=' + _host + ';DATABASE=' + _database + ';UID=' + _user + ';PWD=' + _password)
self._sqldriver = 'odbc'
else:
raise exceptions.ValidationError('Driver not supported')
except:
raise exceptions.ValidationError('Database connection failed')
return con
|
normal
|
{
"blob_id": "96131e3d6c67c0ee4ff7f69d4ffedcbf96470f14",
"index": 7069,
"step-1": "<mask token>\n\n\nclass rocker_connection:\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass rocker_connection:\n\n @api.multi\n def create_connection(self):\n _database_record = self\n _datasource = _database_record.name\n _driver = _database_record.driver\n _odbcdriver = _database_record.odbcdriver\n _sid = _database_record.database\n _database = _database_record.database\n _host = _database_record.host\n _port = _database_record.port\n _user = _database_record.user\n _password = _database_record.password\n con = None\n _logger.info('Connecting to database: ' + _database)\n try:\n if _driver == 'postgresql':\n try:\n import psycopg2\n except:\n raise exceptions.ValidationError('No Postgres drivers')\n con = psycopg2.connect(host=_host, port=_port, database=\n _database, user=_user, password=_password)\n elif _driver == 'mysql':\n try:\n import mysql.connector\n except:\n raise exceptions.ValidationError('No MySQL drivers')\n con = mysql.connector.connect(host=_host, port=_port,\n database=_database, user=_user, password=_password)\n elif _driver == 'mariadb':\n try:\n import mysql.connector\n except:\n raise exceptions.ValidationError('No MariaDB drivers')\n con = mysql.connector.connect(host=_host, port=_port,\n database=_database, user=_user, password=_password)\n elif _driver == 'oracle':\n try:\n import cx_Oracle\n except:\n raise exceptions.ValidationError('No Oracle drivers')\n con = cx_Oracle.connect(_user + '/' + _password + '@//' +\n _host + ':' + _port + '/' + _sid)\n elif _driver == 'sqlserver':\n try:\n import pyodbc\n except:\n raise exceptions.ValidationError(\n 'No SQLServer (ODBC) drivers')\n _logger.debug('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n con = pyodbc.connect('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n self._sqldriver = 'sqlserver'\n elif _driver == 'odbc':\n try:\n import pyodbc\n except:\n raise exceptions.ValidationError('No ODBC drivers')\n _logger.debug('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n con = pyodbc.connect('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n self._sqldriver = 'odbc'\n else:\n raise exceptions.ValidationError('Driver not supported')\n except:\n raise exceptions.ValidationError('Database connection failed')\n return con\n",
"step-3": "<mask token>\n_logger = logging.getLogger(__name__)\n\n\nclass rocker_connection:\n\n @api.multi\n def create_connection(self):\n _database_record = self\n _datasource = _database_record.name\n _driver = _database_record.driver\n _odbcdriver = _database_record.odbcdriver\n _sid = _database_record.database\n _database = _database_record.database\n _host = _database_record.host\n _port = _database_record.port\n _user = _database_record.user\n _password = _database_record.password\n con = None\n _logger.info('Connecting to database: ' + _database)\n try:\n if _driver == 'postgresql':\n try:\n import psycopg2\n except:\n raise exceptions.ValidationError('No Postgres drivers')\n con = psycopg2.connect(host=_host, port=_port, database=\n _database, user=_user, password=_password)\n elif _driver == 'mysql':\n try:\n import mysql.connector\n except:\n raise exceptions.ValidationError('No MySQL drivers')\n con = mysql.connector.connect(host=_host, port=_port,\n database=_database, user=_user, password=_password)\n elif _driver == 'mariadb':\n try:\n import mysql.connector\n except:\n raise exceptions.ValidationError('No MariaDB drivers')\n con = mysql.connector.connect(host=_host, port=_port,\n database=_database, user=_user, password=_password)\n elif _driver == 'oracle':\n try:\n import cx_Oracle\n except:\n raise exceptions.ValidationError('No Oracle drivers')\n con = cx_Oracle.connect(_user + '/' + _password + '@//' +\n _host + ':' + _port + '/' + _sid)\n elif _driver == 'sqlserver':\n try:\n import pyodbc\n except:\n raise exceptions.ValidationError(\n 'No SQLServer (ODBC) drivers')\n _logger.debug('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n con = pyodbc.connect('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n self._sqldriver = 'sqlserver'\n elif _driver == 'odbc':\n try:\n import pyodbc\n except:\n raise exceptions.ValidationError('No ODBC drivers')\n _logger.debug('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n con = pyodbc.connect('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n self._sqldriver = 'odbc'\n else:\n raise exceptions.ValidationError('Driver not supported')\n except:\n raise exceptions.ValidationError('Database connection failed')\n return con\n",
"step-4": "from odoo import api, fields, models\nfrom odoo import exceptions\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass rocker_connection:\n\n @api.multi\n def create_connection(self):\n _database_record = self\n _datasource = _database_record.name\n _driver = _database_record.driver\n _odbcdriver = _database_record.odbcdriver\n _sid = _database_record.database\n _database = _database_record.database\n _host = _database_record.host\n _port = _database_record.port\n _user = _database_record.user\n _password = _database_record.password\n con = None\n _logger.info('Connecting to database: ' + _database)\n try:\n if _driver == 'postgresql':\n try:\n import psycopg2\n except:\n raise exceptions.ValidationError('No Postgres drivers')\n con = psycopg2.connect(host=_host, port=_port, database=\n _database, user=_user, password=_password)\n elif _driver == 'mysql':\n try:\n import mysql.connector\n except:\n raise exceptions.ValidationError('No MySQL drivers')\n con = mysql.connector.connect(host=_host, port=_port,\n database=_database, user=_user, password=_password)\n elif _driver == 'mariadb':\n try:\n import mysql.connector\n except:\n raise exceptions.ValidationError('No MariaDB drivers')\n con = mysql.connector.connect(host=_host, port=_port,\n database=_database, user=_user, password=_password)\n elif _driver == 'oracle':\n try:\n import cx_Oracle\n except:\n raise exceptions.ValidationError('No Oracle drivers')\n con = cx_Oracle.connect(_user + '/' + _password + '@//' +\n _host + ':' + _port + '/' + _sid)\n elif _driver == 'sqlserver':\n try:\n import pyodbc\n except:\n raise exceptions.ValidationError(\n 'No SQLServer (ODBC) drivers')\n _logger.debug('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n con = pyodbc.connect('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n self._sqldriver = 'sqlserver'\n elif _driver == 'odbc':\n try:\n import pyodbc\n except:\n raise exceptions.ValidationError('No ODBC drivers')\n _logger.debug('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n con = pyodbc.connect('DRIVER={' + _odbcdriver + '};SERVER=' +\n _host + ';DATABASE=' + _database + ';UID=' + _user +\n ';PWD=' + _password)\n self._sqldriver = 'odbc'\n else:\n raise exceptions.ValidationError('Driver not supported')\n except:\n raise exceptions.ValidationError('Database connection failed')\n return con\n",
"step-5": "# -*- coding: utf-8 -*-\r\n#############################################################################\r\n#\r\n# Copyright (C) 2019-Antti Kärki.\r\n# Author: Antti Kärki.\r\n#\r\n# You can modify it under the terms of the GNU AFFERO\r\n# GENERAL PUBLIC LICENSE (AGPL v3), Version 3.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU AFFERO GENERAL PUBLIC LICENSE (AGPL v3) for more details.\r\n#\r\n# You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE\r\n# (AGPL v3) along with this program.\r\n# If not, see <http://www.gnu.org/licenses/>.\r\n#\r\n#############################################################################\r\n\r\n\r\nfrom odoo import api, fields, models\r\nfrom odoo import exceptions\r\nimport logging\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass rocker_connection():\r\n\r\n @api.multi\r\n def create_connection(self):\r\n\r\n _database_record = self\r\n _datasource = _database_record.name\r\n _driver = _database_record.driver\r\n _odbcdriver = _database_record.odbcdriver\r\n _sid = _database_record.database\r\n _database = _database_record.database\r\n _host = _database_record.host\r\n _port = _database_record.port\r\n _user = _database_record.user\r\n _password = _database_record.password\r\n\r\n con = None\r\n _logger.info('Connecting to database: ' + _database)\r\n\r\n try:\r\n if _driver == 'postgresql':\r\n try:\r\n import psycopg2\r\n except:\r\n raise exceptions.ValidationError('No Postgres drivers')\r\n con = psycopg2.connect(host=_host, port=_port, database=_database, user=_user, password=_password)\r\n elif _driver == \"mysql\":\r\n try:\r\n import mysql.connector\r\n except:\r\n raise exceptions.ValidationError('No MySQL drivers')\r\n con = mysql.connector.connect(host=_host, port=_port, database=_database, user=_user,\r\n password=_password)\r\n elif _driver == \"mariadb\":\r\n try:\r\n import mysql.connector\r\n except:\r\n raise exceptions.ValidationError('No MariaDB drivers')\r\n con = mysql.connector.connect(host=_host, port=_port, database=_database, user=_user,\r\n password=_password)\r\n elif _driver == \"oracle\":\r\n try:\r\n import cx_Oracle\r\n except:\r\n raise exceptions.ValidationError('No Oracle drivers')\r\n con = cx_Oracle.connect(_user + '/' + _password + '@//' + _host + ':' + _port + '/' + _sid)\r\n elif _driver == \"sqlserver\":\r\n try:\r\n import pyodbc\r\n except:\r\n raise exceptions.ValidationError('No SQLServer (ODBC) drivers')\r\n _logger.debug(\r\n 'DRIVER={' + _odbcdriver + '};SERVER=' + _host + ';DATABASE=' + _database + ';UID=' + _user + ';PWD=' + _password)\r\n con = pyodbc.connect(\r\n 'DRIVER={' + _odbcdriver + '};SERVER=' + _host + ';DATABASE=' + _database + ';UID=' + _user + ';PWD=' + _password)\r\n self._sqldriver = 'sqlserver'\r\n elif _driver == \"odbc\":\r\n try:\r\n import pyodbc\r\n except:\r\n raise exceptions.ValidationError('No ODBC drivers')\r\n _logger.debug(\r\n 'DRIVER={' + _odbcdriver + '};SERVER=' + _host + ';DATABASE=' + _database + ';UID=' + _user + ';PWD=' + _password)\r\n con = pyodbc.connect(\r\n 'DRIVER={' + _odbcdriver + '};SERVER=' + _host + ';DATABASE=' + _database + ';UID=' + _user + ';PWD=' + _password)\r\n self._sqldriver = 'odbc'\r\n else:\r\n raise exceptions.ValidationError('Driver not supported')\r\n except:\r\n raise exceptions.ValidationError('Database connection failed')\r\n return 
con\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pandas as pd
def missing_value_count_and_percent(df):
    """
    Return the number and percent of missing values for each column.
    Args:
        df (Dataframe): A dataframe with many columns
    Return:
        df (Dataframe): A dataframe with one column showing the number of missing values and one column showing the percentage of missing values, rounded to 4 digits
    """
    df = pd.concat({'num_missing_values': df.isnull().sum(), 'pct_missing_values': df.isnull().mean().round(4)}, axis=1)
    return df
|
normal
|
{
"blob_id": "88c304f224ab60062582abbfa1146a651e1233e6",
"index": 183,
"step-1": "def missing_value_count_and_percent(df):\n \"\"\"\n Return the number and percent of missing values for each column. \n\n Args:\n df (Dataframe): A dataframe with many columns\n \n Return:\n df (Dataframe): A dataframe with one column showing number of missing values, one column showing percentage of missing values with 4 digits\n \n \"\"\"\n df = pd.concat({'num_missing_values':df.isnull().sum(), 'pct_missing_values':df.isnull().mean().round(4)}, axis=1)\n)\n return df",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
@author: chris
Modified from THOMAS MCTAVISH (2010-11-04).
mpiexec -f ~/machinefile -enable-x -n 96 python Population.py --noplot
"""
from __future__ import with_statement
from __future__ import division
import sys
sys.path.append('../NET/sheff/weasel/')
sys.path.append('../NET/sheffprk/template/')
import os
#use_pc = True
import sys
argv = sys.argv
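# "-python" shows up in sys.argv when the script is launched through NEURON (nrniv/special -python);
# in that case rank/nhost come from NEURON's ParallelContext below, otherwise from mpi4py.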
if "-python" in argv:
use_pc = True
else:
use_pc = False
if use_pc == True:
from neuron import h
pc = h.ParallelContext()
rank = int(pc.id())
nhost = pc.nhost()
else:
from mpi4py import MPI
from neuron import h
rank = MPI.COMM_WORLD.rank
#print sys.version
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-o', action='store', dest='opt')
parser.add_argument('--noplot', action='store_true')
parser.add_argument('--norun', action='store_true')
parser.add_argument('--noconst', action='store_true')
parser.add_argument('--noqual', action='store_true')
    pars, unknown = parser.parse_known_args()  # tolerate unknown launcher flags (e.g. -python or MPI arguments)
if __name__ == '__main__':
import matplotlib
if rank == 0:
matplotlib.use('Tkagg', warn=True)
else:
matplotlib.use('Agg', warn=True)
if __name__ == '__main__':
do_plot = 1
    if pars.noplot: # do not plot to windows
matplotlib.use('Agg', warn=True)
if rank == 0: print "- No plotting"
do_plot = 0
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import random as rnd
import neuronpy.util.spiketrain
#set_printoptions(threshold='nan')
from Stimulation import *
from Stimhelp import *
from units import *
from cells.PassiveCell import *
from itertools import izip
try:
import cPickle as pickle
except:
import pickle
import gzip
import h5py
from templates.synapse.synapse import Synapse
from synapsepfpurk import Synapse as Synapse2
if use_pc is False: import mdp
import time as ttime
from scipy.optimize import fmin, leastsq
from NeuroTools import stgen, signals
import md5
#from guppy import hpy
#hpy = hpy()
class Population:
"""
A population of N cells
"""
def __init__(self, cellimport = [], celltype = None, N = [10], temperature = 6.3, cell_exe = 0, ihold = [0*nA], ihold_sigma = [0*nA], amp = [0*nA], amod = [None], anoise = [None], give_freq = False, do_run = 1, pickle_prefix = "default", istart = 0, istop = 0.07, di = 0.001, dt = 0.025*ms, use_mpi = True, use_pc = False):
"""
:param N: Number of cells.
:param fluct_m:
:param fluct_s:
:param fluct_tau:
"""
self.use_pc = use_pc
if type(celltype) is not list: celltype = [celltype] #convert to list if it is not given as one
self.celltype = celltype
if type(cell_exe) is not list: cell_exe = [cell_exe] #convert to list if it is not given as one
self.cell_exe = cell_exe
if cellimport is not None:
if cellimport == []:
for n in range(len(celltype)):
cellimport.append("from cells." + self.celltype[n] + " import *")
self.cellimport = cellimport
if type(N) is not list: N = [N]
self.N = N # Total number of cells in the net
self.n_celltypes = len(self.N)
self.a_celltype = [0] # celltype to analyse
self.factor_celltype = [1]*self.n_celltypes
self.set_init(ihold, ihold_sigma, amp, amod)
self.CF_var = False
self.inh_hold_sigma = [0]
self.intr_hold_sigma = [0]
#self.sigma_inh_hold = 0
#self.sigma_ihold = 0
if type(anoise) is not list: anoise = [anoise]*self.n_celltypes
if len(anoise) < self.n_celltypes: anoise = [anoise[0]]*self.n_celltypes
self.anoise = anoise # RUN self.set_i()
self.give_freq = give_freq # RUN self.set_i()
self.temperature = temperature
self.gid_count = 0
self.gidlist = [] # List of global identifiers on this host
self.global_gidlist = [] # List of global identifiers
self.cells = [] # Cells on this host
self.t_vec = []
self.id_vec = []
self.rec_v = []
for n in range(self.n_celltypes):
if use_mpi:
self.t_vec.append(h.Vector()) # np.array([0])
self.id_vec.append(h.Vector()) # np.array([-1], dtype=int)
else:
self.t_vec.append([])
self.rec_v.append(h.Vector())
#self.t_vec = h.Vector(np.array([0])) # Spike time of all cells on this host
#self.id_vec = h.Vector(np.array([-1])) # Ids of spike times on this host
self.flucts = [] # Fluctuating inputs on this host
self.fluct_m = 0 # [nA]
self.fluct_s = [0] # [nA]
self.fluct_tau = 0*ms # [ms]
self.noises = [] # Random number generators on this host
self.plays = [] # Play inputs on this host
self.rec_is = []
self.trains = []
self.vecstim = []
self.nc_vecstim = []
self.spike_vec = []
self.syn_tau1 = 5*ms # Synapse of virtual target neuron
self.syn_tau2 = 5*ms # Synapse of virtual target neuron
self.tmax = 10*sec # maximum length of plot that should be plotted!!
self.nc_delay = 0 #500*ms # only important if syn_output is used, not used currently
self.dt = dt
self.bin_width = dt
self.jitter = 0*ms
self.delta_t = 0*ms
self.istart = istart
self.istop = istop
self.di = di
self.ic_holds = []
self.i_holdrs = []
self.i_holds = []
self.ic_starts = []
self.vc_starts = []
self.ic_steps = []
self.rec_step = []
self.tvecs = []
self.ivecs = []
self.noises = []
self.record_syn = []
self.id_all_vec_input = []
self.t_all_vec_input = []
if len(self.N) == len(self.cell_exe) == len(self.celltype):
pass
else:
raise ValueError('N, cell_exe, celltype do NOT have equal length!')
self.use_mpi = use_mpi
self.use_pc = use_pc
if self.use_mpi:
#### Make a new ParallelContext object
self.pc = h.ParallelContext()
self.id = self.pc.id()
self.nhost = int(self.pc.nhost())
if self.use_pc == False:
s = "mpi4py thinks I am %d of %d on %s, NEURON thinks I am %d of %d\n"
processorname = MPI.Get_processor_name()
self.comm = MPI.COMM_WORLD
if self.id == 0:
print s % (self.comm.rank, self.comm.size, processorname, self.id, self.nhost)
else:
s = "NEURON thinks I am %d of %d\n"
if self.id == 0:
print s % (self.id, self.nhost)
self.barrier()
else:
self.id = 0
self.nhost = 1
self.do_run = do_run
self.first_run = True
self.set_numcells() # Build the portion of cells on this host.
self.pickle_prefix = pickle_prefix
# plot options
self.ymax = 0
self.ax = None
self.linewidth = 1.5
self.color_vec = None
self.alpha = 0.8
self.method_interpol = np.array(['bin','syn'])
self.dumpsave = 1
self.called_syn_out_all = False
self.no_fmean=False
self.tau1_ex=[0*ms]*self.n_celltypes
self.tau2_ex=[10*ms]*self.n_celltypes
self.tau1_inh=[0*ms]*self.n_celltypes
self.tau2_inh=[100*ms]*self.n_celltypes
self.n_syn_ex = [0]*self.n_celltypes
self.g_syn_ex = [1]*self.n_celltypes
self.g_syn_ex_s = [0]*self.n_celltypes
self.mglufac_ex = [1,0]
self.noise_syn = [0]*self.n_celltypes
self.noise_syn_tau = [0*ms]*self.n_celltypes
self.noise_syn_inh = [0]*self.n_celltypes
self.noise_syn_tau_inh = [0*ms]*self.n_celltypes
self.noise_a = [1e9]*self.n_celltypes
self.noise_a_inh = [1e9]*self.n_celltypes
self.inh_hold = [0]*self.n_celltypes
self.n_syn_inh = [0]*self.n_celltypes
self.g_syn_inh = [1]*self.n_celltypes
self.g_syn_inh_s = [0]*self.n_celltypes
self.intr_hold = [0]*self.n_celltypes
self.n_syn_intr = [0]*self.n_celltypes
self.g_syn_intr = [0]*self.n_celltypes
self.syn_max_mf = [1]*self.n_celltypes # possible mossy fibres per synapse
self.syn_max_inh = [1]*self.n_celltypes # possible Golgi cells per synapse
self.syn_max_intr = [1]*self.n_celltypes # possible Intruding cells per synapse
self.seed = 50
self.force_run = False
self.give_psd = False
self.do_if = True
self.fluct_g_e0 = []
self.fluct_g_i0 = []
self.fluct_std_e = []
self.fluct_std_i = []
self.fluct_tau_e = []
self.fluct_tau_i = []
self.adjinh = True # adjust inhibition to get CFo instead of g_ex
self.adjfinh = True # adjust frequnecy of inhibition to get CFo instead of g_ex
self.syn_ex_dist = []
self.syn_inh_dist = []
self.stdp_used = False
self.xmax = 20
self.use_multisplit = False
self.use_local_dt = False
self.simstep = 0
self.plot_train = True
self.inh_delay = 0 # in ms
self.plot_input = True
self.delay_baseline = 8
self.tstop_if = 1
self.gsyn_in_fac = []
self.netcons = [] # keeping track of!
self.nclist = []
self.ST_stims = []
self.PF_stims = []
self.data_dir = "./data"
self.minimal_dir = False
def set_init(self, ihold, ihold_sigma, amp, amod):
# important for all methods:
if type(ihold) is not list: ihold = [ihold] #convert to list if it is not given as one
self.ihold = ihold
self.ihold_orig = ihold
if type(amp) is not list: amp = [amp]
if len(amp) < self.n_celltypes: amp = [amp[0]]*self.n_celltypes
self.amp = amp
if type(amod) is not list: amod = [amod]*self.n_celltypes
self.amod = amod # RUN self.set_i()
self.ihold_sigma = ihold_sigma
def barrier(self):
if self.use_mpi:
if self.use_pc == True:
self.pc.barrier()
else:
self.comm.Barrier()
def broadcast(self, vec, root = 0, fast = False):
if self.use_mpi:
if self.use_pc:
if fast:
hvec = h.Vector(vec)
v = self.pc.broadcast(hvec,root)
vec = np.array(hvec)
else:
sendlist = [None]*self.nhost
if self.id == root:
for i in range(self.nhost):
sendlist[i] = vec
getlist = self.pc.py_alltoall(sendlist)
vec = getlist[root]
else:
#vec = np.array(vec, dtype=np.float64)
#self.comm.Bcast([vec, MPI.DOUBLE])
vec = self.comm.bcast(vec, root=0)
return vec
def set_numcells(self, N = []):
"""
Create, layout, and connect N cells.
"""
self.set_gids(N)
self.create_cells()
#self.syn_output() # generate synaptic "output" in neuron
#self.connect_cells()
def set_gids(self, N = []):
"""Set the gidlist on this host.
Round-robin counting. Each host as an id from 0 to pc.nhost()-1.
Example:
if N = 5 cells and nhost() = 3
node id() = 0 will get cells [0, 3]
node id() = 1 will get cells [1, 4]
node id() = 2 will get cells [2]
"""
self.gidlist = []
if N == []:
N = self.N
# borders where another celltype begins
self.global_gidlist = []
self.n_borders = [0]
for l in range(1,self.n_celltypes+1):
self.n_borders.append(sum(N[0:l]))
self.global_gidlist.append(range(self.n_borders[-2], self.n_borders[-1]))
for n in range(self.n_celltypes): # create list in list
self.gidlist.append([])
for i in range(int(self.id), sum(N), int(self.nhost)): # loop over all cells
n = np.where((np.array(self.n_borders)-i)>0)[0][0]-1 # find out what cell type this is
self.gidlist[n].append(i) # put in specific gidlist for that celltype
self.gid_count = self.gid_count + sum(N)
if self.id == 0: print "nodeid:" , self.id , ", gidlist:" , self.gidlist , ", total gids:" , len(self.global_gidlist) , ", sum(N):" , sum(N) # check gids of node
def del_cells(self):
if self.cells != []:
for n in range(self.n_celltypes):
for m in self.cells[n]:
print "deleting cell", m
del m
del self.cells
self.cells = []
if self.use_mpi: self.pc.gid_clear()
def create_cells(self):
"""
Create cell objects on this host.
"""
if self.do_run:
self.del_cells()
if self.id == 0: print "creating cells"
for n in range(self.n_celltypes):
self.cells.append([]) # create list in list
#print self.cellimport[n]
exec self.cellimport[n]
#print self.gidlist
for i in self.gidlist[n]:
#if "sigma" not in self.cell_exe[n]:
# exec self.cell_exe[n]
# cell.gid = i # tell cell it's gid!
# print i
#else:
if (self.celltype[n] == "IfCell") or (self.celltype[n] == "Grc"):
# add gid to cell and execute!
if self.cell_exe[n][-2] == "(":
exec self.cell_exe[n][0:-1] + "gid=" + str(i) + ")"
else:
exec self.cell_exe[n][0:-1] + ", gid=" + str(i) + ")"
else:
exec self.cell_exe[n]
cell.gid = i
self.cells[n].append(cell) # add to (local) list
if self.use_mpi:
#### Tell this host it has this gid
#### gids can be any integer, they just need to be unique.
#### In this simple case, we set the gid to i.
self.pc.set_gid2node(i, int(self.id))
self.pc.cell(i, cell.nc_spike) # Associate the cell with this host and gid
## NOT NECESSARY ANYMORE ##
#### Means to tell the ParallelContext that this cell is a source.
#nc = cell.connect_target(None)
#self.ncs[n].append(nc)
#### Record spikes of this cell
self.pc.spike_record(i, self.t_vec[n], self.id_vec[n])
#print n, self.cells[n][-1].nc_spike.thresh
else:
self.t_vec[n].append(h.Vector())
cell.nc_spike.record(self.t_vec[n][-1])
def connect_cells(self, conntype=[], stdp=[], tend=1e9):
"""
Connect cells as specified.
"""
if self.do_run:
stdp = stdp[:]
conntype = conntype[:]
if len(stdp) == 0:
for i in conntype:
stdp.append({'wmax':0, 'taupre':0, 'taupost':0, 'apre':0, 'apost':0})
else:
self.stdp_used = True
for i, conn in enumerate(conntype):
typ = conn['type']
conv = conn['conv']
src = conn['src']
tgt = conn['tgt']
w0 = conn['w']
var = conn['var']
tau1 = conn['tau1']
tau2 = conn['tau2']
if 'mgr2' in conn.keys():
mgr2 = conn['mgr2']
mgr2_var = conn['mgr2_var']
else:
mgr2 = 0
mgr2_var = 0
if 'e_inh' in conn.keys():
e_inh = conn['e_inh']
else:
e_inh = -65
if 'e_ex' in conn.keys():
e_ex = conn['e_ex']
else:
e_ex = 0
wmax = stdp[i]['wmax']
taupre = stdp[i]['taupre']
taupost = stdp[i]['taupost']
apre = stdp[i]['apre']
apost = stdp[i]['apost']
# Connect conv cells of celltype src to every cell of celltype tgt
for ni, i in enumerate(self.cells[tgt]):
rnd.seed(i.gid*10*self.seed)
if conv >= len(self.global_gidlist[src]):
gids = self.global_gidlist[src]
if self.id == 0: print "more or equal conv to len(self.global_gidlist[src])"
else:
gids = rnd.sample(self.global_gidlist[src],conv)
if self.id == 0: print conn['type'], ":", ni, ":", gids[0], "\n"
for ng, g in enumerate(gids):
np.random.seed(g*12)
#np.random.seed(int(g%10+1)*12)
if len(shape(w0))>0: # array is given
print "w array is given"
if len(w0[ng]) == self.N[0]:
w = w0[ng][ni]
elif (var > 0) and (w0>0):
w = np.random.normal(w0, w0*var, 1).clip(min=0)
else:
w = w0
if (mgr2_var > 0) and (mgr2>0):
mg = np.random.normal(mgr2, mgr2*mgr2_var, 1).clip(min=0)
else:
mg = mgr2
#print conn['type'], ":", i.gid, ":", g, ", w:", w, "\n"
if self.celltype[tgt] == 'IfCell':
if typ == 'gogr':
i.whatami = "grc"
i.synlist_inh.append(Synapse('goc', i, i.soma, nrel=0, record_all=0, weight_gmax=w))
i0 = int(len(i.synlist_inh)-1)
i.nc_inh.append(self.pc.gid_connect(g, i.synlist_inh[i0].input))
i.nc_inh[-1].delay = 1
i.nc_inh[-1].weight[0] = 1
if typ == 'grgo':
i.whatami = "goc"
i.synlist.append(Synapse('grc', i, i.soma, syntype = 'D', nrel=0, record_all=0, weight_gmax=w))
e0 = int(len(i.synlist)-1)
i.nc.append(self.pc.gid_connect(g, i.synlist[e0].input))
i.nc[-1].delay = 1
i.nc[-1].weight[0] = 1
if typ == 'grgom':
i.whatami = "goc"
i.synlist.append(Synapse('grc', i, i.soma, syntype = 'DM', nrel=0, record_all=0, weight_gmax=w, mglufac = mg))
e0 = int(len(i.synlist)-1)
i.nc.append(self.pc.gid_connect(g, i.synlist[e0].input))
i.nc[-1].delay = 1
i.nc[-1].weight[0] = 1
if typ == 'e2inh':
i.create_synapses(n_inh=1, tau1_inh=tau1, tau2_inh=tau2, e_inh=e_inh, w = w, wmax = wmax, taupre = taupre, taupost = taupost, apre = apre, apost = apost, tend=tend)
i0 = len(i.synlist_inh)-1
if self.use_mpi:
if wmax == 0:
i.pconnect_target(self.pc, source=g, target=i0, syntype='inh', weight=w, delay=1)
else:
i.pconnect_target(self.pc, source=g, target=i0, syntype='inh', weight=1, delay=1)
else:
if wmax == 0:
i.nc_inh.append(self.cells[1][g-self.N[0]].connect_target(target=i.synlist_inh[i0], weight=w, delay=1))
else:
i.nc_inh.append(self.cells[1][g-self.N[0]].connect_target(target=i.synlist_inh[i0], weight=1, delay=1))
if typ == 'e2ex':
i.create_synapses(n_ex = 1, tau1 = tau1, tau2 = tau2, e_ex=e_ex, w = w, wmax = wmax, taupre = taupre, taupost = taupost, apre = apre, apost = apost, tend=tend)
e0 = len(i.synlist)-1
if self.use_mpi:
if wmax == 0:
i.pconnect_target(self.pc, source=g, target=e0, syntype='ex', weight=w, delay=1)
else:
i.pconnect_target(self.pc, source=g, target=e0, syntype='ex', weight=1, delay=1)
else:
if wmax == 0:
i.nc.append(self.cells[0][g].connect_target(target=i.synlist[e0], weight=w, delay=1))
else:
i.nc.append(self.cells[0][g].connect_target(target=i.synlist[e0], weight=1, delay=1))
else: # No IfCell
if typ == 'gogr':
i.createsyn(ngoc = 1, weight_gmax=w) # multiplication factor
i0 = len(i.GOC_L)-1 # get number of current synapse!
i.pconnect(self.pc,g,i0,'goc')
if typ == 'grgo':
i.createsyn(ngrc = 1, weight_gmax=w) # multiplication factor
i0 = len(i.GRC_L)-1 # get number of current synapse!
i.pconnect(self.pc,g,i0,'grc',conduction_speed=0,grc_positions=[1])
if typ == 'grgom':
#print w, mg
i.createsyn(ngrcm = 1, weight_gmax=w, mglufac = mg) # multiplication factor
i0 = len(i.GRC_L)-1 # get number of current synapse!
i.pconnect(self.pc,g,i0,'grc',conduction_speed=0,grc_positions=[1])
if typ == 'grstl':
i.createsyn(ngrc = 1, weight_gmax=w) # multiplication factor
i0 = len(i.GRC_L)-1 # get number of current synapse!
i.pconnect(self.pc,g,i0,'grc',conduction_speed=0,grc_positions=[1])
if 'e2' in typ:
if 'inh' in typ:
Erev = -65
elif 'ex' in typ:
Erev = 0
if tau1 == 0:
syn = h.ExpSyn(i.soma(0.5))
syn.tau = tau2/ms
else:
if wmax == 0:
syn = h.Exp2Syn(i.soma(0.5))
syn.tau1 = tau1/ms
syn.tau2 = tau2/ms
else: # STDP
syn = h.stdpE2S(i.soma(0.5))
syn.tau1 = tau1/ms
syn.tau2 = tau2/ms
syn.on = 1
syn.thresh = -20
syn.wmax = wmax
syn.w = w
syn.taupre = taupre/ms
syn.taupost = taupost/ms
syn.apre = apre
syn.apost = apost
syn.e = Erev/mV
if self.celltype[tgt] == 'Grc':
i.GOC_L.append(syn)
i0 = int(len(i.GOC_L)-1) # get number of current synapse!
i.gocncpc.append(self.pc.gid_connect(g, i.GOC_L[i0]))
i.gocncpc[-1].delay = 1
if wmax == 0:
i.gocncpc[-1].weight[0] = w
else:
i.gocncpc[-1].weight[0] = 1
elif self.celltype[tgt] == 'Goc':
i.GRC_L.append(syn)
e0 = int(len(i.GRC_L)-1) # get number of current synapse!
i.pfncpc.append(self.pc.gid_connect(g, i.GRC_L[e0]))
i.pfncpc[-1].delay = 1
i.pfncpc[-1].weight[0] = w
if wmax == 0:
i.pfncpc[-1].weight[0] = w
else:
i.pfncpc[-1].weight[0] = 1
#self.rec_s1 = h.Vector()
#self.rec_s1.record(self.cells[0][0].synlist_inh[0]._ref_g)
#self.rec_s2 = h.Vector()
#self.rec_s2.record(self.cells[1][0].synlist_inh[0]._ref_g)
def syn_output(self):
"""
Connect cell n to target cell sum(self.N) + 100.
"""
if self.id == 0: # create target cell
tgt_gid = self.gid_count
self.gid_count = self.gid_count + 1
# Synaptic integrated response
self.rec_g = h.Vector()
self.passive_target = PassiveCell()
if self.use_mpi: self.pc.set_gid2node(tgt_gid, 0) # Tell this host it has this gid
syn = self.passive_target.create_synapses(tau1 = self.syn_tau1, tau2 = self.syn_tau2) # if tau1=tau2: alpha synapse!
for i in range(self.n_borders[self.a_celltype[0]],self.n_borders[self.a_celltype[0]+1]): # take all cells, corresponding to self.a_celltype, not just the ones in self.gidlist:
src_gid = i
if self.use_mpi:
nc = self.pc.gid_connect(src_gid, syn)
nc.weight[0] = 1
nc.delay = self.nc_delay/ms #0.05 # MUST be larger than dt!!!
else:
nc = self.cells[self.a_celltype[0]][src_gid].connect_target(target=syn, weight=1, delay=self.nc_delay/ms)
self.nclist.append(nc)
self.rec_g.record(syn._ref_g)
def syn_out_all(self, tau1 = 1*ms, tau2 = 30*ms):
if self.do_run:
for n in range(self.n_celltypes):
for i, gid in enumerate(self.gidlist[n]):
self.cells[n][i].start_record(tau1 = tau1/ms, tau2 = tau2/ms)
self.called_syn_out_all = True
def get_i(self, a, n, do_plot = True):
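        # Return the holding current(s) (nA) needed for the target rate(s) in `a` (Hz) for celltype n:
        # the cell's f-I curve is simulated once and cached on disk (keyed by an md5 of the cell
        # constructor string), then interpolated/extrapolated with if_extrap.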
import md5
m = md5.new()
if ", sigma" in self.cell_exe[n]:
cell_exe_new = self.cell_exe[n].split(", sigma")[0] + ")"
else:
cell_exe_new = self.cell_exe[n]
m.update(cell_exe_new)
filename = self.data_dir + '/if_' + self.celltype[n] + '_' + m.hexdigest() + '.p'
#print filename
if self.id == 0:
is_there = os.path.isfile(filename)
else:
is_there = None
is_there = self.broadcast(is_there)
if (is_there is not True) or (self.force_run is True): # run i/f estimation
if self.id == 0: print '- running i/f estimation for ', self.celltype[n], ' id: ' , m.hexdigest()
exec self.cellimport[n]
exec cell_exe_new
sim = Stimulation(cell, temperature = self.temperature, use_multisplit = self.use_multisplit)
sim.spikes_from_neuron = False
sim.celltype = self.celltype[n]
current_vector, freq_vector, freq_onset_vector = sim.get_if(istart = self.istart, istop = self.istop, di = self.di, tstop = self.tstop_if)
sim = None
cell = None
if self.id == 0:
if do_plot:
plt.figure(99)
plt.plot(current_vector, freq_vector, 'r*-')
plt.plot(current_vector, freq_onset_vector, 'b*-')
plt.savefig("./figs/dump/latest_if_" + self.celltype[n] + ".pdf", dpi = 300) # save it
plt.clf()
#plt.show()
ifv = {'i':current_vector,'f':freq_vector}
print ifv
pickle.dump(ifv, gzip.GzipFile(filename, "wb" ))
self.barrier()
else:
if self.id == 0:
ifv = pickle.load(gzip.GzipFile(filename, "rb" ))
#print ifv
self.barrier()
if self.id == 0:
f = ifv.get('f')
i = ifv.get('i')
i = i[~isnan(f)]
f = f[~isnan(f)]
iin = if_extrap(a, f, i)
else:
iin = [0]
iin = self.broadcast(iin, root=0, fast = True)
self.barrier()
return iin
def set_i(self, ihold = [0]):
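        # When give_freq is set, ihold is given as a firing rate (Hz) and converted to a holding
        # current via the cached f-I curve; amod/anoise are fractional modulation/noise depths and
        # are converted into current amplitudes (self.amp) and fluctuation sizes (self.fluct_s).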
ihold = list(ihold)
self.ihold_orig = list(ihold)
self.barrier() # wait for other nodes
# Ihold given as frequency, convert to current
if ((self.give_freq)):
ihold0 = [[] for _ in range(self.n_celltypes)]
for n in range(self.n_celltypes):
a = np.array([ihold[n]])
#print "a:", a
iin = self.get_i(a, n)
#print "iin:", iin
ihold0[n] = iin[0]
if self.id == 0: print '- ihold: ', ihold, 'Hz, => ihold: ', ihold0, 'nA'
# Modulation depth given, not always applied to current!
for n in range(self.n_celltypes):
if self.amod[n] is not None:
if self.give_freq:
# Apply to amplitude:
a = np.array([ihold[n]]) + self.amod[n]*np.array([ihold[n]])
self.amp[n] = self.get_i(a, n) - ihold0[n]
if self.id == 0:
print '- amp: ihold: ', ihold[n], 'Hz , amod: ', self.amod[n], ', => amp: ', self.amp[n], 'nA (' #, self.get_i(a, n), ')'
elif self.n_syn_ex[n] > 0:
if self.id == 0:
print '- amp: ihold: ', ihold[n], 'Hz , amod: ', self.amod[n], ', => amp will be set for each spike generator'
else:
self.amp[n] = self.amod[n] * ihold[n]
if self.id == 0:
print '- amp: ihold: ', ihold[n], 'nA , amod: ', self.amod[n], ', => amp: ', self.amp[n], 'nA'
# Noise depth given, not always applied to current!
if self.anoise[n] is not None:
if (self.give_freq is True) or (self.n_syn_ex[n] > 0):
# Apply to amplitude:
a = np.array([ihold[n]]) + self.anoise[n]*np.array([ihold[n]])
self.fluct_s[n] = ((self.get_i(a, n) - ihold0[n]))/2. # adjust with /2 so that noise = +-2*std
if self.id == 0:
print '- noise: ihold: ', ihold[n], 'Hz , anoise: ', self.anoise[n], ', => fluct_s: ', self.fluct_s[n], 'nA'
else:
self.fluct_s[n] = self.anoise[n] * ihold[n]
if self.id == 0:
print '- noise: ihold: ', ihold[n], 'nA , anoise: ', self.anoise[n], ', => fluct_s: ', self.fluct_s[n], 'nA'
if self.give_freq is True:
ihold = ihold0
return ihold
def calc_fmean(self, t_vec, t_startstop):
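        # Mean firing rate, ISI coefficient of variation and ISI standard deviation for the spikes
        # in t_vec that fall between t_startstop[0] and t_startstop[1] (s); spike times are
        # converted to ms for the NeuroTools SpikeTrain.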
#t_startstop[0] = 1
#t_startstop[1] = 5
f_cells_mean = 0
f_cells_cv = np.nan
f_cells_std = np.nan
if len(t_vec) > 0:
f_start_in = mlab.find(t_vec >= t_startstop[0]) # 1
f_stop_in = mlab.find(t_vec <= t_startstop[1]) # 5
if (len(f_start_in) > 0) & (len(f_stop_in) > 0):
f_start = f_start_in[0]
f_stop = f_stop_in[-1]+1
use_spikes = t_vec[f_start:f_stop]*1e3
if len(use_spikes) > 1:
s1 = signals.SpikeTrain(use_spikes)
isi = s1.isi()
f_cells_mean = s1.mean_rate() # use mean of single cells
f_cells_cv = np.std(isi)/np.mean(isi)
f_cells_std = np.std(isi)
#f_start_in = mlab.find(t_vec >= 1)
#f_stop_in = mlab.find(t_vec <= 2)
#if (len(f_start_in) > 0) & (len(f_stop_in) > 0):
# f_start = f_start_in[0]
# f_stop = f_stop_in[-1]+1
# use_spikes = t_vec[f_start:f_stop]*1e3
# if len(use_spikes) > 1:
# s1 = signals.SpikeTrain(use_spikes)
# isi = s1.isi()
# f_cells_cv = np.std(isi)/np.mean(isi)
return f_cells_mean, f_cells_cv, f_cells_std
def get_fmean(self, t_all_vec_vecn, id_all_vec_vecn, t_startstop, gidlist, facborder = 3): # 1e9
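        # Compute per-cell rates/CVs locally, gather them to rank 0 and average there; cells whose
        # mean rate exceeds fmean + facborder*fmstd are returned in gid_del so they can be excluded.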
f_cells_mean = zeros(len(gidlist))
f_cells_base = zeros(len(gidlist))
f_cells_std = nans(len(gidlist))
f_cells_cv = nans(len(gidlist))
f_cells_gid = nans(len(gidlist))
fbase = np.nan
fmean = np.nan
fmax = np.nan
fmstd = np.nan
fcvm = np.nan
fstdm = np.nan
f_cells_mean_all = []
f_cells_base_all = []
f_cells_cv_all = []
f_cells_std_all = []
gid_del = np.array([])
if self.no_fmean == False:
if self.id == 0: print "- sorting for fmean"
for i, l in enumerate(gidlist):
t_0_vec = t_all_vec_vecn[where(id_all_vec_vecn==l)]
f_cells_mean[i], f_cells_cv[i], f_cells_std[i] = self.calc_fmean(t_0_vec, t_startstop)
f_cells_base[i], _, _ = self.calc_fmean(t_0_vec, [self.delay_baseline-4,self.delay_baseline])
f_cells_gid[i] = l
if self.id == 0: print "- gather fmean"
f_cells_mean_all = self.do_gather(f_cells_mean)
f_cells_base_all = self.do_gather(f_cells_base)
f_cells_std_all = self.do_gather(f_cells_std)
f_cells_cv_all = self.do_gather(f_cells_cv)
f_cells_gid_all = self.do_gather(f_cells_gid)
if self.id == 0:
#print f_cells_mean_all
f_cells_mean_all = np.nan_to_num(f_cells_mean_all)
fmean = mean(f_cells_mean_all) # compute mean of mean rate for all cells
fmstd = std(f_cells_mean_all)
fmax = max(f_cells_mean_all)
f_cells_base_all = np.nan_to_num(f_cells_base_all)
fbase = mean(f_cells_base_all) # compute mean of mean rate for all cells
f_cells_cv_all = f_cells_cv_all[~np.isnan(f_cells_cv_all)]
f_cells_std_all = f_cells_std_all[~np.isnan(f_cells_std_all)]
fcvm = mean(f_cells_cv_all)
fstdm = mean(f_cells_std_all)
print "- get_fmean, fmean: ",fmean, "fmax: ",fmax, "Hz", "fmstd: ",fmstd, "Hz", "fcvm: ",fcvm, "fstdm: ",fstdm, "Hz" ,"fbase: ", fbase, "Hz"
if facborder < 1e9:
fborder = fmean + facborder*fmstd
i = mlab.find(f_cells_mean_all > fborder)
gid_del = f_cells_gid_all[i]
# f_cells_mean_all[i] = 0
# f_cells_cv_all[i] = np.nan
# f_cells_std_all[i] = np.nan
# fmean2 = mean(np.nan_to_num(f_cells_mean_all)) # compute mean of mean rate for all cells
# fmstd2 = std(np.nan_to_num(f_cells_mean_all))
# fmax2 = max(np.nan_to_num(f_cells_mean_all))
# fcvm2 = mean(f_cells_cv_all[~np.isnan(f_cells_cv_all)])
# fstdm2 = mean(f_cells_std_all[~np.isnan(f_cells_std_all)])
# print "- after facborder: get_fmean, fmean: ",fmean2, "fmax: ",fmax2, "Hz", "fmstd: ",fmstd2, "Hz", "fcvm: ",fcvm2, "fstdm: ",fstdm2, "Hz, gid_del: ", gid_del
return fmean, fmax, fmstd, fcvm, fstdm, gid_del, f_cells_mean_all, f_cells_cv_all, f_cells_std_all, fbase, f_cells_base_all
def connect_fluct(self):
"""
Create fluctuating input onto every cell.
"""
if self.do_run:
for m in self.flucts:
del m
del self.flucts
for m in self.noises:
del m
del self.noises
self.flucts = []
self.noises = []
for n in range(self.n_celltypes):
for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
#h.mcell_ran4_init(gid)
noiseRandObj = h.Random() # provides NOISE with random stream
self.noises.append(noiseRandObj) # has to be set here not inside the nmodl function!!
# print str(gid) + ": " + str(noiseRandObj.normal(0,1))
fluct = h.Ifluct2(self.cells[n][i].soma(0.5))
fluct.m = self.fluct_m/nA # [nA]
fluct.s = self.fluct_s[n]/nA # [nA]
fluct.tau = self.fluct_tau/ms # [ms]
self.flucts.append(fluct) # add to list
self.flucts[-1].noiseFromRandom(self.noises[-1]) # connect random generator!
self.noises[-1].MCellRan4(1, gid+1) # set lowindex to gid+1, set highindex to > 0
self.noises[-1].normal(0,1)
def connect_gfluct(self, E_e=0, E_i=-65):
"""
Create fluctuating conductance input onto every cell.
"""
if self.do_run:
for m in self.flucts:
del m
del self.flucts
for m in self.noises:
del m
del self.noises
self.flucts = []
self.noises = []
for n in range(self.n_celltypes):
fluct_g_i0_n = self.fluct_g_i0[n]
if type(fluct_g_i0_n) is not ndarray: fluct_g_i0_n = np.array([fluct_g_i0_n])
if len(fluct_g_i0_n) == len(self.global_gidlist[n]):
pass
else:
fluct_g_i0_n = np.ones(int(len(self.global_gidlist[n])))*fluct_g_i0_n[0]
if self.id == 0: print "- single value in fluct_g_i0_n"
#print fluct_g_i0_n
for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
#h.mcell_ran4_init(gid)
noiseRandObj = h.Random() # provides NOISE with random stream
self.noises.append(noiseRandObj) # has to be set here not inside the nmodl function!!
# print str(gid) + ": " + str(noiseRandObj.normal(0,1))
fluct = h.Gfluct3(self.cells[n][i].soma(0.5))
fluct.E_e = E_e/mV # [mV]
fluct.E_i = E_i/mV # [mV]
fluct.g_e0 = self.fluct_g_e0[n]/uS # [uS]
fluct.g_i0 = fluct_g_i0_n[i]/uS # [uS]
fluct.std_e = self.fluct_std_e[n]/uS # [uS]
fluct.std_i = self.fluct_std_i[n]/uS # [uS]
fluct.tau_e = self.fluct_tau_e/ms #tau_e/ms # [ms]
fluct.tau_i = self.fluct_tau_i/ms #tau_i/ms # [ms]
self.flucts.append(fluct) # add to list
self.flucts[-1].noiseFromRandom(self.noises[-1]) # connect random generator!
self.noises[-1].MCellRan4(1, gid+1) # set lowindex to gid+1, set highindex to > 0
self.noises[-1].normal(0,1)
def connect_synfluct(self, PF_BG_rate=6, PF_BG_cv=1, STL_BG_rate=20, STL_BG_cv=1):
"""
Create fluctuating synaptic input onto every cell.
"""
if self.do_run:
for m in self.ST_stims:
del m
del self.ST_stims
for m in self.PF_stims:
del m
del self.PF_stims
self.ST_stims = []
self.PF_stims = []
for n in range(self.n_celltypes):
for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
PF_syn_list = self.cells[n][i].createsyn_PF()
for d in PF_syn_list:
d.input.newnetstim.number = 1e9
d.input.newnetstim.noise = PF_BG_cv
d.input.newnetstim.interval = 1000.0 / PF_BG_rate
d.input.newnetstim.start = 0
self.PF_stims.append(PF_syn_list)
ST_stim_list = self.cells[n][i].createsyn_ST(record_all=0)
for d in ST_stim_list:
d.newnetstim.number = 1e9
d.newnetstim.noise = STL_BG_cv
d.newnetstim.interval = 1000.0 / STL_BG_rate
d.newnetstim.start = 0
self.ST_stims.append(ST_stim_list)
if self.id == 0: print "- PF and ST stimulation added."
def set_IStim(self, ihold = None, ihold_sigma = None, random_start = True, tstart_offset = 0):
"""
Add (random) ihold for each cell and offset!
"""
if self.do_run:
# if not given, use the one in self
if ihold == None:
ihold = self.ihold
if ihold_sigma == None:
ihold_sigma = self.ihold_sigma
if ihold[self.a_celltype[0]] != 0:
ihold = self.set_i(ihold)
for m in self.ic_holds:
#m.destroy()
del m
del self.ic_holds
for m in self.ic_starts:
#m.destroy()
del m
del self.ic_starts
for m in self.vc_starts:
#m.destroy()
del m
del self.vc_starts
self.ic_holds = []
self.ic_starts = []
self.vc_starts = []
self.i_holdrs = []
self.i_holds = ihold
for n in range(self.n_celltypes):
self.i_holdrs.append([])
for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
np.random.seed(gid*20)
tis = 1
if random_start == True:
# random start time
tstart = np.random.uniform(tstart_offset+0, tstart_offset+0.5)
#if self.id == 0: print "tstart:", tstart
vc_start = h.SEClamp(self.cells[n][i].soma(0.5))
vc_start.dur1 = tstart/ms
vc_start.amp1 = -80
self.vc_starts.append(vc_start)
tis = 0
else:
tis = 0
if ihold_sigma[n] != 0:
#print ihold_sigma[n], ihold[n]
ihold_r = np.random.normal(ihold[n], ihold[n]*ihold_sigma[n], 1).clip(min=0)
#ihold_r = np.random.uniform(ihold[n]*ihold_sigma[n], ihold[n])
elif self.CF_var is not False: # CF gets not adapted to current but final frequnecy!
r_ok = False
while r_ok == False:
r_temp = np.random.normal(self.ihold_orig[n], self.CF_var[n][1], 1)
if (r_temp <= self.CF_var[n][2]) and (r_temp >= self.CF_var[n][0]): # check borders!
r_ok = True
#print r_temp
ihold_r = self.get_i(r_temp, n)
#print ihold_r
#if self.id == 0:
print "set self.CF_var", r_temp, ihold_r
else: # same ihold for all cells!
ihold_r = ihold[n]
self.i_holdrs[n].append(ihold_r)
if ihold_r != 0:
if hasattr(self.cells[n][i], 'input_vec'):
ic_hold = []
for vec in self.cells[n][i].input_vec:
for inv in vec:
#print ihold_r
ic_hold.append(h.IClamp(inv(0.5)))
ic_hold[-1].amp = self.cells[n][i].ifac * ihold_r / self.cells[n][i].n_input_spiny / nA
ic_hold[-1].delay = tis/ms
ic_hold[-1].dur = 1e9
else:
# holding current
ic_hold = h.IClamp(self.cells[n][i].soma(0.5))
ic_hold.delay = tis/ms
ic_hold.dur = 1e9
ic_hold.amp = ihold_r/nA
self.ic_holds.append(ic_hold)
if self.id == 0: print "set_IStim finished. ihold: ", ihold, ", ihold_sigma: ", ihold_sigma
def set_IStep(self, istep = [0], istep_sigma = [0], tstep = 5, tdur = 1e6, give_freq = True):
"""
Add istep for each cell and offset!
"""
if self.do_run:
#for m in self.ic_steps:
# m.destroy()
# del m
#del self.ic_steps
#self.ic_steps = []
istep = list(istep)
neg = False
for n in range(self.n_celltypes):
if istep[n] < 0:
neg = True
istep[n] = abs(istep[n]) # make positive again
if istep[n] != 0:
if give_freq is True:
a = np.array([istep[n]])
iin = self.get_i(a, n)[0]
if self.id == 0: print "celltype: ", n, " istep: ", istep[n], "Hz => ", iin, " nA"
istep[n] = iin
for n in range(self.n_celltypes):
for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
np.random.seed(gid*30)
if self.i_holdrs == []:
if istep_sigma[n] != 0:
istep_r = np.random.normal(istep[n], istep[n]*istep_sigma[n], 1).clip(min=0)
else: # same ihold for all cells!
istep_r = istep[n]
else: # ihold has been set!
if istep_sigma[n] != 0:
istep_r = np.random.normal(istep[n]-self.i_holds[n], (istep[n]-self.i_holds[n])*istep_sigma[n], 1).clip(min=0) # delta now! put on top of hold!
else: # same ihold for all cells!
istep_r = istep[n]-self.i_holds[n] # delta now! put on top of hold!
if neg:
istep_r = -1*istep_r
if istep[n] == 0:
istep_r = -1*self.i_holdrs[n][i]
#print 'is:' + str(istep_r) + 'was:' + str(self.i_holdrs[n][i])
if istep_r != 0:
# step current
ic_step = h.IClamp(self.cells[n][i].soma(0.5))
ic_step.delay = tstep/ms
ic_step.dur = tdur/ms
ic_step.amp = istep_r/nA
self.ic_steps.append(ic_step)
if self.id == 0: print "set_IStep finished. istep: ", istep, ", istep_sigma: ", istep_sigma
def set_IPlay(self, stimulus, t):
"""
Initializes values for current clamp to play a signal.
"""
if self.do_run:
for m in self.tvecs:
#m.destroy()
del m
del self.tvecs
for m in self.ivecs:
#m.destroy()
del m
del self.ivecs
for m in self.plays:
#m.destroy()
del m
del self.plays
self.tvecs = []
self.ivecs = []
self.plays = []
for i, gid in enumerate(self.gidlist[self.a_celltype[0]]): # for every cell in the gidlist
tvec = h.Vector(t/ms)
ivec = h.Vector(stimulus/nA)
play = h.IClamp(self.cells[self.a_celltype[0]][i].soma(0.5))
play.delay = 0
play.dur = 1e9
ivec.play(play._ref_amp, tvec, 1)
self.plays.append(play) # add to list
self.tvecs.append(tvec) # add to list
self.ivecs.append(ivec) # add to list
if self.id == 0: print "set_IPlay finished."
def set_IPlay2(self, stimulus, t):
"""
Initializes values for current clamp to play a signal.
"""
if self.do_run:
for m in self.tvecs:
#m.destroy()
del m
del self.tvecs
for m in self.ivecs:
#m.destroy()
del m
del self.ivecs
for m in self.plays:
#m.destroy()
del m
del self.plays
self.tvecs = []
self.ivecs = []
self.plays = []
for j in self.a_celltype:
tvec = h.Vector(t/ms)
ivec = []
for s in stimulus:
if hasattr(self.cells[j][0], 'input_vec'):
ivec.append(h.Vector(self.factor_celltype[j] * self.cells[j][0].ifac * s / self.cells[j][0].n_input_spiny / nA))
else:
ivec.append(h.Vector(self.factor_celltype[j]*s/nA))
self.tvecs.append(tvec) # add to list
self.ivecs.append(ivec) # add to list
for i, gid in enumerate(self.gidlist[j]): # for every cell in the gidlist
if hasattr(self.cells[j][i], 'input_vec'):
play = []
for iloc, vec in enumerate(self.cells[j][i].input_vec):
isig = self.syn_ex_dist[j][iloc]-1
#print isig
for inv in vec:
play.append(h.IClamp(inv(0.5)))
play[-1].delay = 0
play[-1].dur = 1e9
ivec[isig].play(play[-1]._ref_amp, tvec, 1)
else:
#fluctuating current
play = h.IClamp(self.cells[j][i].soma(0.5))
play.delay = 0
play.dur = 1e9
ivec[0].play(play._ref_amp, tvec, 1)
self.plays.append(play) # add to list
if self.id == 0: print "set_IPlay2 finished."
def set_IPlay3(self, stimulus, t, amp = None):
"""
Initializes values for current clamp to play a signal.
"""
if self.do_run:
for m in self.tvecs:
#m.destroy()
del m
del self.tvecs
for m in self.ivecs:
#m.destroy()
del m
del self.ivecs
for m in self.plays:
#m.destroy()
del m
del self.plays
self.tvecs = []
self.ivecs = []
self.plays = []
for j in self.a_celltype:
if amp is None:
amp0 = 0
else:
amp0 = amp[j]
tvec = h.Vector(t/ms)
self.tvecs.append(tvec) # add to list
for i, gid in enumerate(self.gidlist[j]): # for every cell in the gidlist
if isinstance(self.factor_celltype[j], ( int, long ) ):
ivec = h.Vector(self.factor_celltype[j]*(stimulus*amp0)/nA)
else:
np.random.seed(gid*40)
rnd.seed(gid*40)
if self.factor_celltype[j][1] > 0:
f = np.random.normal(self.factor_celltype[j][0], self.factor_celltype[j][1], 1).clip(min=0)
else:
f = self.factor_celltype[j][0]
                    if self.factor_celltype[j][2] > 0: # add inverted input with 50% probability, in future versions this will indicate the probability for -1 and 1
f = rnd.sample([-1,1],1)[0] * f
if self.id == 0: print "- inverted input with 50% probability:", f
if self.id == 0: print "- randomize play stimulus height"
ivec = h.Vector(f*(stimulus*amp0)/nA)
self.ivecs.append(ivec) # add to list
#fluctuating current
play = h.IClamp(self.cells[j][i].soma(0.5))
play.delay = 0
play.dur = 1e9
ivec.play(play._ref_amp, tvec, 1)
self.plays.append(play) # add to list
if self.id == 0: print "set_IPlay3 finished."
def set_PulseStim(self, start_time=[100*ms], dur=[1500*ms], steadyf=[100*Hz], pulsef=[150*Hz], pulse_start=[500*ms], pulse_len=[500*ms], weight0=1, tau01=[1*ms], tau02=[20*ms], weight1=1, tau11=[0*ms], tau12=[1*ms], noise = 1):
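        # Drive every cell with spike trains built from a stepped rate profile (zero before
        # start_time, steadyf until pulse_start, pulsef for pulse_len, steadyf afterwards);
        # Grc/Goc variants get their detailed synapse models, other cell types a generic
        # NMDA-like and AMPA-like synapse pair.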
if self.do_run:
modulation_vec = []
for n in range(self.n_celltypes):
t_input = np.arange(0, dur[n], self.dt) # create stimulus time vector has to be in ms!!
mod = np.concatenate(([np.zeros(round(start_time[n]/self.dt)), steadyf[n]*np.ones(round((pulse_start[n]-start_time[n])/self.dt)), pulsef[n]*np.ones(round(pulse_len[n]/self.dt)),steadyf[n]*np.ones(round((dur[n]-pulse_start[n]-pulse_len[n])/self.dt)) ]))
modulation = (t_input, mod)
#print shape(t_input), shape(mod), shape(modulation)
for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist
if dur[n] > 0:
if self.celltype[n] == 'Grc':
nmf = 4
for j in range(nmf):
self.cells[n][i].createsyn(nmf = 1, ngoc = 0, weight = weight0)
e0 = len(self.cells[n][i].MF_L)-1 # get number of current synapse!
pulse_gid = int(self.gid_count + gid*1000 + j)
train = mod_spike_train(modulation, noise = noise, seed = pulse_gid)
self.setup_Play_train(train = train, input_gid = pulse_gid)
self.cells[n][i].pconnect(self.pc,pulse_gid,int(e0),'mf')
elif self.celltype[n] == 'Goc':
nmf = 53
for j in range(nmf):
self.cells[n][i].createsyn(nmf = 1, weight = weight1)
e0 = len(self.cells[n][i].MF_L)-1 # get number of current synapse!
pulse_gid = int(self.gid_count + gid*1000 + j)
train = mod_spike_train(modulation, noise = noise, seed = pulse_gid)
self.setup_Play_train(train = train, input_gid = pulse_gid)
self.cells[n][i].pconnect(self.pc,pulse_gid,int(e0),'mf')
elif self.celltype[n] == 'Goc_noloop':
ngrc = 100
for j in range(ngrc):
self.cells[n][i].createsyn(ngrc = 1, weight = weight0)
e0 = len(self.cells[n][i].GRC_L)-1 # get number of current synapse!
pulse_gid = int(self.gid_count + gid*1000 + j)
train = mod_spike_train(modulation, noise = noise, seed=pulse_gid)
self.setup_Play_train(train = train, input_gid = pulse_gid)
self.cells[n][i].pconnect(self.pc,pulse_gid,int(e0),'grc')
else:
pulse_gid = int(self.gid_count + gid*1000 + 100)
train = mod_spike_train(modulation, noise = noise, seed = pulse_gid)
self.trains.append(train)
setup_Play_train(train = train, input_gid = pulse_gid)
# NMDA
self.cells[n][i].create_synapses(n_ex=1, tau1=tau01[n], tau2=tau02[n])
e0 = len(self.cells[n][i].synlist)-1
weight=weight0[n]
np.random.seed(gid*60)
#weight = np.random.normal(weight, weight*0.5, 1).clip(min=0)
self.cells[n][i].pconnect_target(self.pc, source=pulse_gid, target=e0, syntype='ex', weight=weight, delay=1)
# AMPA
self.cells[n][i].create_synapses(n_ex=1, tau1=tau11[n], tau2=tau12[n])
e0 = len(self.cells[n][i].synlist)-1
weight=weight1[n]
np.random.seed(gid*60)
#weight = np.random.normal(weight, weight*0.5, 1).clip(min=0)
self.cells[n][i].pconnect_target(self.pc, source=pulse_gid, target=e0, syntype='ex', weight=weight, delay=1)
modulation = (t_input, mod) # mack to s!
modulation_vec.append(modulation)
return modulation_vec
def connect_Synapse(self, pulse_gid, nt, i, n, gid, j, syntype = "ex", nsyn=0):
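        # Attach one synapse driven by the spike source pulse_gid to cell i of celltype nt:
        # plain Exp/Exp2/ExpGrc-style synapses are used when a simple time constant is given,
        # otherwise the detailed AMPA/NMDA mossy-fibre models; synaptic conductances are recorded
        # when 'gsyn_in' is among the interpolation methods.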
if self.do_run:
if 'gsyn_in' in self.method_interpol:
if isinstance(self.factor_celltype[nt], ( int, long ) ):
f = self.factor_celltype[nt]
else:
f = self.factor_celltype[nt][0]
if syntype == "ex":
# each cell can receive different g_syn_ex !
if type(self.g_syn_ex[nt]) is ndarray:
if len(self.g_syn_ex[nt]) == len(self.global_gidlist[nt]):
w = self.g_syn_ex[nt][n]
else:
w = self.g_syn_ex[nt]
else:
w = self.g_syn_ex[nt]
seed = int(10000 + 10*gid + j)
np.random.seed(seed*41)
if self.g_syn_ex_s[nt] > 0:
w = np.random.normal(w, w*self.g_syn_ex_s[nt], 1).clip(min=0) # self.g_syn_ex_s[nt]
if self.celltype[nt] == 'Grc':
# delete old
if j == 0:
self.cells[nt][i].MF_L = []
self.cells[nt][i].mfncpc = []
if "gr" not in str(self.tau1_ex[nt]):
if "amfit" in str(self.tau1_ex[nt]):
syn = h.ExpZSyn(self.cells[nt][i].soma(0.5))
syn.tau1_ampa = 0.254
syn.tau2_ampa = 0.254
syn.tau3_ampa = 0.363
syn.tau4_ampa = 6.523
syn.f1_ampa = 8.8376e-05
syn.f2_ampa = 5.5257e-05
syn.f1_nmda = 0
elif "nmfit" in str(self.tau1_ex[nt]):
syn = h.ExpYSyn(self.cells[nt][i].soma(0.5))
syn.f1_ampa = 0
syn.f2_ampa = 0
syn.tau1_nmda = 1.902
syn.tau2_nmda = 82.032
syn.f1_nmda = 7.853857483005277e-05
elif "fit" in str(self.tau1_ex[nt]):
syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))
syn.tau1_ampa = 0.254
syn.tau2_ampa = 0.254
syn.tau3_ampa = 0.363
syn.tau4_ampa = 6.523
syn.f1_ampa = 8.8376e-05
syn.f2_ampa = 5.5257e-05
syn.tau1_nmda = 1.902
syn.tau2_nmda = 82.032
syn.f1_nmda = 7.853857483005277e-05
else:
tau1 = self.tau1_ex[nt]
tau2 = self.tau2_ex[nt]
if tau1 == 0:
syn = h.ExpSyn(self.cells[nt][i].soma(0.5))
syn.tau = tau2/ms
else:
syn = h.Exp2Syn(self.cells[nt][i].soma(0.5))
syn.tau1 = tau1/ms
syn.tau2 = tau2/ms
syn.e = 0/mV
self.cells[nt][i].MF_L.append(syn)
e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!
syn_idx = int(e0)
source = int(pulse_gid)
self.cells[nt][i].mfncpc.append(self.pc.gid_connect(source, self.cells[nt][i].MF_L[syn_idx]))
self.cells[nt][i].mfncpc[-1].delay = 1
self.cells[nt][i].mfncpc[-1].weight[0] = w
if 'gsyn_in' in self.method_interpol:
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].MF_L[-1]._ref_g)
self.gsyn_in_fac.append(f)
else:
nrel = 0
if "stoch" in str(self.tau1_ex[nt]):
nrel = 4
self.cells[nt][i].createsyn(nmf = 1, ngoc = 0, weight_gmax = w, nrel=nrel)
if "ampa" in str(self.tau1_ex[nt]):
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].gmax_factor = 0
if "nopre" in str(self.tau1_ex[nt]):
print "- no pre"
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_rec = 1e-9
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_facil = 1e-9
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_1 = 0
if "nostdampa" in str(self.tau1_ex[nt]):
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].gmax_factor = 0
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_rec = 1e-9
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_facil = 1e-9
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_1 = 0
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].r6FIX = 0
if "nostdnmda" in str(self.tau1_ex[nt]):
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].gmax_factor = 0
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_rec = 1e-9
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_facil = 1e-9
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_1 = 0
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].RdRate = 0
if "nmda" in str(self.tau1_ex[nt]):
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].gmax_factor = 0
if "nopre" in str(self.tau1_ex[nt]):
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_rec = 1e-9
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_facil = 1e-9
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_1 = 0
if "nostdgr" in str(self.tau1_ex[nt]):
self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].r6FIX = 0 #1.12
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].RdRate = 0 #12e-3
print "- no std"
if "nomggr" in str(self.tau1_ex[nt]):
self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].v0_block = -1e9
print "- no mg block"
e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!
self.cells[nt][i].pconnect(self.pc,pulse_gid,int(e0),'mf')
if 'gsyn_in' in self.method_interpol:
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0]._ref_g)
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0]._ref_g)
self.gsyn_in_fac.append(f)
self.gsyn_in_fac.append(f)
elif self.celltype[nt] == 'Goc':
# delete old
if j == 0:
self.cells[nt][i].MF_L = []
self.cells[nt][i].mfncpc = []
if "go" not in str(self.tau1_ex[nt]):
tau1 = self.tau1_ex[nt]
tau2 = self.tau2_ex[nt]
if tau1 == 0:
syn = h.ExpSyn(self.cells[nt][i].soma(0.5))
syn.tau = tau2/ms
else:
syn = h.Exp2Syn(self.cells[nt][i].soma(0.5))
syn.tau1 = tau1/ms
syn.tau2 = tau2/ms
syn.e = 0/mV
self.cells[nt][i].MF_L.append(syn)
e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!
syn_idx = int(e0)
source = int(pulse_gid)
self.cells[nt][i].mfncpc.append(self.pc.gid_connect(source, self.cells[nt][i].MF_L[syn_idx]))
self.cells[nt][i].mfncpc[-1].delay = 1
self.cells[nt][i].mfncpc[-1].weight[0] = w
if 'gsyn_in' in self.method_interpol:
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].MF_L[-1]._ref_g)
self.gsyn_in_fac.append(f)
else:
nrel = 0
mg = self.mglufac_ex[0]
if self.mglufac_ex[1] > 0:
mg = np.random.normal(self.mglufac_ex[0], self.mglufac_ex[1]*self.mglufac_ex[0], 1).clip(min=0) # self.g_syn_ex_s[nt]
if "stoch" in str(self.tau1_ex[nt]):
nrel = 4
self.cells[nt][i].createsyn(nmf = 1, weight_gmax = w, nrel=nrel, mglufac = mg)
e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!
self.cells[nt][i].pconnect(self.pc,pulse_gid,int(e0),'mf')
if 'gsyn_in' in self.method_interpol:
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0]._ref_g)
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0]._ref_g)
self.gsyn_in_fac.append(f)
self.gsyn_in_fac.append(f)
elif self.celltype[nt] == 'IfCell':
# delete old
if j == 0:
self.cells[nt][i].synlist = []
self.cells[nt][i].nc = []
if "gr" in str(self.tau1_ex[nt]):
self.cells[nt][i].whatami = "grc"
nrel = 0
if "stoch" in str(self.tau1_ex[nt]):
nrel = 4
self.cells[nt][i].MF_L = self.cells[nt][i].synlist
self.cells[nt][i].synlist.append(Synapse('glom', self.cells[nt][i], self.cells[nt][i].soma, nrel=nrel, record_all=0, weight_gmax = w))
if "ampa" in str(self.tau1_ex[nt]):
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].gmax_factor = 0
if "nopre" in str(self.tau1_ex[nt]):
print "- no pre"
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_rec = 1e-9
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_facil = 1e-9
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_1 = 0
if "nmda" in str(self.tau1_ex[nt]):
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].gmax_factor = 0
if "nopre" in str(self.tau1_ex[nt]):
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_rec = 1e-9
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_facil = 1e-9
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_1 = 0
if "nostdampa" in str(self.tau1_ex[nt]):
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_rec = 1e-9
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_facil = 1e-9
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_1 = 0
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].r6FIX = 0 #1.12
if "nostdnmda" in str(self.tau1_ex[nt]):
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_rec = 1e-9
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_facil = 1e-9
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_1 = 0
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].RdRate = 0
if "nostdgr" in str(self.tau1_ex[nt]):
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].r6FIX = 0 #1.12
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].RdRate = 0 #12e-3
print "- no std"
if "nomggr" in str(self.tau1_ex[nt]):
self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].v0_block = -1e9 #.k_block = 1e-9
print "- no mg block"
e0 = len(self.cells[nt][i].synlist)-1
syn_idx = int(e0)
source = int(pulse_gid)
self.cells[nt][i].nc.append(self.pc.gid_connect(source, self.cells[nt][i].synlist[syn_idx].input))
self.cells[nt][i].nc[-1].delay = 1
self.cells[nt][i].nc[-1].weight[0] = 1
if 'gsyn_in' in self.method_interpol:
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].synlist[syn_idx].postsyns['AMPA'][0]._ref_g)
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].synlist[syn_idx].postsyns['NMDA'][0]._ref_g)
self.gsyn_in_fac.append(f)
self.gsyn_in_fac.append(f)
else:
if "amfit" in str(self.tau1_ex):
syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))
syn.tau1_ampa = 0.254
syn.tau2_ampa = 0.254
syn.tau3_ampa = 0.363
syn.tau4_ampa = 6.523
syn.f1_ampa = 8.8376e-05
syn.f2_ampa = 5.5257e-05
syn.f1_nmda = 0
self.cells[nt][i].synlist.append(syn) # synlist is defined in Cell
elif "nmfit" in str(self.tau1_ex):
syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))
syn.f1_ampa = 0
syn.f2_ampa = 0
syn.tau1_nmda = 1.902
syn.tau2_nmda = 82.032
syn.f1_nmda = 7.853857483005277e-05
self.cells[nt][i].synlist.append(syn) # synlist is defined in Cell
elif "fit" in str(self.tau1_ex):
syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))
syn.tau1_ampa = 0.254
syn.tau2_ampa = 0.254
syn.tau3_ampa = 0.363
syn.tau4_ampa = 6.523
syn.f1_ampa = 8.8376e-05
syn.f2_ampa = 5.5257e-05
syn.tau1_nmda = 1.902
syn.tau2_nmda = 82.032
syn.f1_nmda = 7.853857483005277e-05
self.cells[nt][i].synlist.append(syn) # synlist is defined in Cell
else:
self.cells[nt][i].create_synapses(n_ex=1, tau1=self.tau1_ex[nt], tau2=self.tau2_ex[nt])
e0 = len(self.cells[nt][i].synlist)-1
syn_idx = int(e0)
self.cells[nt][i].pconnect_target(self.pc, source=pulse_gid, target=int(e0), syntype='ex', weight=w, delay=1)
if 'gsyn_in' in self.method_interpol:
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].synlist[syn_idx]._ref_g)
self.gsyn_in_fac.append(f)
elif self.celltype[nt] == 'Prk':
# delete old
if j == 0:
self.cells[nt][i].PF_Lsync = []
self.cells[nt][i].spk_nc_pfsync = []
self.cells[nt][i].pfrand = []
m = len(self.cells[nt][i].dendrange)
seed = int(4*gid)
np.random.seed(seed)
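# draw nsyn distinct dendrite sections at random (partial Fisher-Yates shuffle of dendrange)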
for k in xrange(nsyn):
m -= 1
mi = np.random.randint(0, m)
self.cells[nt][i].dendrange[mi], self.cells[nt][i].dendrange[m] = self.cells[nt][i].dendrange[m], self.cells[nt][i].dendrange[mi]
self.cells[nt][i].pfrand.append(self.cells[nt][i].dendrange[m])
#print self.cells[nt][i].pfrand
if "prk" not in str(self.tau1_ex[nt]):
pass
else:
self.cells[nt][i].PF_Lsync.append(Synapse2('pf',self.cells[nt][i],self.cells[nt][i].pfrand[j],record_all=0))
e0 = len(self.cells[nt][i].PF_Lsync)-1 # get number of current synapse!
syn_idx = int(e0)
self.cells[nt][i].spk_nc_pfsync.append(self.pc.gid_connect(pulse_gid, self.cells[nt][i].PF_Lsync[syn_idx].input.newnetstim))
self.cells[nt][i].spk_nc_pfsync[-1].delay = 1
self.cells[nt][i].spk_nc_pfsync[-1].weight[0] = 1
if 'gsyn_in' in self.method_interpol:
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].PF_Lsync[-1].postsyns['AMPA'][0]._ref_g)
self.gsyn_in_fac.append(f)
elif syntype == "inh":
w = self.g_syn_inh[nt]
seed = int(10000 + 10*gid + j)
np.random.seed(seed*42)
if self.g_syn_inh_s[nt] > 0:
w = np.random.normal(w, w*self.g_syn_inh_s[nt], 1).clip(min=w*0.1) # self.g_syn_inh_s[nt]
if self.celltype[nt] == 'Grc':
if j == 0:
self.cells[nt][i].GOC_L = []
self.cells[nt][i].gocncpc = []
if "gr" not in str(self.tau1_inh[nt]):
tau1 = self.tau1_inh[nt]
tau2 = self.tau2_inh[nt]
if tau1 == 0:
syn = h.ExpSyn(self.cells[nt][i].soma(0.5))
syn.tau = tau2/ms
else:
syn = h.Exp2Syn(self.cells[nt][i].soma(0.5))
syn.tau1 = tau1/ms
syn.tau2 = tau2/ms
syn.e = -65
self.cells[nt][i].GOC_L.append(syn)
i0 = len(self.cells[nt][i].GOC_L)-1 # get number of current synapse!
syn_idx = int(i0)
source = int(pulse_gid)
self.cells[nt][i].gocncpc.append(self.pc.gid_connect(source, self.cells[nt][i].GOC_L[syn_idx]))
self.cells[nt][i].gocncpc[-1].delay = 1
self.cells[nt][i].gocncpc[-1].weight[0] = w
else:
self.cells[nt][i].createsyn(nmf = 0, ngoc = 1, weight_gmax = w)
i0 = len(self.cells[nt][i].GOC_L)-1 # get number of current synapse!
self.cells[nt][i].pconnect(self.pc,pulse_gid,int(i0),'goc')
if self.celltype[nt] == 'IfCell':
if j == 0:
self.cells[nt][i].synlist_inh = []
self.cells[nt][i].nc_inh = []
if "gr" in str(self.tau1_inh[nt]):
nrel = 0
if "stoch" in str(self.tau1_ex[nt]):
nrel = 4
self.cells[nt][i].GOC_L = self.cells[nt][i].synlist
self.cells[nt][i].whatami = "grc"
self.cells[nt][i].synlist_inh.append(Synapse('goc', self.cells[nt][i], self.cells[nt][i].soma, nrel=nrel, record_all=0, weight_gmax = w))
i0 = len(self.cells[nt][i].synlist_inh)-1
syn_idx = int(i0)
source = int(pulse_gid)
self.cells[nt][i].nc_inh.append(self.pc.gid_connect(source, self.cells[nt][i].synlist_inh[syn_idx].input))
self.cells[nt][i].nc_inh[-1].delay = 1
self.cells[nt][i].nc_inh[-1].weight[0] = 1
if "gaba" in str(self.tau1_ex[nt]):
if 'gsyn_in' in self.method_interpol:
if "nostdgaba" in str(self.tau1_ex[nt]):
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].tau_rec = 1e-9
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].tau_facil = 1e-9
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].tau_1 = 0
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d3 = 0
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1d2 = 0
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1 = 0
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d2 = 0
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d3_a6 = 0
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1d2_a6 = 0
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1_a6 = 0
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d2_a6 = 0
self.record_syn.append(h.Vector())
self.record_syn[-1].record(self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0]._ref_g)
self.gsyn_in_fac.append(f)
else:
self.cells[nt][i].create_synapses(n_inh=1, tau1_inh=self.tau1_inh[nt], tau2_inh=self.tau2_inh[nt], e_inh=-65)
i0 = len(self.cells[nt][i].synlist_inh)-1
syn_idx = int(i0)
self.cells[nt][i].pconnect_target(self.pc, source=pulse_gid, target=int(i0), syntype='inh', weight=w, delay=1)
elif syntype == "intr":
if self.celltype[nt] == 'Prk':
pass
def set_SynPlay(self, farray, tarray, N = [], t_startstop = [], amode = 1):
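"""
Creates the spike train generators that drive the synaptic inputs: for each
celltype one set of excitatory, inhibitory and intruder generators, each
playing a rate-modulated spike train built from farray/tarray, and connects
them to the local cells via connect_Synapse. Internal function.
"""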
if self.do_run:
delay = 1
if (self.use_pc is False):
delay = 0.1
if N == []:
N = self.N
self.pulse_list = []
self.global_pulse_list = []
self.global_pulse_list_inh = []
self.global_pulse_list_intr = []
f_cells_mean_local = []
f_cells_cv_local = []
f_cells_std_local = []
for nt in range(self.n_celltypes): # loop over all cells
if (self.n_syn_ex[nt] > 0) or (self.n_syn_inh[nt] > 0) or (self.n_syn_intr[nt] > 0):
local_gid_count = 0
local_gid_count_type = []
# EXCITATION
if type(self.g_syn_ex[nt]) is not ndarray: self.g_syn_ex[nt] = np.array([self.g_syn_ex[nt]]) # each cell can receive different g_syn_ex !
if len(self.g_syn_ex[nt]) == len(self.global_gidlist[nt]):
pass
else:
self.g_syn_ex[nt] = np.ones(len(self.global_gidlist[nt]))*self.g_syn_ex[nt][0]
#print "- single value in g_syn_ex, cells:", len(self.global_gidlist[nt])
self.global_pulse_list.append([])
for ns in range(self.n_syn_ex[nt]): # loop over all excitatory synapses!
self.global_pulse_list[-1].append([])
for n in range(self.syn_max_mf[nt]): # number of cells of this celltype
self.global_pulse_list[-1][-1].append(local_gid_count+self.gid_count)
local_gid_count += 1
local_gid_count_type.append([])
local_gid_count_type[-1].append('ex')
local_gid_count_type[-1].append(n) # number of cell within their population 0..N[nt]
local_gid_count_type[-1].append(ns) # number of synapse
# INHIBITION
if np.array(self.inh_hold[nt]).size <= 1:
self.inh_hold[nt] = np.ones(len(self.global_gidlist[nt]))*self.inh_hold[nt]
#print "- single value in inh_hold", self.inh_hold[nt]
self.global_pulse_list_inh.append([])
for ns in range(self.n_syn_inh[nt]): # loop over all inhibitory synapses!
self.global_pulse_list_inh[-1].append([])
for n in range(self.syn_max_inh[nt]): # number of cells of this celltype
self.global_pulse_list_inh[-1][-1].append(local_gid_count+self.gid_count)
local_gid_count += 1
local_gid_count_type.append([])
local_gid_count_type[-1].append('inh')
local_gid_count_type[-1].append(n) # number of cell within their population 0..N[nt]
local_gid_count_type[-1].append(ns) # number of synapse
# INTRUDER SYNAPSE
if type(self.g_syn_intr[nt]) is not ndarray: self.g_syn_intr[nt] = np.array([self.g_syn_intr[nt]]) # each cell can receive different g_syn_intr !
if len(self.g_syn_intr[nt]) == len(self.global_gidlist[nt]):
pass
else:
self.g_syn_intr[nt] = np.ones(len(self.global_gidlist[nt]))*self.g_syn_intr[nt][0]
#print "- single value in g_syn_intr, cells:", len(self.global_gidlist[nt])
self.global_pulse_list_intr.append([])
for ns in range(self.n_syn_intr[nt]): # loop over all intruding synapses!
self.global_pulse_list_intr[-1].append([])
for n in range(self.syn_max_intr[nt]): # number of generators for this celltype
self.global_pulse_list_intr[-1][-1].append(local_gid_count+self.gid_count)
local_gid_count += 1
local_gid_count_type.append([])
local_gid_count_type[-1].append('intr')
local_gid_count_type[-1].append(n) # number of cell within their population 0..N[nt]
local_gid_count_type[-1].append(ns) # number of synapse
t_vec_input = np.array([]) # input trains
id_vec_input = np.array([]) # input trains id
fs = 1 / self.dt
ih_use_v = []
for i in range(int(self.id), local_gid_count, int(self.nhost)): # loop over all train generators and generate them
self.pulse_list.append(i+self.gid_count)
pulse_gid = self.pulse_list[-1]
gid = local_gid_count_type[i][1] # should correspond to this gid when multiple values inserted
if local_gid_count_type[i][0] == 'ex':
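# excitatory generator: baseline rate ih_use (optionally randomized per generator) modulated by the selected noise signal, converted to a spike train by mod_spike_train below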
seed = int(10001 + pulse_gid) # unique gid for generators!
np.random.seed(seed*423)
if self.ihold_sigma[nt] > 0:
ih_use = np.random.normal(self.ihold[nt], self.ihold[nt]*self.ihold_sigma[nt], 1).clip(min=0) # self.ihold[nt]*self.ihold_sigma[nt]
elif self.ihold_sigma[nt] < 0:
ih_use = np.random.uniform(0.1, self.ihold[nt])
else:
ih_use = self.ihold[nt]
ih_use_v.append(ih_use)
if ih_use > 0:
# train has to be constructed here, to insert a different train into each "dendrite"
## different ihold has to be implemented here!!
iholdvec = concatenate((zeros(round(fs)), ones(round(len(tarray) - 1 * fs)) * ih_use))
if isinstance(self.syn_ex_dist[nt], ( tuple ) ): # distribution of amplitude, only one noise source!
np.random.seed(pulse_gid*40)
if self.syn_ex_dist[nt][1] > 0:
f = np.random.normal(self.syn_ex_dist[nt][0], self.syn_ex_dist[nt][1], 1).clip(min=0)
else:
f = self.syn_ex_dist[nt][0]
f2 = f
rnd.seed(pulse_gid*40) # use gid so type 1, 2 is identical for each cell
#rnd.seed(gid*40) # use gid so type 1, 2 is identical for each cell
if self.syn_ex_dist[nt][2] > 0: # add inverted input with 50% probability; in future versions this will indicate the probability for -1 and 1
f2 = rnd.sample([-1,1],1)[0] * f
#f2 = f
if amode == 1:
inamp = (f2 * self.amod[nt] * ih_use)
elif amode == 2:
inamp = (f2 * self.amod[nt] * self.ihold[nt])
modulation = (tarray, inamp * farray[0] + iholdvec)
#if self.id == 0: print "- randomize play stimulus height, pulse_gid=", pulse_gid, " gid=", gid ," f=", f
if (gid==0): print "- randomize play stimulus height, pulse_gid=", pulse_gid, " gid=", gid ," f2=", f2,"inamp=",inamp
#rnd.seed(local_gid_count_type[i][1]*300) # pick seed based on number of cell
#nj = rnd.sample(range(len(farray)),1)[0]
nj = 1
else: # different noise sources can be used at different synapses, linear combination test in open loop
nj = self.syn_ex_dist[nt][local_gid_count_type[i][2]]
if nj == 0:
modulation = (tarray, iholdvec)
else:
if amode == 1:
inamp = (self.factor_celltype[nt] * self.amod[nt] * ih_use)
elif amode == 2:
inamp = (self.factor_celltype[nt] * self.amod[nt] * self.ihold[nt])
modulation = (tarray, inamp * farray[nj-1] + iholdvec)
if self.id == 0: print "ex farray number:", nj-1, "ih_use:", ih_use, "self.amod[nt]:", self.amod[nt], "inamp: ", inamp
# will be done n_syn_ex * number of cells!
if self.noise_syn_tau[nt] < 0: # variable threshold
no = self.noise_syn[nt]
else:
no = self.noise_syn[nt]*ih_use
train, self.n_train_ex = mod_spike_train(modulation, noise = no, seed = seed, noise_tau = self.noise_syn_tau[nt], noise_a = self.noise_a[nt])
#plt.figure("input")
#plt.plot(train, train*0, '|')
#plt.show()
t_vec_input = np.append(t_vec_input, train*ms).flatten() # use ms to save!!
id_vec_input = np.append(id_vec_input, np.ones(len(train))*pulse_gid).flatten()
f_cells_mean_local0, f_cells_cv_local0, f_cells_std_local0 = self.calc_fmean(train*ms, t_startstop)
f_cells_mean_local.append(f_cells_mean_local0); f_cells_cv_local.append(f_cells_cv_local0); f_cells_std_local.append(f_cells_std_local0)
if self.id == 0: print "TRAIN: requ. mean:", ih_use ,"eff. mean:", f_cells_mean_local0, "cv: " , f_cells_cv_local0, "std:" , f_cells_std_local0
else:
train = []
self.n_train_ex = []
elif local_gid_count_type[i][0] == 'intr':
# train has to be constructed here, to insert a different train into each "dendrite"
nj = 0
seed = int(10001 + pulse_gid)
np.random.seed(seed*4411)
if self.intr_hold_sigma[nt] > 0:
ih_use = np.random.normal(self.intr_hold[nt], self.intr_hold[nt]*self.intr_hold_sigma[nt], 1).clip(min=0)
else:
ih_use = self.intr_hold[nt]
ih_use_v.append(ih_use)
if ih_use > 0:
iholdvec = concatenate((zeros(round(fs)), ones(round(len(tarray) - 1 * fs)) * ih_use))
modulation = (tarray, iholdvec)
# will be done n_syn_in * number of cells!
if self.noise_syn_tau_intr[nt] < 0: # variable threshold
no = self.noise_syn_intr[nt]
else:
no = self.noise_syn_intr[nt]*ih_use
if self.noise_syn_tau_intr[nt] >= -1:
train, _ = mod_spike_train(modulation, noise = no, seed = seed, noise_tau = self.noise_syn_tau_intr[nt], noise_a = self.noise_a_intr[nt]) # train in ms
else:
train = oscill_spike_train(sor = 4, spike_prob = 0.25, noise_fraction = 4, end_time = tarray[-1]/ms, seed = seed) # 0.25 instead of 1/4, which is integer division (= 0) in Python 2
elif local_gid_count_type[i][0] == 'inh':
# train has to be constructed here, to insert a different train into each "dendrite"
seed = int(10001 + pulse_gid)
np.random.seed(seed*44)
if self.inh_hold_sigma[nt] > 0:
ih_use = np.random.normal(self.inh_hold[nt][gid], self.inh_hold[nt][gid]*self.inh_hold_sigma[nt], 1).clip(min=0)
else:
ih_use = self.inh_hold[nt][gid]
iholdvec = concatenate((zeros(round(fs)), ones(round(len(tarray) - 1 * fs)) * ih_use))
nj = self.syn_inh_dist[nt][local_gid_count_type[i][2]]
if nj == 0:
modulation = (tarray, iholdvec)
else:
inamp = (self.amod[nt] * ih_use)
modulation = (tarray, inamp * farray[nj-1] + iholdvec)
#print "inh farray number:", nj-1, "ih_use:", ih_use, "amp: ", inamp #old: nj-1+nemax
# will be done n_syn_in * number of cells!
if self.noise_syn_tau_inh[nt] < 0: # variable threshold
no = self.noise_syn_inh[nt]
else:
no = self.noise_syn_inh[nt]*ih_use
train, _ = mod_spike_train(modulation, noise = no, seed = seed, noise_tau = self.noise_syn_tau_inh[nt], noise_a = self.noise_a_inh[nt]) # train in ms
#print train
#print train
if len(train) > 0:
if self.id == 0:
print "-", pulse_gid, local_gid_count_type[i], "seed: ", seed, "ih_use:", ih_use, no, nj #, "first spike: ", train[0]
self.setup_Play_train(train = train+self.inh_delay, input_gid = pulse_gid, delay = delay) # train in ms
self.gid_count += local_gid_count # increase gid count
self.barrier()
for i, gid in enumerate(self.gidlist[nt]): # for all input cells
rnd.seed(gid*200)
n = self.global_gidlist[nt].index(gid) # index of cell within their population 0..N[nt]
# i is index on this node only!
self.record_syn = []
for j in range(self.n_syn_ex[nt]):
if N[nt] == len(self.global_pulse_list[nt][j]):
pulse_gid = self.global_pulse_list[nt][j][n] #every cell of this type receives one pulse gid
if self.id == 0: print "- gid:", gid ," n:", n ," one ex train for each synapse:", pulse_gid, "self.g_syn_ex[nt][n]:", self.g_syn_ex[nt][n]
else:
pulse_gid = rnd.sample(self.global_pulse_list[nt][j],1)[0] # not enough, just pick one at random, for inh/f search only one synapse available!
if self.id == 0: print "- gid:", gid ," n:", n ," one ex train from", len(self.global_pulse_list[nt][j]), ":", pulse_gid, "self.g_syn_ex[nt][n]:", self.g_syn_ex[nt][n]
if "gaba" in str(self.tau1_ex[nt]):
self.connect_Synapse(pulse_gid, nt, i, n, gid, j, syntype = "inh")
else:
self.connect_Synapse(pulse_gid, nt, i, n, gid, j, syntype = "ex", nsyn = self.n_syn_ex[nt])
if self.n_syn_inh[nt] > 0:
for j in range(self.n_syn_inh[nt]):
if N[nt] == len(self.global_pulse_list_inh[nt][j]):
pulse_gid = self.global_pulse_list_inh[nt][j][n] #every cell of this type receives one pulse gid
if self.id == 0: print "- one inh train for each synapse:", pulse_gid
else:
pulse_gid = rnd.sample(self.global_pulse_list_inh[nt][j],1)[0] # not enough, just pick one at random
if self.id == 0: print "- one inh train from", len(self.global_pulse_list_inh[nt][j]), ":", pulse_gid
self.connect_Synapse(pulse_gid, nt, i, n, gid, j, syntype = "inh")
if self.n_syn_intr[nt] > 0:
for j in range(self.n_syn_intr[nt]):
if N[nt] == len(self.global_pulse_list_intr[nt][j]):
pulse_gid = self.global_pulse_list_intr[nt][j][n] #every cell of this type receives one pulse gid
if self.id == 0: print "- one intruding train for each synapse:", pulse_gid
else:
pulse_gid = rnd.sample(self.global_pulse_list_intr[nt][j],1)[0] # not enough, just pick one at random
if self.id == 0: print "- one intruding train from", len(self.global_pulse_list_intr[nt][j]), ":", pulse_gid
if (self.use_pc is False):
if self.celltype[nt] == 'Prk': self.cells[nt][i].delrerun()
(msg,CF_input) = self.cells[nt][i].createsyn_CF(record_all=0,factor=self.g_syn_intr[nt][0],cf_setup_select='old')
CF_input.number = 3 # three bursts
CF_input.start = -0.3 # See synapsepfpurk.py
CF_input.interval = 3 # 3 ms interval between bursts
self.cells[nt][i].input_to_CF_nc.append(h.NetCon(self.vecstim[j], CF_input, 0, 0.1, 1))
self.netcons.append(self.cells[nt][i].input_to_CF_nc[-1])
else:
print "NOT IMPLEMENTED"
if self.id == 0: print "trains connected"
if local_gid_count_type[i][0] == 'intr':
pass
else:
self.id_all_vec_input.append(self.do_gather(id_vec_input, dtype = 'i'))
self.t_all_vec_input.append(self.do_gather(t_vec_input))
f_cells_mean = self.do_gather(f_cells_mean_local)
f_cells_cv = self.do_gather(f_cells_cv_local)
f_cells_std = self.do_gather(f_cells_std_local)
self.fmean_input = np.nan
self.fmax_input = np.nan
self.fmstd_input = np.nan
self.fcvm_input = np.nan
self.fstdm_input = np.nan
ih_use_v_all = self.do_gather(ih_use_v)
if self.id == 0 and local_gid_count_type[i][0] != 'intr':
self.fmean_input = mean(np.nan_to_num(f_cells_mean)) # compute mean of mean rate for all cells
self.fmstd_input = std(np.nan_to_num(f_cells_mean))
self.fmax_input = max(np.nan_to_num(f_cells_mean))
self.fcvm_input = mean(f_cells_cv[~np.isnan(f_cells_cv)])
self.fstdm_input = mean(f_cells_std[~np.isnan(f_cells_std)])
self.ih_use_max = max(ih_use_v_all)
print "- trains, fmean: ",self.fmean_input, "fmax: ",self.fmax_input, "Hz", "fmstd: ",self.fmstd_input, "Hz", "fcvm: ",self.fcvm_input, "fstdm: ",self.fstdm_input, "Hz, ih_use_max:", self.ih_use_max
else:
self.global_pulse_list.append([])
self.global_pulse_list_inh.append([])
def do_gather(self, v_local, dtype = 'd'):
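"""
Gathers a local vector from all nodes onto the root node, either with MPI
Gatherv (use_pc == False) or with ParallelContext py_alltoall. Returns the
concatenated data on the root node. Internal function.
"""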
if self.use_mpi:
self.barrier()
#v_local = v_local.astype(dtype).flatten()
v_local = np.array(v_local, dtype=dtype).flatten()
if self.use_pc == False:
v_global = None
counts_local = np.array(len(v_local), dtype='i')
counts = 0
if self.id == 0:
counts = np.empty(self.nhost, dtype='i')
self.comm.Gather(sendbuf=[counts_local, MPI.INT], recvbuf=[counts, MPI.INT], root=0)
if self.id == 0:
v_global = np.empty(sum(counts), dtype=dtype)
if dtype == 'd':
self.comm.Gatherv(sendbuf=[v_local, MPI.DOUBLE], recvbuf=[v_global, (counts, None), MPI.DOUBLE], root=0)
elif dtype == 'i':
self.comm.Gatherv(sendbuf=[v_local, MPI.INT], recvbuf=[v_global, (counts, None), MPI.INT], root=0)
#v_global = np.hstack(v_global)
else:
sendlist = [None]*self.nhost
sendlist[0] = v_local
getlist = self.pc.py_alltoall(sendlist)
v_global = np.hstack(getlist)
else:
v_global = np.hstack(v_local)
return v_global
def setup_Play_train(self, train = [], input_gid = 0, delay = 1):
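"""
Registers a precomputed spike train (in ms) as a VecStim source under
input_gid so it can be connected to synapses via its gid. Internal function.
"""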
self.trains.append(train)
# possibility to play spikes into the cells!
self.vecstim.append(h.VecStim(.5))
self.nc_vecstim.append(h.NetCon(self.vecstim[-1],None))
self.nc_vecstim[-1].delay = delay
self.spike_vec.append(h.Vector(self.trains[-1]))
self.vecstim[-1].play(self.spike_vec[-1])
if (self.use_mpi):
self.pc.set_gid2node(input_gid, self.id) # associate gid with this host
self.pc.cell(input_gid,self.nc_vecstim[-1]) # associate gid with spike detector
def record(self):
"""
Initializes recording vectors. Internal function
"""
if self.n_celltypes > 1:
#print "self.n_borders:",self.n_borders
for n in range(self.n_celltypes):
if self.n_borders[n] in self.gidlist[n]:
#print "np.shape(self.rec_v):",np.shape(self.rec_v)
#print "np.shape(self.cells):",np.shape(self.cells)
self.rec_v[n].record(self.cells[n][0].soma(0.5)._ref_v)
if self.id == 0: # only for first node and first cell
# Voltage
self.rec_v[0].record(self.cells[self.a_celltype[0]][0].soma(0.5)._ref_v)
# Stimuli
self.rec_i = h.Vector()
if (self.plays != []):
if (isinstance(self.plays[0], list) is False):
self.rec_i.record(self.plays[0]._ref_i)
else:
self.rec_i.record(self.plays[0][0]._ref_i)
self.rec_ich = h.Vector()
if self.ic_holds != [] and (isinstance(self.ic_holds[0], list) is False):
self.rec_ich.record(self.ic_holds[0]._ref_i)
self.rec_ics = h.Vector()
if self.ic_starts != []:
self.rec_ics.record(self.ic_starts[0]._ref_i)
self.rec_n = h.Vector()
if self.fluct_s[0] > 0:
# Fluctuating input
self.rec_n.record(self.flucts[0]._ref_i)
print "recording noise"
elif (len(self.flucts) > 0) and (len(self.fluct_g_i0)>0):
self.rec_n.record(self.flucts[0]._ref_g_i)
print "recording g noise"
else:
print "nonoise"
if hasattr(self.cells[self.a_celltype[0]][0], 'lkg2_noise'):
if self.cells[self.a_celltype[0]][0].lkg2_noise > 0:
self.rec_n.record(self.cells[self.a_celltype[0]][0].fluct._ref_il)
print "recording tonic gaba noise"
self.rec_step = h.Vector()
if self.ic_steps != []:
self.rec_step.record(self.ic_steps[0]._ref_i)
# Time
self.rec_t = h.Vector()
self.rec_t.record(h._ref_t)
def run(self, tstop = 10*s, do_loadstate = True):
"""
Starts the stimulation.
"""
self.record()
if self.first_run:
if self.use_mpi: self.pc.set_maxstep(100)
#self.pc.spike_compress(1) #test
if self.use_multisplit:
import multiprocessing
Hines = h.CVode()
Hines.active(0)
h.load_file("parcom.hoc")
p = h.ParallelComputeTool()
if self.use_mpi:
cpus = multiprocessing.cpu_count() #32 #self.pc.nhost()
else:
cpus = multiprocessing.cpu_count() #32
p.change_nthread(cpus,1)
p.multisplit(1)
print "Using multisplit, cpus:", cpus
else:
h.load_file("stdrun.hoc")
if self.use_local_dt:
h.cvode.active(1)
h.cvode.use_local_dt(1)
h.celsius = self.temperature
h.dt = self.dt/ms # Fixed dt
h.steps_per_ms = 1 / (self.dt/ms)
if self.cells[self.a_celltype[0]] != []:
if hasattr(self.cells[self.a_celltype[0]][0], 'v_init'):
h.v_init = self.cells[self.a_celltype[0]][0].v_init # v_init is supplied by cell itself!
else:
h.v_init = -60
h.stdinit()
h.finitialize()
if hasattr(self.cells[self.a_celltype[0]][0], 'load_states') and do_loadstate:
m = md5.new()
cell_exe_new = self.cell_exe[0]
m.update(cell_exe_new)
filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'
self.cells[self.a_celltype[0]][0].load_states(filename)
else:
pass
if self.id == 0:
import time
t0 = time.time()
if self.simstep == 0:
if self.id == 0: print "Running without steps",
if self.use_mpi:
self.pc.psolve(tstop/ms)
else:
h.init()
h.tstop = tstop/ms
h.run()
else:
h.finitialize()
cnt = 1
#if self.id == 50:
# print len(self.cells[1][0].nc), self.cells[1][0].nc[0].weight[0]
# print len(self.cells[0][0].nc_inh), self.cells[0][0].nc_inh[0].weight[0]
h.t = 0
while h.t < tstop/ms:
if self.id == 0:
print "Running...",
if self.use_mpi:
past_time = self.pc.time()
h.continuerun(cnt*self.simstep/ms)
if self.use_mpi: self.pc.barrier()
if self.id == 0:
if self.use_mpi:
print "Simulated time =",h.t*ms, "s, Real time = ", (self.pc.time()-past_time), 's'
else:
print "Simulated time =",h.t*ms, "s"
#if self.id == 0:
# print hpy.heap().byrcs
cnt += 1
if self.id == 0: print "psolve took ", time.time() - t0, "seconds"
self.first_run = False
self.barrier() # wait for other nodes
self.tstop = tstop
def get(self, t_startstop=[], i_startstop=[], N = []):
"""
Gets the recordings.
"""
if N == []:
N = self.N
if t_startstop == []:
t_startstop = np.array([2, self.tstop])
t_all_vec = []
id_all_vec = []
fmean = []
fbase = []
fmax = []
fmstd = []
fcvm = []
fstdm = []
gid_del = []
f_cells_mean_all = []
f_cells_base_all = []
f_cells_cv_all = []
f_cells_std_all = []
fmeanA = []
fmstdA = []
fmaxA = []
fcvmA = []
fstdmA = []
fbaseA = []
fbstdA = []
if self.id == 0: print "start gathering spikes"
for n in range(self.n_celltypes):
if self.use_mpi:
self.barrier() # wait for other node
t_vec = np.array(self.t_vec[n]).flatten()*ms - 1*ms # shift time because of output delay
id_vec = np.array(self.id_vec[n]).flatten()
else:
t_vec = np.array([])
id_vec = np.array([])
print np.shape(self.t_vec)
for i in self.gidlist[n]:
t_vec0 = np.array(self.t_vec[n][i]).flatten()*ms
t_vec = np.append(t_vec, t_vec0).flatten()
id_vec = np.append(id_vec, np.ones(len(t_vec0))*i).flatten()
fmean0, fmax0, fmstd0, fcvm0, fstdm0, gid_del0, f_cells_mean_all0, f_cells_cv_all0, f_cells_std_all0, fbase0, f_cells_base_all0 = self.get_fmean(t_vec, id_vec, t_startstop = t_startstop, gidlist = self.gidlist[n])
fmean.append(fmean0); fmax.append(fmax0), fmstd.append(fmstd0), fcvm.append(fcvm0), fstdm.append(fstdm0), gid_del.append(gid_del0), f_cells_mean_all.append(f_cells_mean_all0), f_cells_cv_all.append(f_cells_cv_all0), f_cells_std_all.append(f_cells_std_all0)
fbase.append(fbase0); f_cells_base_all.append(f_cells_base_all0)
t_all_vec.append(self.do_gather(t_vec))
id_all_vec.append(self.do_gather(id_vec))
if (self.id == 0) and (self.no_fmean == False):
f_cells_mean_all = np.array(f_cells_mean_all).flatten()
fmeanA = mean(f_cells_mean_all) # compute mean of mean rate for all cells
fmstdA = std(f_cells_mean_all)
fmaxA = max(f_cells_mean_all)
f_cells_base_all = np.array(f_cells_base_all).flatten()
fbaseA = mean(f_cells_base_all) # compute mean of mean rate for all cells
fbstdA = std(f_cells_base_all)
f_cells_cv_all = np.concatenate((np.array(f_cells_cv_all)))
f_cells_std_all = np.concatenate((np.array(f_cells_std_all)))
fcvmA = mean(f_cells_cv_all)
fstdmA = mean(f_cells_std_all)
print "- ALL, fmean: ",fmeanA, "fmax: ",fmaxA, "Hz", "fmstd: ",fmstdA, "Hz", "fcvm: ",fcvmA, "fstdm: ",fstdmA, "Hz", "fbase: ",fbaseA, "Hz", "fbstd: ", fbstdA, "Hz"
if self.id == 0: print "all spikes have been gathered"
self.barrier()
# do this here to have something to return
voltage = []
current = []
time = []
freq_times = []
spike_freq = []
gsyn = []
if self.id == 0: # only for first node
time = np.array(self.rec_t)*ms
# use self.bin_width as bin width!
freq_times = arange(0, time[-1], self.bin_width)
voltage.append(np.array(self.rec_v[0])*mV)
current = np.zeros(len(time))
if len(np.array(self.rec_ics)) > 0:
current = current + np.array(self.rec_ics)
if len(np.array(self.rec_ich)) > 0:
current = current + np.array(self.rec_ich)
if len(np.array(self.rec_i)) > 0:
current = current + np.array(self.rec_i)
if len(np.array(self.rec_n)) > 0:
current = current + np.array(self.rec_n)
print np.array(self.rec_n)
if len(np.array(self.rec_step)) > 0:
current = current + np.array(self.rec_step)
else:
time = [0]
self.barrier()
time = self.broadcast(time, fast = True)
gsyn_in = []
gsyn_in0 = []
if 'gsyn_in' in self.method_interpol:
gsyn_in = None
if self.id == 0: print "- collecting gsyn_in"
gsyn_in0 = np.zeros(len(time), dtype='d')
if self.record_syn != []: # note: "is not []" would always be True
for i, j in enumerate(self.record_syn):
gsyn_in0 = gsyn_in0 + self.gsyn_in_fac[i] * np.array(j, dtype='d')
if self.use_mpi:
count = len(time)
#if self.id == 0: gsyn_in = np.empty(count*self.nhost, dtype='d')
#self.comm.Gatherv(sendbuf=[gsyn_in0, MPI.DOUBLE], recvbuf=[gsyn_in, MPI.DOUBLE], root=0)
gsyn_in = self.do_gather(gsyn_in0)
if self.id == 0:
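# sum the per-node partial conductance traces into one population trace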
gsyn_in = np.reshape(gsyn_in, (self.nhost,count))
gsyn_in = sum(gsyn_in,0)
else:
gsyn_in = gsyn_in0
self.barrier() # wait for other nodes
if self.n_celltypes > 1:
if self.id == 0: print "more than one celltype send voltage of first other cell to root"
for n in range(1, self.n_celltypes):
if self.use_pc == True:
srclist = [None]*self.nhost
if (self.n_borders[n] in self.gidlist[n]):
srclist[0] = np.array(self.rec_v[n])*mV
destlist = self.pc.py_alltoall(srclist)
if self.id == 0:
idx = [i for i, x in enumerate(destlist) if x is not None]
if len(idx) > 1: raise ValueError('Error, too many vectors sent, should be one at a time!')
voltage.append(np.array(destlist[idx[0]]))
else:
if self.id == 0:
if (self.n_borders[n] in self.gidlist[n]): # first node has it, do not wait to receive it!
v_temp = np.array(self.rec_v[n])*mV
else:
v_temp = np.zeros(len(voltage[0]))
self.comm.Recv([v_temp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(sum(N)+33))
voltage.append(v_temp)
else:
if self.n_borders[n] in self.gidlist[n]:
voltage = np.array(self.rec_v[n])*mV
self.comm.Ssend([voltage, MPI.DOUBLE], dest=0, tag=int(sum(N)+33))
self.barrier() # wait for other nodes
times = arange(0, time[-1], 1*ms)
gsyns = []
if self.called_syn_out_all == True:
for n in range(self.n_celltypes):
gsyns.append([])
if self.use_pc == True:
for i, gid in enumerate(self.global_gidlist[n]):
srclist = [None]*self.nhost
if gid in self.gidlist[n]: #only one node does this
a = np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])
c = np.zeros(int((1*ms)/self.dt))
temp = np.append(a, c).flatten()
temp = temp[int((1*ms)/self.dt):len(temp)+1]
gtemp = interp(times,time,temp)
srclist[0] = gtemp # send to root only
destlist = self.pc.py_alltoall(srclist)
if self.id == 0:
idx = [i for i, x in enumerate(destlist) if x is not None]
if len(idx) > 1: raise ValueError('Error, too many vectors sent, should be one at a time!')
gsyns[n].append(np.array(destlist[idx[0]]))
else:
for i, gid in enumerate(self.global_gidlist[n]):
if self.id == 0:
if gid in self.gidlist[n]:
a = np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])
c = np.zeros(int((1*ms)/self.dt))
temp = np.append(a, c).flatten()
temp = temp[int((1*ms)/self.dt):len(temp)+1]
gtemp = interp(times,time,temp)
else:
gtemp = np.zeros(len(times))
self.comm.Recv([gtemp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(gid))
gsyns[n].append(np.array(gtemp))
else:
if gid in self.gidlist[n]:
a = np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])
c = np.zeros(int((1*ms)/self.dt))
temp = np.append(a, c).flatten()
temp = temp[int((1*ms)/self.dt):len(temp)+1]
gtemp = interp(times,time,temp)
#np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])
self.comm.Ssend([gtemp, MPI.DOUBLE], dest=0, tag=int(gid))
if self.id == 0: print "root gathered synaptic output conductance"
self.barrier() # wait for other nodes
times = arange(0, time[-1], 10*ms)
w_mat = []
winh_mat = []
if self.stdp_used == True:
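# gather plasticity weight traces: excitatory ('w') first, then inhibitory ('w_inh'), interpolated onto the coarser 10 ms time grid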
for n in range(self.n_celltypes):
w_mat.append([])
for i, gid in enumerate(self.global_gidlist[n]):
if self.id == 0:
wall = []
if gid in self.gidlist[n]:
walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w']
if len(walltemp) > 0:
for l in range(len(walltemp)):
wtemp = np.array(walltemp[l])
wtemp = interp(times,time,wtemp)
wall.append(wtemp)
else:
while 1:
wtemp = np.zeros(len(times))
self.comm.Recv([wtemp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(gid))
if wtemp[0] == -1:
break
else:
wall.append(wtemp)
w_mat[n].append(wall)
else:
if gid in self.gidlist[n]:
walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w']
if len(walltemp) > 0:
for l in range(len(walltemp)):
wtemp = np.array(walltemp[l])
wtemp = interp(times,time,wtemp)
self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))
wtemp = np.ones(len(times))*-1
self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))
if self.id == 0:
print "root gathered synaptic input conductance"
self.barrier() # wait for other nodes
for n in range(self.n_celltypes):
winh_mat.append([])
for i, gid in enumerate(self.global_gidlist[n]):
if self.id == 0:
wall = []
if gid in self.gidlist[n]:
walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w_inh']
if len(walltemp) > 0:
for l in range(len(walltemp)):
wtemp = np.array(walltemp[l])
wtemp = interp(times,time,wtemp)
wall.append(wtemp)
else:
while 1:
wtemp = np.zeros(len(times))
self.comm.Recv([wtemp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(gid))
if wtemp[0] == -1:
break
else:
wall.append(wtemp)
winh_mat[n].append(wall)
else:
if gid in self.gidlist[n]:
walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w_inh']
if len(walltemp) > 0:
for l in range(len(walltemp)):
wtemp = np.array(walltemp[l])
wtemp = interp(times,time,wtemp)
self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))
wtemp = np.ones(len(times))*-1
self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))
if self.id == 0:
print "root gathered synaptic input conductance"
self.barrier() # wait for other nodes
t_all_vec_vec = []
id_all_vec_vec = []
f_cells_mean = []
if self.id == 0: # only for first node
for n in range(self.n_celltypes):
ie = argsort(t_all_vec[n])
t_all_vec_vec.append( t_all_vec[n][ie] )
id_all_vec_vec.append( id_all_vec[n][ie].astype(int) ) #
print "all spikes have been sorted"
if self.jitter > 0: # add jitter!
np.random.seed(40)
x = np.random.normal(0, self.jitter, len(t_all_vec_vec[self.a_celltype[0]]))
t_all_vec_vec[self.a_celltype[0]] = t_all_vec_vec[self.a_celltype[0]] + x
if self.delta_t > 0:
t_all_vec_vec[self.a_celltype[0]] = t_all_vec_vec[self.a_celltype[0]] + self.delta_t
gsyn = zeros(len(freq_times))
if 'gsyn_in' in self.method_interpol:
pass
else:
bvec = ["syn" in st for st in self.method_interpol]
if np.any(bvec):
if (not hasattr(self, 'passive_target')) | (self.jitter > 0): # if not already done in neuron via artificial cell
[resp, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[self.a_celltype[0]], bins = freq_times)
resp = np.concatenate((zeros(1),resp))
Ksyn = syn_kernel(arange(0,10*self.syn_tau2,self.bin_width), self.syn_tau1, self.syn_tau2)
Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))
gsyn = np.convolve(Ksyn, resp, mode='same')
print "Generated gsyn by convolution with Ksyn"
self.nc_delay = 0
else:
gsyn = interp(freq_times,time,np.array(self.rec_g))
spike_freq = np.zeros(len(freq_times))
for j in self.a_celltype:
#plt.figure('results_voltage')
#ax99 = plt.subplot(2,1,1)
#ax99.plot(time,voltage[j])
#plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')
#plt.savefig("./figs/Pub/Voltage_" + str(self.pickle_prefix) + "_cell" + str(j) + "_N" + str(self.N[j]) + ".pdf", dpi = 300, transparent=True) # save it
#plt.show()
#plt.clf()
[num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)
if isinstance(self.factor_celltype[j], ( int, long ) ):
f = self.factor_celltype[j]
else:
f = self.factor_celltype[j][0]
spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width
self.barrier() # wait for other nodes
#figure('1')
#plot(time,np.array(self.rec_s1),'b', time,np.array(self.rec_s2),'r')
#plt.show()
return {'time':time, 'voltage':voltage, 'current':current, 'fmean':fmean, 'f_cells_mean':f_cells_mean,
'gsyn':gsyn, 'freq_times':freq_times, 'spike_freq':spike_freq, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fstdmA':fstdmA, 'fbstdA':fbstdA,
't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec, 'gsyns':gsyns, 'w_mat':w_mat, 'winh_mat':winh_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'fbaseA':fbaseA, 'fbase':fbase}
def clean(self):
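"""
Releases the ParallelContext workers at the end of a simulation. Internal function.
"""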
self.pc.runworker()
self.pc.done()
def compute_Transfer(self, stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor=[1]):
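"""
Builds the effective input signal as the weighted sum of the excitatory minus
the inhibitory stimulus components (weights inh_factor), normalizes it, and
passes it together with the population response to the global compute_Transfer
routine for the transfer function estimation. Internal wrapper.
"""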
stimulus0 = np.zeros(len(stimulus[0]))
for a in self.a_celltype:
# sum input to produce linear input that should be reconstructed!
if (any(self.syn_inh_dist) > 0) and (any(self.syn_ex_dist) > 0):
if max(self.syn_inh_dist) == max(self.syn_ex_dist): # same signal through ex and inh
print "inh_factor = [0,1]"
inh_factor = [0,1]
for ni in self.syn_ex_dist[a]:
if ni != 0:
stimulus0 += inh_factor[ni-1] * stimulus[ni-1]
print "+ex:", ni-1
for ni in self.syn_inh_dist[a]:
if ni != 0:
stimulus0 -= inh_factor[ni-1] * stimulus[ni-1] #old: +nemax
print "-inh:", ni-1 #old: +nemax
if (max(self.n_syn_ex) == 0) and (max(self.n_syn_inh) == 0):
stimulus0 += stimulus[0]
print "current"
#if self.n_syn_ex[self.celltype_syn[0]] == 0:
# stimulus0 += stimulus[0]
# amplitude should not matter since filter amplitude is simply adjusted
#stimulus = stimulus0 #/len(self.syn_ex_dist)
stimulus0 = stimulus0 / std(stimulus0) / 2
# linear interpolation inside compute_Transfer !!!
print "max(stimulus0):",max(stimulus0)
results = compute_Transfer(spike_freq = spike_freq, freq_times = freq_times,
stimulus = stimulus0, t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in, do_csd = do_csd, t_kernel = 1*s,
method_interpol = self.method_interpol, nc_delay = self.nc_delay, w_length = 3, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, give_psd = self.give_psd) # freq_wp not defined, use all frequencies
# TEST:
#VAF = results.get('VAFf_mat')
#freq_used = results.get('freq_used')
#iend = mlab.find(freq_used >= self.xmax)[0]
#err = 1-mean(VAF[1][0,1:iend-1])
#print "err: ", err
return results
def residuals_compute_Transfer(self, p, stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor):
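"""
Error function for the inh_factor fit: negative entries of inh_factor are
replaced by the free parameters p, the transfer function is recomputed, and
1 - mean(VAF) up to self.xmax is returned. Internal function.
"""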
inh_factor_in = inh_factor[:]
ip = 0
for i, inhf in enumerate(inh_factor_in):
if inhf < 0:
inh_factor_in[i] = p[ip]
ip += 1
results = self.compute_Transfer(stimulus = stimulus, spike_freq = spike_freq, freq_times = freq_times,
t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in,
do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor = inh_factor_in)
VAF = results.get('VAFf_mat')
freq_used = results.get('freq_used')
iend = mlab.find(freq_used >= self.xmax)[0]
err = 1-mean(VAF[1][0,0:iend])
print "inh_factor:", inh_factor_in, "err: ", err
return err
#@profile
def fun_cnoise_Stim(self, t_stim = 10*s, sexp = 0, cutf = 0, do_csd = 1, t_qual = 0, freq_used = np.array([]), K_mat_old = np.array([]), inh_factor = [1], onf = None, equi = 0):
"""
Stimulate cell with colored noise
sexp = spectral exponent: Power ~ 1/freq^sexp
cutf = frequency cutoff: Power flat (white) for freq <~ cutf
do_csd = 1: use cross spectral density function for computation
"""
self.barrier() # wait for other nodes
filename = str(self.pickle_prefix) + "_results_pop_cnoise.p"
filepath = self.data_dir + "/" + filename
if self.id == 0: print "- filepath:", filepath
if self.do_run or (os.path.isfile(filepath) is False):
tstart = 0;
fs = 1 / self.dt # sampling rate
fmax = fs / 2 # maximum frequency (nyquist)
t_noise = arange(tstart, t_stim, self.dt) # create stimulus time vector, make sure stimulus is even!!!
#print self.syn_ex_dist
#print self.syn_inh_dist
#exit()
if (self.syn_ex_dist == []):
for nt in range(self.n_celltypes): # loop over all cells
#print "nt", nt
if hasattr(self.cells[nt][0], 'input_vec'):
self.syn_ex_dist.append([1] * len(self.cells[nt][0].input_vec)) # default ex for all by default!!!
else:
self.syn_ex_dist.append([1] * self.n_syn_ex[nt]) # default ex for all by default!!!
#print self.syn_ex_dist
if (self.syn_ex_dist[0] == []):
nemax = 1
else:
nemax = max([item for sublist in self.syn_ex_dist for item in sublist])
if (self.syn_inh_dist == []): # and (any(self.n_syn_inh) > 0)
for nt in range(self.n_celltypes): # loop over all cells
self.syn_inh_dist.append([0] * self.n_syn_inh[nt]) # default no inh for all by default!!!
#print self.syn_inh_dist
#exit()
if (self.syn_inh_dist[0] == []):
nimax = 0
else:
nimax = max([item for sublist in self.syn_inh_dist for item in sublist])
#print "self.syn_inh_dist, self.syn_ex_dist", self.syn_inh_dist, self.syn_ex_dist
n_noise = max([nemax,nimax]) # number of noise sources
#print n_noise,nemax,nimax
# create reproducible input
noise_data = []
for nj in range(n_noise):
if self.id == 0: # make sure all have the same signal !!!
if len(freq_used) == 0:
noise_data0 = create_colnoise(t_noise, sexp, cutf, self.seed+nj, onf = onf)
else:
noise_data0, _, _, _ = create_multisines(t_noise, freq_used) # create multi sine signal
else:
noise_data0 = np.empty(len(t_noise), dtype=np.float64)
noise_data0 = self.broadcast(noise_data0, fast = True)
noise_data.append(noise_data0)
noise_data0 = []
noise_data_points = len(noise_data[0])
# Create signal weight vector inh_factor if it is not fully given
if len(noise_data) > len(inh_factor):
inh_factor = [inh_factor[0]] * len(noise_data)
print "inh_factor:", inh_factor
#if equi:
#pass
# tstop = t_stim
if max(self.n_syn_ex) == 0: # this means current input
self.set_IStim() # sets amp
if self.fluct_s != []:
if self.fluct_s[self.a_celltype[0]] > 0:
if self.id == 0: print "- adding i fluct"
self.connect_fluct()
for i, m in enumerate(self.method_interpol):
if "syn" in m: self.method_interpol[i] = "syn " + str(self.syn_tau1/ms) + "/" + str(self.syn_tau2/ms) + "ms"
if "bin" in m: self.method_interpol[i] = "bin " + str(self.bin_width/ms) + "ms"
stimulus = []
for nj in range(len(noise_data)):
stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, self.amp[self.a_celltype[0]], ihold = 0, delay_baseline = self.delay_baseline) # , tail_points = 0
stimulus.append(stimulus0)
tstop = t[-1]
self.set_IPlay2(stimulus, t)
if self.id == 0: print "- starting colored noise transfer function estimation! with amp = " + str(np.round(self.amp[self.a_celltype[0]],4)) + ", ihold = " + str(np.round(self.ihold[self.a_celltype[0]],4)) + ", ihold_sigma = " + str(np.round(self.ihold_sigma,4)) + ", dt = " + str(self.dt) + " => maximum frequency = " + str(fmax) + "\r"
else:
self.give_freq = False
ihold = self.set_i(self.ihold) # just sets amp, ihold should not change!
if 'gsyn_in' not in self.method_interpol:
pass
else:
self.g_syn_ex = [1]*len(self.N)
if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):
if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):
if self.id == 0: print "- adding g fluct"
self.connect_gfluct(E_i=-65)
stimulus = []
for nj in range(len(noise_data)):
stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) # self.amp
stimulus.append(stimulus0)
noise_data = []
tstop = t[-1]
if self.N[self.a_celltype[0]] > 1:
self.set_IStim(ihold = [0]*self.n_celltypes, ihold_sigma = [0]*self.n_celltypes, random_start = True, tstart_offset = 1)
if self.id == 0: print "- add random start"
#print "Enter Synplay()"
self.set_SynPlay(stimulus, t, t_startstop = t_startstop)
#print "Exit Synplay()"
if self.id == 0: print "- starting colored noise transfer function estimation with synaptic input! with amp = " + str(np.round(self.amp,4)) + ", ihold = " + str(np.round(self.ihold,4)) + ", ihold_sigma = " + str(np.round(self.ihold_sigma,4)) + ", dt = " + str(self.dt) + " => maximum frequency = " + str(fmax) + "\r"
amp_vec = []
mag_vec = []
pha_vec = []
freq_used = []
ca = []
SNR_mat = []
VAFf_mat = []
Qual_mat = []
CF_mat = []
VAF_mat = []
stim = []
stim_re_mat = []
resp_mat = []
current_re = []
ihold1 = []
tk = []
K_mat = []
gsyn_in = []
fmean = []
fmax = []
fmstd = []
fcvm = []
fmeanA = []
fmaxA = []
fmstdA = []
fcvmA = []
t_all_vec_input_sorted = []
id_all_vec_input_sorted = []
if (self.id == 0) and (max(self.n_syn_ex) > 0):
print range(self.n_celltypes), np.shape(self.t_all_vec_input)
for l in range(self.n_celltypes):
ie = argsort(self.t_all_vec_input[l])
t_all_vec_input_sorted.append( self.t_all_vec_input[l][ie] )
id_all_vec_input_sorted.append( self.id_all_vec_input[l][ie].astype(int) )
#if (self.id == 0):
# print self.g_syn_ex
# print np.array(self.g_syn_ex)>= 0
#print "g_syn_ex:",self.g_syn_ex
if np.array(np.array(self.g_syn_ex)>= 0).any():
if hasattr(self.cells[self.a_celltype[0]][0], 'get_states') and equi:
print "- Equilibrate!"
self.run(tstop, do_loadstate = False)
m = md5.new()
cell_exe_new = self.cell_exe[0]
m.update(cell_exe_new)
filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'
self.cells[self.a_celltype[0]][0].get_states(filename)
else:
self.run(tstop, do_loadstate = False)
i_startstop = []
results = self.get(t_startstop, i_startstop)
time = results.get('time')
current = results.get('current')
voltage = results.get('voltage')
fmean = results.get('fmean')
gsyn = results.get('gsyn')
freq_times = results.get('freq_times')
spike_freq = results.get('spike_freq')
t_all_vec_vec = results.get('t_all_vec_vec')
id_all_vec_vec = results.get('id_all_vec_vec')
gsyns = results.get('gsyns')
gsyn_in = results.get('gsyn_in')
fmax = results.get('fmax')
fmstd = results.get('fmstd')
fcvm = results.get('fcvm')
fmeanA = results.get('fmeanA')
fmaxA = results.get('fmaxA')
fmstdA = results.get('fmstdA')
fcvmA = results.get('fcvmA')
fbaseA = results.get('fbaseA')
fbase = results.get('fbase')
fbstdA = results.get('fbstdA')
else: # do not run, analyse input!!!
time = t
voltage = []
for l in range(self.n_celltypes):
voltage.append(np.zeros(len(t)))
current = []
freq_times = []
spike_freq = []
gsyn = []
gsyn_in = []
t_all_vec_vec = []
id_all_vec_vec = []
fmean = []
fmax = []
fmstd = []
fcvm = []
fstdm = []
fmeanA = []
fmaxA = []
fmstdA = []
fcvmA = []
fbaseA = []
fbase = []
fbstdA = []
if self.id == 0:
current = self.n_train_ex
#t_all_vec = self.t_all_vec_input
#id_all_vec = self.id_all_vec_input
#ie = argsort(t_all_vec)
#t_all_vec_vec.append( t_all_vec[ie] )
#id_all_vec_vec.append( id_all_vec[ie].astype(int) )
t_all_vec_vec = t_all_vec_input_sorted
id_all_vec_vec = id_all_vec_input_sorted
freq_times = arange(0, tstop, self.bin_width)
spike_freq = np.zeros(len(freq_times))
for j in self.a_celltype:
[num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)
if self.tau2_ex[0] > 0:
spike_freq = np.concatenate((zeros(1),num_spikes))
print "NOSYN TEST: start convolution with Ksyn"
Ksyn = syn_kernel(arange(0,10*self.tau2_ex[0],self.bin_width), self.tau1_ex[0], self.tau2_ex[0])
Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))
spike_freq = np.convolve(Ksyn, spike_freq, mode='same')
print "NOSYN TEST: convolution finished"
else:
if isinstance(self.factor_celltype[j], ( int, long ) ):
f = self.factor_celltype[j]
else:
f = self.factor_celltype[j][0]
spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width
fmean.append(self.fmean_input)
fmax.append(self.fmax_input)
fmstd.append(self.fmstd_input)
fcvm.append(self.fcvm_input)
fstdm.append(self.fstdm_input)
if self.no_fmean == True:
fmean.append(ihold)
#plt.figure('spike_freq')
#plt.plot(freq_times, spike_freq)
#plt.savefig("./figs/Pub/Spike_freq_" + str(self.pickle_prefix) + ".pdf", dpi = 300, transparent=True) # save it
#plt.clf()
fmeanA = fmean[0]
fmaxA = fmax[0]
fmstdA = fmstd [0]
fcvmA = fcvm[0]
fstdmA = fstdm[0]
if self.id == 0:
if any([i<0 for i in inh_factor]):
p0 = []
inhf_idx = []
for i, inhf in enumerate(inh_factor):
if inhf < 0:
p0.append(0)
inhf_idx.append(i)
plsq = fmin(self.residuals_compute_Transfer, p0, args=(stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor))
p = plsq
ip = 0
for i in inhf_idx:
inh_factor[i] = p[ip]
ip += 1
print "Final inh_factor: ", inh_factor
results = self.compute_Transfer(stimulus, spike_freq = spike_freq, freq_times = freq_times,
t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in,
do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor=inh_factor)
mag_vec, pha_vec, ca, freq, freq_used, fmean_all = results.get('mag_mat'), results.get('pha_mat'), results.get('ca_mat'), results.get('freq'), results.get('freq_used'), results.get('fmean')
SNR_mat, VAFf_mat, Qual_mat, CF_mat, VAF_mat = results.get('SNR_mat'), results.get('VAFf_mat'), results.get('Qual_mat'), results.get('CF_mat'), results.get('VAF_mat')
stim, resp_mat, stim_re_mat, tk, K_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat'), results.get('tk'), results.get('K_mat')
self.barrier() # wait for other nodes
if self.id == 0:
if t_qual > 0:
#print t_startstop[0], t_startstop[0]/self.dt, (t_startstop[0]+t_qual)/self.dt
current_re = current[int(t_startstop[0]/self.dt):int((t_startstop[0]+t_qual)/self.dt)]
current_re = current_re[int(len(K_mat[self.a_celltype[0]])):int(len(current_re))-int(len(K_mat[self.a_celltype[0]]))]
if len(self.i_holdrs) > 0:
ihold1 = self.i_holdrs[self.a_celltype[0]][0]
else:
ihold1 = []
for l in range(len(self.method_interpol)): # unwrap
pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase
# only return fraction of actual signal, it is too long!!!
if time[-1] > self.tmax:
imax = -1*int(self.tmax/self.dt)
time = time[imax:]; current = current[imax:]; gsyn = gsyn[imax:]; gsyn_in = gsyn_in[imax:]
for n in range(self.n_celltypes):
voltage[n] = voltage[n][imax:]
if freq_times != []:
if freq_times[-1] > self.tmax:
imax2 = where(freq_times > self.tmax)[0][0] # for spike frequency
freq_times = freq_times[0:imax2]; spike_freq = spike_freq[0:imax2]
bvec = ["_syn" in st for st in self.method_interpol]
if np.any(bvec):
# normalize synaptic integration with others
mag_vec[1,:]= mag_vec[0,0]*mag_vec[1,:]/mag_vec[1,0]
if self.id == 0: print "start pickle"
results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage,'tk':tk,'K_mat':K_mat, 'ihold1': ihold1, 't_startstop':t_startstop, #'stimulus':stimulus,
'current':current,'t1':time,'freq_times':freq_times,'spike_freq':spike_freq, 'stim':stim, 'stim_re_mat':stim_re_mat, 'resp_mat':resp_mat, 'current_re':current_re, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fbaseA':fbaseA, 'fbase':fbase, 'fbstdA':fbstdA,
'fmean':fmean,'method_interpol':self.method_interpol, 'SNR':SNR_mat, 'VAF':VAFf_mat, 'Qual':Qual_mat, 'CF':CF_mat, 'VAFs':VAF_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'inh_factor':inh_factor, 't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec}
if self.id == 0:
if self.dumpsave == 1:
pickle.dump( results, gzip.GzipFile( filepath, "wb" ) )
print "pickle done"
if self.plot_train:
for a in self.a_celltype:
#i_start = mlab.find(t_all_vec_vec[a] >= 0)[0]
#i_stop = mlab.find(t_all_vec_vec[a] >= 5)[0]
#t_all_cut = t_all_vec_vec[a][i_start:i_stop]
#id_all_cut = id_all_vec_vec[a][i_start:i_stop]
t_all_cut = t_all_vec_vec[a]
id_all_cut = id_all_vec_vec[a]
f_start_in = mlab.find(t_all_cut >= 0)
f_stop_in = mlab.find(t_all_cut <= 10)
f_start = f_start_in[0]
f_stop = f_stop_in[-1]+1
use_spikes = t_all_cut[f_start:f_stop]
use_id = id_all_cut[f_start:f_stop]
plt.figure('results_train')
ax99 = plt.subplot(1,1,1)
ax99.plot(use_spikes,use_id,'|', ms=2)
plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')
plt.savefig("./figs/Pub/Train_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.clf()
if len(t_all_cut) > 0:
tbin = 100*ms
tb = np.arange(0,t[-1],tbin)
[all_rate, _] = neuronpy.util.spiketrain.get_histogram(t_all_cut, bins = tb)
all_rate = np.concatenate((np.zeros(1),all_rate)) / self.N[a] / tbin
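                            # Counts per 100 ms bin are normalised by the number of cells and the bin
                            # width to give the mean per-cell rate in Hz, e.g. 50 spikes from N = 100
                            # cells in a 0.1 s bin correspond to 50/100/0.1 = 5 Hz.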
plt.figure('results_train2')
plt.plot(tb,all_rate)
plt.savefig("./figs/Pub/PSTH_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.clf()
plt.figure('results_noise')
plt.plot(time,current)
plt.savefig("./figs/Pub/Noise_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.clf()
if self.plot_input:
if len(t_all_vec_input_sorted[0]) > 0:
i_start = mlab.find(t_all_vec_input_sorted[0] >= 0)[0]
i_stop = mlab.find(t_all_vec_input_sorted[0] >= 5)[0]
t_all_cut = t_all_vec_input_sorted[0][i_start:i_stop]
id_all_cut = id_all_vec_input_sorted[0][i_start:i_stop]
plt.figure('results_input')
ax99 = plt.subplot(1,1,1)
ax99.plot(t_all_cut,id_all_cut,'|', ms=2)
plt.text(0.5, 1.1, r'fmean=' + str(round(self.fmean_input,1)) + ',fmax=' + str(round(self.fmax_input,1)) + ',fmstd=' + str(round(self.fmstd_input,1)) + ',fcvm=' + str(round(self.fcvm_input,1)) + ',fstdm=' + str(round(self.fstdm_input,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')
plt.savefig("./figs/Pub/Input_" + str(self.pickle_prefix) + "_N" + str(self.N[self.a_celltype[0]]) + ".pdf", dpi = 300, transparent=True) # save it
plt.clf()
else:
if self.id == 0:
results = pickle.load( gzip.GzipFile( filepath, "rb" ) )
#print results
#print {key:np.shape(value) for key,value in results.iteritems()}
if self.minimal_dir: # save only info needed for plot
print {key:np.shape(value) for key,value in results.iteritems()}
if "Fig6_pop_transfer_grc_syngr_nsyn4_cn_a1_noisesynlow_inhlow_adjfinh_varih_N100_CFo6.0_results_pop_cnoise.p" in filename:
results['ca'] = []
results['resp_mat'] = []
results['stim'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['freq_times'] = []
results['spike_freq'] = []
results['stim_re_mat'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['gsyn_in'] = []
elif ("Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo9.0_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_is0.14_CFo9.0_results_pop_cnoise.p" in filename) \
:
results['ca'] = []
results['resp_mat'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['voltage'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['t1'] = []
results['gsyn_in'] = []
elif ("Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo9.0_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_is0.14_twopop_CFo9.0_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo14.0_results_pop_cnoise.p" in filename) \
or ("Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo14.0_results_pop_cnoise.p" in filename) \
:
results['ca'] = []
results['resp_mat'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['voltage'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['t1'] = []
results['gsyn_in'] = []
results['freq_times'] = []
results['spike_freq'] = []
elif ("Fig4_pop_transfer_grc_cn_addn100_N[100]_CF[40]_amod[1]_results_pop_cnoise.p" in filename) \
or ("Fig4_pop_transfer_grc_cn_addn1_N[100]_CF[40]_amod[1]_results_pop_cnoise.p" in filename) \
or ("Fig4b_pop_transfer_grc_lowcf_cn_twopop_N[50, 50]_CF[0.0055, 0.0055]_amod[None, None]_results_pop_cnoise.p" in filename) \
or ("Fig4b_pop_transfer_grc_lowcf_cn_N[100]_CF[0.0055]_amod[None]_results_pop_cnoise.p" in filename) \
or ("Fig4b_pop_transfer_grc_lowcf_slownoise_cn_twopop_N[50, 50]_CF[0.0051, 0.0051]_amod[None, None]_results_pop_cnoise.p" in filename) \
or ("Fig4b_pop_transfer_grc_lowcf_slownoise_cn_N[100]_CF[0.0051]_amod[None]_results_pop_cnoise.p" in filename) \
:
results['ca'] = []
results['resp_mat'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['voltage'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['t1'] = []
results['gsyn_in'] = []
results['freq_times'] = []
results['spike_freq'] = []
elif ("Fig2_pop_transfer_" in filename) \
:
results['ca'] = []
results['resp_mat'] = []
results['current'] = []
results['t1'] = []
results['voltage'] = []
results['freq_times'] = []
results['spike_freq'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['gsyn_in'] = []
else:
results['ca'] = []
results['resp_mat'] = []
results['stim'] = []
results['current'] = []
results['tk'] = []
results['K_mat'] = []
results['t1'] = []
results['voltage'] = []
results['freq_times'] = []
results['spike_freq'] = []
results['stim_re_mat'] = []
results['current_re'] = []
results['t_all_vec_vec'] = []
results['id_all_vec_vec'] = []
results['gsyn_in'] = []
print {key:np.shape(value) for key,value in results.iteritems()}
pickle.dump( results, gzip.GzipFile( self.minimal_dir + "/" + filename, "wb" ) )
else:
results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 'tk':[],'K_mat':[], 'ihold1':[], 't_startstop':[], #'stimulus':[],
'current':[],'t1':[],'freq_times':[],'spike_freq':[], 'stim':[], 'stim_re_mat':[], 'current_re':[], 'gsyn_in':[], 'fmeanA':[], 'fmaxA':[], 'fmstdA':[], 'fcvmA':[], 'fbaseA':[], 'fbase':[], 'fbstdA':[],
'fmean':[],'method_interpol':self.method_interpol, 'SNR':[], 'VAF':[], 'Qual':[], 'CF':[], 'VAFs':[], 'fmax':[], 'fmstd':[], 'fcvm':[], 'inh_factor':[], 't_all_vec_vec':[], 'id_all_vec_vec':[]}
if self.id == 0:
if self.plot_train:
for a in self.a_celltype:
t1 = results.get('t1')
voltage = results.get('voltage')
fmean = results.get('fmean')
fmax = results.get('fmax')
fmstd = results.get('fmstd')
if results.has_key('t_all_vec_vec'):
if len(results['t_all_vec_vec']) > 0:
t_all_vec_vec = results.get('t_all_vec_vec')
id_all_vec_vec = results.get('id_all_vec_vec')
t_all_cut = t_all_vec_vec[a]
id_all_cut = id_all_vec_vec[a]
f_start_in = mlab.find(t_all_cut >= 0)
f_stop_in = mlab.find(t_all_cut <= 10)
f_start = f_start_in[0]
f_stop = f_stop_in[-1]+1
use_spikes = t_all_cut[f_start:f_stop]
use_id = id_all_cut[f_start:f_stop]
plt.figure('results_train')
ax97 = plt.subplot(1,1,1)
ax97.plot(use_spikes,use_id,'|', ms=6)
plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax97.transAxes, fontsize=10, va='center', ha='center')
plt.savefig("./figs/Pub/Train_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.figure('results_voltage')
ax99 = plt.subplot(2,1,1)
ax99.plot(t1,voltage[a])
t_noise = arange(0, t_stim, self.dt)
noise_data = create_colnoise(t_noise, sexp, cutf, 50, onf = onf)
stimulus, t, t_startstop = construct_Stimulus(noise_data, 1/self.dt, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline)
ax98 = plt.subplot(2,1,2)
                            ax98.plot(t[0:int(10/self.dt)],stimulus[0:int(10/self.dt)],color='k')
plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')
plt.savefig("./figs/Pub/Voltage_" + str(self.pickle_prefix) + "_cell" + str(a) + "_N" + str(self.N[a]) + ".pdf", dpi = 300, transparent=True) # save it
plt.show()
plt.clf()
if (self.id == 0) and (do_csd == 1):
Qual = results.get('Qual')
for i, ii in enumerate(self.method_interpol):
print "\n[QUAL:] Interpol:", ii, "SNR0:", Qual[i,0,0], "SNR_cutff:", Qual[i,0,1], "SNR_mean:", Qual[i,0,2], "\n VAF0:", Qual[i,1,0], "VAF_cutff:", Qual[i,1,1], "VAF_mean:", Qual[i,1,2], "\n CF(subtracted):", Qual[i,2,0], "VAF(subtracted):", Qual[i,2,1]
VAF = results.get('VAF')
freq_used = results.get('freq_used')
iend = mlab.find(freq_used >= self.xmax)[0]
print 'm(VAF)=' + str(np.mean(VAF[1][0,0:iend]))
self.barrier() # wait for other nodes
return results
# def fun_ssine_Stim(self, freq_used = np.array([1, 10, 100, 1000])*Hz):
# """
# Compute impedance and/or transfer function using Single sine stimulation
# Only compute transfer function if there is a steady state (resting) firing rate!
# """
# self.barrier() # wait for other nodes
#
# filepath = "./data/" + str(self.pickle_prefix) + "_results_pop_ssine.p"
#
# if self.do_run or (os.path.isfile(filepath) is False):
#
# fs = 1 / self.dt # sampling rate
# fmax = fs / 2 # maximum frequency (nyquist)
#
# if self.id == 0: print "- starting single sine transfer function estimation! with amp = " + str(np.round(self.amp[a_celltype[0]],4)) + ", ihold = " + str(np.round(self.ihold[self.a_celltype[0]],4)) + ", dt = " + str(self.dt) + " => maximum frequency = " + str(fmax) + "\r"
#
# if max(self.n_syn_ex) == 0:
# self.set_IStim()
#
# if self.fluct_s != []:
# if self.fluct_s[self.a_celltype[0]] > 0:
# if self.id == 0: print "- adding i fluct"
# self.connect_fluct()
#
# for i, m in enumerate(self.method_interpol):
# if "syn" in m: self.method_interpol[i] = "syn " + str(self.syn_tau1/ms) + "/" + str(self.syn_tau2/ms) + "ms"
# if "bin" in m: self.method_interpol[i] = "bin " + str(self.bin_width/ms) + "ms"
#
# else:
# self.give_freq = False
# ihold = self.set_i(self.ihold) # just sets amp, ihold should not change!
#
# if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):
# if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):
# if self.id == 0: print "- adding g fluct"
# self.connect_gfluct(E_i=-65)
#
# #if ((self.fluct_std_e[self.a_celltype[0]] != []) or (self.fluct_std_i[self.a_celltype[0]] != [])):
# # if ((self.fluct_std_e[self.a_celltype[0]] > 0) or (self.fluct_std_i[self.a_celltype[0]] > 0)):
# # if self.id == 0: print "- adding g fluct"
# # self.connect_gfluct(E_i=-65)
#
# if 'gsyn_in' not in self.method_interpol:
# pass
# else:
# self.g_syn_ex = 1
#
#
# for i, fu in enumerate(freq_used):
#
# if self.id == 0: print "- single sine processing frequency = " + str(fu)
#
# t, stimulus, i_startstop, t_startstop = create_singlesine(fu = fu, amp = self.amp[a_celltype[0]], ihold = 0, dt = self.dt, periods = 20, minlength = 2*s, t_prestim = 1*s)
# tstop = t[-1]
#
# if i == 0: t_startstop_plot = t_startstop
#
# if max(self.n_syn_ex) == 0:
# self.set_IPlay(stimulus, t)
# else:
# self.set_SynPlay(stimulus, t)
#
# if self.g_syn_ex >= 0: # should also be true for current input!!!
#
# self.run(tstop)
#
# if i == 0: # do this here to have something to return
#
# # select first sinusoidal to plot, later
# voltage_plot = []
# current_plot = []
# time_plot = []
# freq_times_plot = []
# spike_freq_plot = []
# gsyn_plot = []
#
# # construct vectors
# amp_vec = zeros(len(freq_used)) # amplitude vector
# fmean_all = zeros(len(freq_used)) # mean firing frequency (all cells combined)
# fmean = zeros(len(freq_used)) # mean firing frequency (one cell)
# ca = zeros(len(freq_used), dtype=complex)
#
# # create matrix to hold all different interpolation methods:
# mag_vec = zeros((len(self.method_interpol),len(freq_used))) # magnitude vector
# pha_vec = zeros((len(self.method_interpol),len(freq_used))) # phase vector
# NI_vec = zeros((len(self.method_interpol),len(freq_used))) # NI vector
# VAF_vec = zeros((len(self.method_interpol),len(freq_used))) # VAF vector
#
# results = self.get(t_startstop, i_startstop) # t1 should be equal to t!!!
# time, voltage, current, fmean0, gsyn = results.get('time'), results.get('voltage'), results.get('current'), results.get('fmean'), results.get('gsyn')
# freq_times, spike_freq, t_all_vec_vec, id_all_vec_vec, gsyns = results.get('freq_times'), results.get('spike_freq'), results.get('t_all_vec_vec'), results.get('id_all_vec_vec'), results.get('gsyns')
#
# else:
#
# time = t
# voltage = []
# voltage.append(np.zeros(len(t)))
# current = stimulus
#
# freq_times = []
# spike_freq = []
# fmean0 = ihold
# gsyn = []
# gsyn_in = []
#
# t_all_vec_vec = []
# id_all_vec_vec = []
#
#
# if self.id == 0:
#
# t_all_vec = []
# t_all_vec.append([])
# t_all_vec[0] = np.concatenate(self.t_all_vec_input)
#
# id_all_vec = []
# id_all_vec.append([])
# id_all_vec[0] = np.concatenate(self.id_all_vec_input)
#
# ie = argsort(t_all_vec[0])
# t_all_vec_vec.append( t_all_vec[0][ie] )
# id_all_vec_vec.append( id_all_vec[0][ie].astype(int) ) #
#
#
# freq_times = arange(0, tstop, self.bin_width)
# [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[0], bins = freq_times)
# spike_freq = np.concatenate((zeros(1),num_spikes)) / self.bin_width
#
#
# if self.id == 0:
#
# fmean[i] = fmean0[0]
#
# if i == 0:
#
# # select first sinusoidal to plot
# voltage_plot = voltage
# current_plot = current
# time_plot = time
# freq_times_plot = freq_times
# spike_freq_plot = spike_freq
# gsyn_plot = gsyn
#
#
# for l in range(len(self.method_interpol)):
#
# if "bin" in self.method_interpol[l]:
#
# # binning and linear interpolation
# stimulus_signal = stimulus[i_startstop[0]:i_startstop[1]] # cut out relevant signal
# t_input_signal = t[i_startstop[0]:i_startstop[1]] - t[i_startstop[0]]
#
# spike_freq_interp = interp(t, freq_times, spike_freq, left=0, right=0) # interpolate to be eqivalent with input, set zero at beginning and end!
# freq_out_signal_interp = spike_freq_interp[i_startstop[0]:i_startstop[1]] # cut out relevant signal
# vamp, mag_vec[l,i], pha_vec[l,i], fmean_all[i], _ = get_magphase(stimulus_signal, t_input_signal, freq_out_signal_interp, t_input_signal, method = "fft", f = fu)
#
# results = est_quality(t_input_signal, fu, freq_out_signal_interp, self.amp[a_celltype[0]]*mag_vec[l,i], pha_vec[l,i]/ (180 / pi), fmean_all[i])
# NI_vec[l,i], VAF_vec[l,i] = results.get('NI'), results.get('VAF')
# print "-[bin] NI: " + str(NI_vec[l,i]) + ", VAF: " + str(VAF_vec[l,i])
#
# if "syn" in self.method_interpol[l]:
#
# # synaptic integration
# dt_out = t_input_signal[2] - t_input_signal[1]
# shift = self.nc_delay/dt_out # shift response by the nc delay to remove offset
# freq_out_signal_syn = gsyn[i_startstop[0]+shift:i_startstop[1]+shift] # cut out relevant signal
#
# vamp, mag_vec[l,i], pha_vec[l,i], fm, _ = get_magphase(stimulus_signal, t_input_signal, freq_out_signal_syn, t_input_signal, method = "fft", f = fu)
#
# results = est_quality(t_input_signal, fu, freq_out_signal_syn, self.amp[a_celltype[0]]*mag_vec[l,i], pha_vec[l,i]/ (180 / pi), fm)
# NI_vec[l,i], VAF_vec[l,i] = results.get('NI'), results.get('VAF')
# print "-[syn] NI: " + str(NI_vec[l,i]) + ", VAF: " + str(VAF_vec[l,i])
#
#
# self.barrier() # wait for other nodes
#
# #print "rest: " + str(vrest) + " freq_used:" + str(freq_used) + " amp_vec:" + str(amp_vec) + " mag_vec:" + str(mag_vec) + " pha_vec:" + str(pha_vec)
#
# if self.id == 0:
#
# for l in range(len(self.method_interpol)): # unwrap
# pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase
#
# # only return fraction of actual signal, it is too long!!!
# if time_plot[-1] > self.tmax:
# imax = where(time_plot > self.tmax)[0][0] # for voltage, current and time
# time_plot = time_plot[0:imax]; current_plot = current_plot[0:imax]; gsyn_plot = gsyn_plot[0:imax]
# for n in range(self.n_celltypes):
# voltage_plot[n] = voltage_plot[n][0:imax]
#
# if freq_times_plot != []:
# if freq_times_plot[-1] > self.tmax:
# imax2 = where(freq_times_plot > self.tmax)[0][0] # for spike frequency
# freq_times_plot = freq_times_plot[0:imax2]; spike_freq_plot = spike_freq_plot[0:imax2]
#
# # normalize synaptic integration with with first magnitude, may by syn itself!
# bvec = ["syn" in st for st in self.method_interpol]
# if np.any(bvec):
# k = where(bvec)
# mag_vec[k,:]= mag_vec[0,0]*mag_vec[k,:]/mag_vec[k,0]
#
# NI_vec = (freq_used, NI_vec)
# VAF_vec = (freq_used, VAF_vec)
# results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage_plot, 't_startstop':t_startstop_plot,
# 'current':current_plot,'t1':time_plot,'freq_times':freq_times_plot,'spike_freq':spike_freq_plot,
# 'fmean':mean(fmean),'method_interpol':self.method_interpol, 'NI':NI_vec, 'VAF':VAF_vec}
#
# if self.id == 0:
# pickle.dump( results, gzip.GzipFile( filepath, "wb" ) )
#
# else:
#
# if self.id == 0:
# results = pickle.load( gzip.GzipFile( filepath, "rb" ) )
# else:
# results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 't_startstop':[],
# 'current':[],'t1':[],'freq_times':[],'spike_freq':[],
# 'fmean':[],'method_interpol':self.method_interpol,'NI':[],'VAF':[]}
#
# return results
def get_RC(self, opt_plot):
if self.id == 0:
if "analytical" in opt_plot: # simplest case, only uses rm and tau, scaling necessary
exec self.cell_exe[self.a_celltype[0]]
sim = Stimulation(cell, temperature = self.temperature)
rm, cm, taum = sim.get_RCtau()
else:
rm = cm = taum = 0
if "if" in opt_plot:
Vrest = cell.soma(0.5).pas.e*mV
Vth = cell.spkout.thresh*mV
Vreset = cell.spkout.vrefrac*mV
else:
Vreset = 0*mV; Vth = 1*mV; Vrest = 0*mV
sim = None
cell = None
else:
rm = cm = taum = 0
Vreset = 0*mV; Vth = 1*mV; Vrest = 0*mV
return rm, cm, taum, Vreset, Vth, Vrest
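    # Illustrative use (hypothetical arguments, assuming a configured Population instance `pop`):
    #   rm, cm, taum, Vreset, Vth, Vrest = pop.get_RC(np.array(["analytical", "if"]))
    # "analytical" estimates R, C and tau from the cell template; "if" additionally reads the
    # integrate-and-fire reset, threshold and resting values.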
def fun_plot(self, currlabel="control", dowhat="cnoise", freq_used=np.array([]), cutf=10, sexp=0, t_stim=100*s, ymax=0, ax=None, SNR=None, VAF=None, t_qual=0, opt_plot=np.array([]), method_interpol_plot=[], do_csd = 1):
SNR_switch = SNR
VAF_switch = VAF
rm, cm, taum, Vreset, Vth, Vrest = self.get_RC(opt_plot)
if dowhat == "cnoise":
if do_csd == 0:
t_qual = 0; SNR_switch = 0; VAF_switch = 0
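                # Without cross-spectral density estimation there is no quality measure, so the
                # reconstruction window (t_qual) and the SNR/VAF overlays are switched off.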
results = self.fun_cnoise_Stim(t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = t_qual, freq_used = freq_used, do_csd = do_csd)
freq_used, amp_vec, mag, pha, ca, voltage, current, t1 = results.get('freq_used'), results.get('amp'), results.get('mag'), results.get('pha'), results.get('ca'), results.get('voltage'), results.get('current'), results.get('t1')
freq_times, spike_freq, fmean, method_interpol, SNR, VAF, Qual = results.get('freq_times'), results.get('spike_freq'), results.get('fmean'), results.get('method_interpol'), results.get('SNR'), results.get('VAF'), results.get('Qual')
stim, stim_re_mat, current_re, tk, K_mat_old = results.get('stim'), results.get('stim_re_mat'), results.get('current_re'), results.get('tk'), results.get('K_mat')
elif dowhat == "ssine":
            results = self.fun_ssine_Stim(freq_used = freq_used)
freq_used, amp_vec, mag, pha, ca, voltage, current, t1 = results.get('freq_used'), results.get('amp'), results.get('mag'), results.get('pha'), results.get('ca'), results.get('voltage'), results.get('current'), results.get('t1')
freq_times, spike_freq, fmean, method_interpol, VAF = results.get('freq_times'), results.get('spike_freq'), results.get('fmean'), results.get('method_interpol'), results.get('VAF')
tk = []
K_mat_old = []
# analyse
if self.id == 0:
print "Mean rate: " + str(fmean)
# Turn it off if set to zero
if SNR_switch == 0: SNR = None
if VAF_switch == 0: VAF = None
if t_qual > 0:
plt.figure("Reconstruct")
ax1 = subplot(2,1,1)
ax1.plot(np.arange(len(stim))*dt-1, current_re*1e3, 'b', linewidth=1)
ax1.plot(np.arange(len(stim))*dt-1, (stim)*1e3, 'k-', linewidth=1)
ax1.plot(np.arange(len(stim))*dt-1, (stim_re_mat[0,:])*1e3, 'r', linewidth=1, alpha=1)
#adjust_spines(ax1, ['left','bottom'], d_out = 10)
#ax1.axis(xmin=0, xmax=1)
#ax1.axis(ymin=8.3, ymax=10.7)
#ax1.yaxis.set_ticks(array([8.5,9,9.5,10,10.5]))
#ax1.set_title("Reconstruction")
#ax1.set_xlabel("s")
#ax1.set_ylabel("pA")
#ax1.text(0.15, 10.7, "Input current", color=color3, fontsize = 8)
#ax1.text(0.8, 10.7, "Signal", color="#000000", fontsize = 8)
#ax1.text(0.0, 8.2, "Reconstruction", color=color2, fontsize = 8)
ax2 = subplot(2,1,2)
ax2.plot(tk, K_mat_old[0], 'k', linewidth=1)
self.save_plot(directory = "./figs/dump/", prefix = "reconstruct")
plt.figure("Transfer")
currtitle = currlabel + " pop " + dowhat + ", " + self.celltype[self.a_celltype[0]]
ax = plot_transfer(currtitle, freq_used, mag, pha, t1, current, voltage[self.a_celltype[0]], freq_times, spike_freq, taum, fmean, self.ihold, rm, Vreset, Vth, Vrest, method_interpol, method_interpol_plot, SNR = SNR, VAF = VAF, ymax = self.ymax, ax = self.ax, linewidth = self.linewidth, color_vec = self.color_vec, alpha = self.alpha, opt_plot = opt_plot)
suptitle("Population transfer function of " + str(self.N[self.a_celltype[0]]) + " " + self.celltype[self.a_celltype[0]] + ", amp: " + str(np.round(self.amp[self.a_celltype[0]],4)) + ", amod: " + str(self.amod) + ", ih: " + str(np.round(self.ihold,4)) + ", ih_s: " + str(np.round(self.ihold_sigma,4)) + ", fm: " + str(np.round(fmean,2)) + ", fl_s: " + str(self.fluct_s))
return VAF, SNR, ax, tk, K_mat_old
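    # Illustrative use (hypothetical arguments, assuming a configured Population instance `pop`):
    #   VAF, SNR, ax, tk, K = pop.fun_plot(currlabel="control", dowhat="cnoise", t_stim=100*s,
    #                                      cutf=20, sexp=-1, opt_plot=np.array(["only_mag"]))
    # runs the coloured-noise protocol, estimates the population transfer function and plots it.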
def save_plot(self, directory = "./figs/dump/", prefix = " "):
        if self.id == 0:
from datetime import datetime
idate = datetime.now().strftime('%Y%m%d_%H%M') # %S
savefig(directory + idate + "-pop_transfer_" + prefix + "_" + self.celltype[self.a_celltype[0]] + "_N" + str(self.N[self.a_celltype[0]]) + "_ihold" + str(np.round(self.ihold,4)) + "_amp" + str(np.round(self.amp[self.a_celltype[0]],4)) + ".pdf", dpi = 300) # save it
def do_pca_ica(self, t_analysis_delay=0, t_analysis_stop=1, time=0, signals=0, output_dim=10, n_processes=32, n_chunks=32, do_ica=1, n_celltype = 0):
if self.use_mpi:
filepath = self.data_dir + "/" + str(self.pickle_prefix) + "_results_pop_pca_ica.p"
if self.do_run or (os.path.isfile(filepath) is False):
# PCA
# remove beginning
dt = time[2]-time[1]
t = time[int(t_analysis_delay/dt):int(t_analysis_stop/dt)]
pca_mat = np.array(signals[n_celltype]).T[int(t_analysis_delay/dt):int(t_analysis_stop/dt),:]
node = mdp.nodes.PCANode(output_dim=output_dim, svd=True)
# pad with zeros to be able to split into chunks!
n_add = n_chunks-np.remainder(np.shape(pca_mat)[0],n_chunks)
mat_add = np.zeros((n_add, np.shape(pca_mat)[1]))
pca_mat_add = np.concatenate((pca_mat, mat_add))
pca_mat_iter = np.split(pca_mat_add, n_chunks)
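                # The response matrix is zero-padded so it splits into n_chunks equal blocks;
                # mdp.parallel.ParallelFlow then trains the PCA node on the chunks in parallel.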
flow = mdp.parallel.ParallelFlow([node])
start_time = ttime.time()
with mdp.parallel.ProcessScheduler(n_processes=n_processes, verbose=True) as scheduler:
flow.train([pca_mat_iter], scheduler=scheduler) # input has to be list, why??
process_time = ttime.time() - start_time
s = np.array(flow.execute(pca_mat_iter))
s = s[0:len(t),:] # resize to length of t!
#print "node.d: ",node.d
var_vec = node.d/sum(node.d)
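                # node.d holds the PCA eigenvalues, so var_vec is the fraction of total variance
                # captured by each retained component.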
print 'Explained variance (', 0, ') : ', round(node.explained_variance,4)
print 'Variance (' , 0, ') : ', var_vec
print 'Time to run (' , 0, ') : ', process_time
s2 = []
if do_ica:
# ICA
#s2 = mdp.fastica(s)
ica = mdp.nodes.FastICANode() #CuBICANode()
ica.train(s)
s2 = ica(s)
results = {'t':t, 'pca':s,'pca_var':var_vec,'pca_var_expl':round(node.explained_variance,4), 'ica':s2}
if self.id == 0:
if self.dumpsave == 1:
pickle.dump( results, gzip.GzipFile( filepath, "wb" ) )
else:
if self.id == 0:
results = pickle.load( gzip.GzipFile( filepath, "rb" ) )
else:
# remove beginning
dt = time[2]-time[1]
t = time[int(t_analysis_delay/dt):int(t_analysis_stop/dt)]
pca_mat = np.array(signals[n_celltype]).T[int(t_analysis_delay/dt):int(t_analysis_stop/dt),:]
node = mdp.nodes.PCANode(output_dim=output_dim, svd=True)
start_time = ttime.time()
node.train(pca_mat)
s = node(pca_mat)
process_time = ttime.time() - start_time
#print "node.d: ",node.d
var_vec = node.d/sum(node.d)
print 'Explained variance (', 0, ') : ', round(node.explained_variance,4)
print 'Variance (' , 0, ') : ', var_vec
print 'Time to run (' , 0, ') : ', process_time
s2 = []
if do_ica:
# ICA
#s2 = mdp.fastica(s)
ica = mdp.nodes.FastICANode() #CuBICANode()
ica.train(s)
s2 = ica(s)
results = {'t':t, 'pca':s,'pca_var':var_vec,'pca_var_expl':round(node.explained_variance,4), 'ica':s2}
return results
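    # Illustrative use (hypothetical variables from a previous net_run call):
    #   res = pop.do_pca_ica(t_analysis_delay=1, t_analysis_stop=5, time=time, signals=gsyns,
    #                        output_dim=10, do_ica=1)
    #   res['pca'] and res['ica'] then hold the component time courses.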
def net_run(self, tstop, simprop = "default", t_analysis_delay=0, t_analysis_stop=1, stim_start=0):
freq_times = []
t_all_vec_vec = []
id_all_vec_vec = []
gsyns = []
w_mat = []
winh_mat = []
time = []
voltage = []
current = []
filepath = self.data_dir + "/" + str(self.pickle_prefix) + "_results_pop_randomnet.hdf5"
if self.do_run or (os.path.isfile(filepath) is False):
self.run(tstop)
self.no_fmean = True
results = self.get()
time, voltage, current, fmean, gsyn = results.get('time'), results.get('voltage'), results.get('current'), results.get('fmean'), results.get('gsyn')
freq_times, spike_freq, t_all_vec_vec, id_all_vec_vec, gsyns, w_mat, winh_mat = results.get('freq_times'), results.get('spike_freq'), results.get('t_all_vec_vec'), results.get('id_all_vec_vec'), results.get('gsyns'), results.get('w_mat'), results.get('winh_mat')
if self.id == 0:
if self.dumpsave == 1:
#pickle.dump( results, open( filepath, "wb" ) ) # gzip.GzipFile
print "- Saving", filepath
f = h5py.File(filepath, 'w')
f.create_dataset('time', data=time, compression='gzip', shuffle=True)
f.create_dataset('voltage', data=np.array(voltage), compression='gzip', shuffle=True)
f.create_dataset('current', data=current, compression='gzip', shuffle=True)
f.create_dataset('freq_times', data=freq_times, compression='gzip', shuffle=True)
#f.create_dataset('t_all_vec_vec', data=np.array(t_all_vec_vec), compression='lzf', shuffle=True)
#f.create_dataset('id_all_vec_vec', data=np.array(id_all_vec_vec), compression='lzf', shuffle=True)
#f.create_dataset('gsyns', data=np.array(gsyns), compression='lzf', shuffle=True)
for i in range(len(self.N)):
subgroup = f.create_group("cell" + str(i))
subgroup.create_dataset('t_all_vec_vec', data=t_all_vec_vec[i], compression='gzip', shuffle=True)
subgroup.create_dataset('id_all_vec_vec', data=id_all_vec_vec[i], compression='gzip', shuffle=True)
subgroup.create_dataset('g', data=gsyns[i], compression='gzip', shuffle=True)
#for j in range(len(gsyns[i])):
# subsubgroup = subgroup.create_group("gsyn" + str(j))
# subsubgroup.create_dataset('g', data=gsyns[i][j], compression='lzf', shuffle=True)
f.close()
print "- Save finished"
#filename = slugify(simprop)
#syn_grc = np.array(gsyns[0])
#import scipy
#from scipy import io
#print "Saving .mat"
#data = {}
#data['syn_grc'] = syn_grc[:,int(t_analysis_delay/self.bin_width):int(t_analysis_stop/self.bin_width)]
#data['time'] = freq_times[int(t_analysis_delay/self.bin_width):int(t_analysis_stop/self.bin_width)]-stim_start
#scipy.io.savemat('./figs/' + filename + '.mat',data)
else:
if self.id == 0:
#results = pickle.load( open( filepath, "rb" ) ) #gzip.GzipFile
f = h5py.File(filepath, 'r')
time = np.array(f['time'])
voltage = np.array(f['voltage'])
current = np.array(f['current'])
freq_times = np.array(f['freq_times'])
for i in range(len(self.N)):
t_all_vec_vec.append(np.array(f['/cell' + str(i) + '/t_all_vec_vec']))
id_all_vec_vec.append(np.array(f['/cell' + str(i) + '/id_all_vec_vec']))
gsyns.append(np.array(f['/cell' + str(i) + '/g']))
#gsyns.append([])
#for j in range(self.N[i]):
# gsyns[i].append(np.array(f['/cell' + str(i) + '/gsyn' + str(j) + '/g' ]))
f.close()
return time, voltage, current, t_all_vec_vec, id_all_vec_vec, gsyns, freq_times, w_mat, winh_mat
def delall(self):
if self.use_mpi:
self.pc.gid_clear()
print "- clearing gids"
else:
pass
#h.topology()
#for sec in h.allsec():
# print "- deleting section:", sec.name()
# #h("%s{delete_section()}"%sec.name())
# sec.push()
# h.delete_section()
#h.topology()
for n in range(self.n_celltypes):
for m in self.cells[n]:
m.destroy()
del m
del self.cells
del self.nc_vecstim
del self.netcons
del self.nclist
print h.topology()
def delrerun(self):
del self.nc_vecstim
del self.netcons
del self.nclist
del self.vecstim
del self.spike_vec
del self.ST_stims
del self.PF_stims
self.netcons = []
self.nclist = []
self.nc_vecstim = []
self.vecstim = []
self.spike_vec = []
self.ST_stims = []
self.PF_stims = []
self.t_vec = []
self.id_vec = []
self.rec_v = []
for n in range(self.n_celltypes):
if self.use_mpi:
self.t_vec.append(h.Vector()) # np.array([0])
self.id_vec.append(h.Vector()) # np.array([-1], dtype=int)
else:
self.t_vec.append([])
self.rec_v.append(h.Vector())
for cell in self.cells[n]:
self.t_vec[n].append(h.Vector())
cell.nc_spike.record(self.t_vec[n][-1])
self.flucts = [] # Fluctuating inputs on this host
self.noises = [] # Random number generators on this host
self.plays = [] # Play inputs on this host
self.rec_is = []
self.trains = []
self.ic_holds = []
self.i_holdrs = []
self.i_holds = []
self.ic_starts = []
self.vc_starts = []
self.ic_steps = []
self.tvecs = []
self.ivecs = []
self.noises = []
self.record_syn = []
self.id_all_vec_input = []
self.t_all_vec_input = []
self.syn_ex_dist = []
self.syn_inh_dist = []
# test code
if __name__ == '__main__':
# mpiexec -f ~/machinefile -enable-x -n 96 python Population.py --noplot
from Stimulation import *
from Plotter import *
from Stimhelp import *
from cells.IfCell import *
import scipy
from scipy import io
dt = 0.1*ms
dt = 0.025*ms
do_run = 1
if results.norun: # do not run again use pickled files!
print "- Not running, using saved files"
do_run = 0
do = np.array(["transfer"])
opts = np.array(["if_cnoise", "grc_cnoise"]) #ssine
#opts = np.array(["if_cnoise"]) #ssine
#opts = np.array(["if_recon"]) #ssine
opts = np.array(["if_syn_CFvec"])
#opts = np.array(["prk_cnoise"])
opts = np.array(["if_cnoise", "if_ssine"]) #ssine
opts = np.array(["if_ssine"]) #ssine
opts = np.array(["grc_cnoise_addn_cn_", "grc_cnoise_cn_", "grc_cnoise_addn_cn_a01"])
opts = np.array(["grc_cnoise_addn100_cn_", "grc_cnoise_addn_cn_", "grc_cnoise_cn_"])
opts = np.array(["grc_cnoise_addn100_cn_"])
opts = np.array(["grc_cnoise_addn100_"])
opts = np.array(["grc_cnoise_addn_cn_"])
#opts = np.array(["grc_cnoise"])
#opts = np.array(["grc_cnoise_cn", "grc_cnoise_addn_cn"])
#opts = np.array(["if_cnoise_addn", "if_cnoise"])
do = np.array(["timeconst"])
#do = np.array(["transfer"])
#opts = np.array(["grc_cnoise_syn"])
#opts = np.array(["grc_recon_syn"])
#do = np.array(["prk_test"])
if "prk_test" in do:
import multiprocessing
from Purkinje import Purkinje
cell = Purkinje()
# set up recording
# Time
rec_t = h.Vector()
rec_t.record(h._ref_t)
# Voltage
rec_v = h.Vector()
rec_v.record(cell.soma(0.5)._ref_v)
tstop = 500
v_init = -60
stim = h.IClamp(cell.soma(0.5))
stim.amp = 0.0/nA
stim.delay = 1
stim.dur = 1000
cpu = multiprocessing.cpu_count()
h.load_file("parcom.hoc")
p = h.ParallelComputeTool()
p.change_nthread(cpu,1)
p.multisplit(1)
print 'cpus:', cpu
h.load_file("stdrun.hoc")
h.celsius = 37
h.init()
h.tstop = tstop
dt = 0.025 # ms
h.dt = dt
h.steps_per_ms = 1 / dt
h.v_init = v_init
h.finitialize()
h.run()
t1 = np.array(rec_t)
voltage = np.array(rec_v)
s, spike_times = get_spikes(voltage, -20, t1)
print 1000/diff( spike_times)
plt.figure()
plt.subplot(2,1,1)
plt.plot(t1, voltage)
plt.show()
if "transfer" in do:
# SET DEFAULT VALUES FOR THIS PLOT
fig_size = [11.7, 8.3]
params = {'backend': 'ps', 'axes.labelsize': 9, 'axes.linewidth' : 0.5, 'title.fontsize': 8, 'text.fontsize': 9,
'legend.borderpad': 0.2, 'legend.fontsize': 8, 'legend.linewidth': 0.1, 'legend.loc': 'best', # 'lower right'
'legend.ncol': 4, 'xtick.labelsize': 8, 'ytick.labelsize': 8, 'text.usetex': False, 'figure.figsize': fig_size}
rcParams.update(params)
freq_used0 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 35, 40, 45, 50, 55, 60, 65, 70, 80, 100, 1000])*Hz
#freq_used0 = np.concatenate((arange(0.1, 1, 0.1), arange(1, 501, 1) ))
freq_used0 = np.array([1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 200, 400, 600, 800, 1000])
SNR = None
NI = None
VAF = None
t_stim = 1000*s # only for cnoise
opt_plot = np.array(["only_mag","normalize", "dB"]) #
#opt_plot = np.array(["normalize", "dB"]) #
color_vec = (np.array(["Red", "Blue", "HotPink", "Indigo"]), np.array(["Blue", "Orange", "HotPink", "Indigo"]))
#color=cm.jet(1.*i/x)
method_interpol = np.array(['bin','syn'])
method_interpol = np.array(['bin'])
for i, o in enumerate(opts):
dt = 0.025*ms
bin_width = 5*ms
bin_width = dt
jitter = 0*ms
n_syn_ex = [0]
g_syn_ex = [1]
noise_syn = 0
inh_hold = 0
n_syn_inh = [0]
g_syn_inh = [1]
tau1_ex = 0
tau2_ex = 10*ms
tau1_inh = 0
tau2_inh = 100*ms
cutf = 20
sexp = -1
cutf = 0
sexp = 0
ihold = [10]
amod = 0.1 # relative value
give_freq = True
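            # With give_freq = True, ihold is interpreted as a target firing rate in Hz and is
            # converted to a holding current from the f-I curve sampled between istart and istop.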
anoise = [0]
fluct_tau = 0*ms
N = [100]
amp = 0 # absolute value
fluct_s = [0] # absolute value 0.0008
ihold_sigma = [0] # 0.01 absolute value
CF_var = [[5,10,20]]
CF_var = False
syn_tau1 = 5*ms
syn_tau2 = 5*ms
do_csd = 1
if "if" in o:
do_csd = 1
color_vec = (np.array(["Blue"]), np.array(["Blue"]))
#color_vec = (np.array(["Red"]), np.array(["Red"]))
cellimport = []
celltype = ["IfCell"]
#cell_exe = ["cell = IfCell()"]
#cell_exe = ["cell = IfCell(e = -70*mV, thresh = -69*mV, vrefrac = -70*mV)"]
#cell_exe = ["cell = IfCell(e = 0*mV, thresh = 1*mV, vrefrac = 0*mV)"]
# Brunel
#cell_exe = ["cell = IfCell(C = 0.0005 *uF, R = 40*MOhm, e = -70*mV, thresh = -50*mV, vrefrac = -56*mV); cell.add_resonance(tau_r = 100*ms, gr = 0.025*uS)"]
#cell_exe = ["cell = IfCell(C = 0.0001*uF, R = 40*MOhm, sigma_C = 0.2, sigma_R = 0.2)"]
#cell_exe = ["cell = IfCell(C = 0.0001*uF, R = 40*MOhm)"] # tau = 4 ms
#cell_exe = ["cell = IfCell(C = 0.0001*uF, R = 40*MOhm, s_reset_noise = 0*mV)"] # tau = 4 ms
#GrC resting: 737 MOhm, 2.985e-06 uF tau: 0.0022 s
#GrC transfer fit: tau: 0.027 s => with 2.985e-06 uF, R = 0.027/2.985e-12 = 9045 MOhm
#cell_exe = ["cell = IfCell(C = 2.985e-06*uF, R = 9045*MOhm)"]
thresh = -41.8
R = 5227*MOhm
#tau_passive = 3e-06*5227 = 15.7ms
cell_exe = ["cell = IfCell(C = 3.0e-06*uF, R = " + str(R) + ", e = -71.5*mV, thresh =" + str(thresh) + ", vrefrac = -71.5*mV)"]
prefix = "if_tf"
istart = 0
istop = 0.01
di = 0.00001
syn_tau1 = 10*ms
syn_tau2 = 10*ms
# Indirect
give_freq = True
ihold = [40]
amod = 1 # relative value
anoise = [0]
fluct_tau = 0*ms
#anoise = 0.1
#fluct_tau = 100*ms
# # Direct
# give_freq = False
# ihold = [0.00569223341176]
# amod = None
# amp = 7.31353725e-06
#
# anoise = None
# fluct_s = [3.65676863e-06]
# fluct_tau = 0*ms
#
# # Low CF, No low noise
# N = [10000]
# give_freq = False
# ihold = [0.004]
# ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value
# amod = None
# amp = 0.0021
#
# anoise = None
# fluct_s = [0.00] # .005
# fluct_tau = 0*ms
# # Low CF, With low noise
# N = [10000]
# give_freq = False
# ihold = [0.002]
# ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value
# amod = None
# amp = 0.001
#
# anoise = None
# fluct_s = [0.002] # .005
# fluct_tau = 100*ms
if "resif" in o:
do_csd = 1
color_vec = (np.array(["Blue"]), np.array(["Blue"]))
#color_vec = (np.array(["Red"]), np.array(["Red"]))
cellimport = []
celltype = ["IfCell"]
gr = 5.56e-05*uS
tau_r = 19.6*ms
R = 5227*MOhm
delta_t = 4.85*ms
thresh = (0.00568*nA * R) - 71.5*mV #
thresh = -41.8
cellimport = []
celltype = "IfCell"
cell_exe = "cell = IfCell(C = 3e-06*uF, R = " + str(R) + ", e = -71.5*mV, thresh =" + str(thresh) + ", vrefrac = -71.5*mV, dgk =" + str(gr) + ", egk = -71.5*mV, ctau =" + str(tau_r) + ")"
prefix = "resif_tf"
istart = 0
istop = 0.01
di = 0.00001
syn_tau1 = 10*ms
syn_tau2 = 10*ms
# Indirect
give_freq = True
ihold = [40]
amod = 1 # relative value
anoise = [0]
fluct_tau = 0*ms
dt = 0.1*ms
if "if_syn" in o:
N = [1]
ihold = [40]
amod = 1 # relative value
prefix = "if_syntf"
n_syn_ex = 1
g_syn_ex = 0
noise_syn = 0
fluct_tau = 0*ms
freq_used = np.array([])
tau1_ex=0*ms
tau2_ex=10*ms
anoise = [0]
if "grc" in o:
color_vec = (np.array(["Blue"]), np.array(["Blue"]))
cellimport = ["from GRANULE_Cell import Grc"]
celltype = ["Grc"]
cell_exe = ["cell = Grc(np.array([0.,0.,0.]))"]
prefix = "grc_tf"
istart = 0
istop = 0.1
di = 0.01
syn_tau1 = 10*ms
syn_tau2 = 10*ms
# Indirect
give_freq = True
ihold = [40]
amod = 1 # relative value
anoise = [0]
fluct_tau = 0*ms
#anoise = 0.1
#fluct_tau = 100*ms
# # Direct
# give_freq = False
# ihold = [0.0058021085712642992]
# amod = None
# amp = 7.31353725e-06
#
# anoise = None
# fluct_s = [3.65676863e-06]
# fluct_tau = 0*ms
#
# # Low CF, No low noise
# N = [50]
# give_freq = False
# ihold = [0.0049]
# ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value
# amod = None
# amp = 0.0021
#
# anoise = None
# fluct_s = [0.00] # .005
# fluct_tau = 0*ms
#
#
# # Low CF, With low noise
# N = [10000]
# give_freq = False
# ihold = [0.003]
# ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value
# amod = None
# amp = 0.001
#
# anoise = None
# fluct_s = [0.002] # .005
# fluct_tau = 100*ms
use_multisplit = False
use_mpi = True
simstep = 1*s
if "prk" in o:
N = [1]
ihold = [60]
color_vec = (np.array(["Blue"]), np.array(["Blue"]))
cellimport = ["from Purkinje import Purkinje"]
celltype = ["Prk"]
cell_exe = ["cell = Purkinje()"]
prefix = "prk_tf"
temperature = 37
istart = 0
istop = 0.1
di = 0.005
use_multisplit = True
use_mpi = False
t_stim = 5*s # only for cnoise
simstep = 1*s
if "grc_syn" in o:
N = [1]
ihold = [125]
amod = 1 # relative value
prefix = "grc_syntf"
cutf = 20
sexp = -1
cutf = 0
sexp = 0
n_syn_ex = 1
g_syn_ex = -1
noise_syn = 1
n_syn_inh = -1
inh_hold = 0
g_syn_inh = 0
fluct_tau = 0*ms
freq_used = np.array([])
anoise = 0
if "_addn" in o:
anoise = [6] # RESPONSIBLE FOR FILTERING EFFECT!!!
fluct_tau = 1*ms
prefix = prefix + "_addn"
color_vec = (np.array(["Red"]), np.array(["Red"]))
if "_addn100" in o:
anoise = [2] # RESPONSIBLE FOR FILTERING EFFECT!!!
fluct_tau = 100*ms
prefix = prefix + "100"
color_vec = (np.array(["Green"]), np.array(["Green"]))
if "_cn_" in o:
cutf = 20
sexp = -1
prefix = prefix + "_cn"
if "_a01" in o:
amod=0.1
prefix = prefix + "_a01"
plt.figure(i)
pickle_prefix = "Population.py_" + prefix
#comm = MPI.COMM_WORLD
#comm.Barrier() # wait for other nodes
pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, ihold = ihold, ihold_sigma = ihold_sigma, amp = amp, amod = amod, give_freq = give_freq, do_run = do_run, pickle_prefix = pickle_prefix, istart = istart, istop = istop, di = di, dt = dt)
pop.bin_width = bin_width
pop.jitter = jitter
pop.anoise = anoise
pop.fluct_s = fluct_s
pop.fluct_tau = fluct_tau
pop.method_interpol = method_interpol
pop.no_fmean = False
pop.CF_var = CF_var
pop.tau1_ex=tau1_ex
pop.tau2_ex=tau2_ex
pop.tau1_inh=tau1_inh
pop.tau2_inh=tau2_inh
pop.n_syn_ex = n_syn_ex
pop.g_syn_ex = g_syn_ex
pop.noise_syn = noise_syn
pop.inh_hold = inh_hold
pop.n_syn_inh = n_syn_inh
pop.g_syn_inh = g_syn_inh
pop.force_run = False
pop.use_multisplit = use_multisplit
pop.use_mpi = use_mpi
pop.simstep = simstep
pop.use_local_dt = False
pop.syn_tau1 = syn_tau1
pop.syn_tau2 = syn_tau2
pop.plot_input = False
if n_syn_inh == -1:
pop.connect_gfluct(g_i0=g_syn_inh)
#pop.test_mod(n_syn_ex = n_syn_ex, g_syn_ex = g_syn_ex, noise_syn = noise_syn, inh_hold = inh_hold, n_syn_inh = n_syn_inh, g_syn_inh = g_syn_inh, do_plot = True)
if "ssine" in o:
pop.color_vec = color_vec
#pop.color_vec = (np.array(["Red", "Orange", "HotPink", "Indigo"]), np.array(["Red", "Orange", "HotPink", "Indigo"]))
pop.fun_plot(currlabel = "control", dowhat = "ssine", freq_used = freq_used0, opt_plot = opt_plot)
pop.save_plot(directory = "./figs/dump/")
if "cnoise" in o:
freq_used = np.array([])
pop.color_vec = color_vec
#pop.color_vec = (np.array(["Blue", "Green", "DimGray", "DarkGoldenRod"]), np.array(["Blue", "Green", "DimGray", "DarkGoldenRod"]))
pop.fun_plot(currlabel = "control", dowhat = "cnoise", t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = 0, opt_plot = opt_plot, freq_used = freq_used, do_csd = do_csd)
pop.save_plot(directory = "./figs/dump/")
if "recon" in o:
pop.color_vec = color_vec
#VAF, SNR, ax, tk, K_mat_old = pop.fun_plot(currlabel = "control", dowhat = "cnoise", t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = 0, opt_plot = opt_plot, n_syn_ex = n_syn_ex, g_syn_ex = g_syn_ex, noise_syn = noise_syn, inh_hold = inh_hold, n_syn_inh = n_syn_inh, g_syn_inh = g_syn_inh, SNR=0, freq_used = freq_used)
# RECONSTRUCT!
freq_used = np.array([9, 47, 111, 1000])*Hz
t_stim = 10*s
tk = arange(0,0.8192*2,pop.dt)
K_mat_old = zeros((len(method_interpol),len(tk)), dtype=complex)
if pop.id == 0:
sigma = 0.1e-3
a=0.1
t0 = tk[floor(len(tk)/2)]
K_mat_old[0] = gauss_func(tk, a, t0, sigma)
K_mat_old = np.array([])
results = pop.fun_cnoise_Stim(t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = 5, n_syn_ex = n_syn_ex, g_syn_ex = g_syn_ex, noise_syn = noise_syn, inh_hold = inh_hold, n_syn_inh = n_syn_inh, g_syn_inh = g_syn_inh, freq_used = freq_used, K_mat_old = K_mat_old, seed = 311)
freq_used, amp_vec, mag, pha, ca, voltage, current, t1 = results.get('freq_used'), results.get('amp'), results.get('mag'), results.get('pha'), results.get('ca'), results.get('voltage'), results.get('current'), results.get('t1')
freq_times, spike_freq, fmean, method_interpol, SNR, VAF, Qual = results.get('freq_times'), results.get('spike_freq'), results.get('fmean'), results.get('method_interpol'), results.get('SNR'), results.get('VAF'), results.get('Qual')
stim, resp_mat, stim_re_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat')
if pop.id == 0:
plt.figure('Reconstruct')
axR0 = plt.subplot(4,1,1)
axR1 = plt.subplot(4,1,2)
axR2 = plt.subplot(4,1,3)
axR3 = plt.subplot(4,1,4)
axR0.plot(np.arange(len(stim))*pop.dt, resp_mat[0,:])
axR0.axis(xmin=0.9, xmax=1)
#axR0.plot(t1, voltage[0])
axR1.plot(np.arange(len(stim))*pop.dt, stim, 'b')
axR1.axis(xmin=0.9, xmax=1)
axR2.plot(np.arange(len(stim))*pop.dt, stim_re_mat[0,:], 'r')
axR2.axis(xmin=0.9, xmax=1)
axR3.plot(tk, K_mat_old[0])
plt.savefig("./figs/dump/Reconstruct.pdf", dpi = 300, transparent=True) # save it
pop = None
plt.show()
if "timeconst" in do:
from lmfit import minimize, Parameters
# SET DEFAULT VALUES FOR THIS PLOT
fig_size = [11.7, 8.3]
params = {'backend': 'ps', 'axes.labelsize': 9, 'axes.linewidth' : 0.5, 'title.fontsize': 8, 'text.fontsize': 9,
'legend.borderpad': 0.2, 'legend.fontsize': 8, 'legend.linewidth': 0.1, 'legend.loc': 'best', # 'lower right'
'legend.ncol': 4, 'xtick.labelsize': 8, 'ytick.labelsize': 8, 'text.usetex': False, 'figure.figsize': fig_size}
rcParams.update(params)
dt = 0.025*ms
prefix = "timeconst"
pickle_prefix = "Population.py_" + prefix
stimtype = "inh_50ms_20ms"
if stimtype == "ex_20ms":
trun = 2.9
tstart = 1.8
tstop = 2.7
celltype = ["IfCell"]
cell_exe = ["cell = IfCell(C = 0.0001*uF, R = 200*MOhm)"]
N = [5000]
pop = Population(celltype = celltype, cell_exe = cell_exe, N = N, temperature = 0, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt)
pop.method_interpol = np.array(["bin", "syn"])
pop.method_interpol = np.array(["bin"])
modulation_vec = pop.set_PulseStim(start_time=[100*ms], dur=[3000*ms], steadyf=[100*Hz], pulsef=[150*Hz], pulse_start=[2000*ms], pulse_len=[500*ms], weight0=[1*nS], tau01=[0*ms], tau02=[20*ms], weight1=[0*nS], tau11=[0*ms], tau12=[1*ms])
params = Parameters()
params.add('amp', value=0.1)
params.add('shift', value=10)
params.add('tau1', value=1, vary=False) # alpha!
params.add('tau2', value=20*ms)
if stimtype == "ex_gr":
trun = 6.9
tstart = 4.8
tstop = 6.5
cellimport = ["from GRANULE_Cell import Grc"]
celltype = ["Grc"]
cell_exe = ["cell = Grc(np.array([0.,0.,0.]))"]
N = [4096*10]
pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt)
pop.method_interpol = np.array(["bin", "syn"])
pop.method_interpol = np.array(["bin"])
modulation_vec = pop.set_PulseStim(start_time=[100*ms], dur=[7000*ms], steadyf=[20*Hz], pulsef=[30*Hz], pulse_start=[5000*ms], pulse_len=[500*ms])
params = Parameters()
params.add('amp', value=0.1)
params.add('shift', value=10)
params.add('tau1', value=1, vary=False) # alpha!
params.add('tau2', value=20*ms)
if stimtype == "inh_50ms_20ms":
trun = 2.9
tstart = 1.8
tstop = 2.7
celltype = ["IfCell", "IfCell"]
cell_exe = ["cell = IfCell()", "cell = IfCell()"]
N = [10000,10000]
pop = Population(celltype = celltype, cell_exe = cell_exe, N = N, temperature = 0, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt)
pop.method_interpol = np.array(["bin", "syn"])
pop.method_interpol = np.array(["bin"])
modulation_vec = pop.set_PulseStim(start_time=[100*ms,100*ms], dur=[3000*ms,3000*ms], steadyf=[100*Hz,50*Hz], pulsef=[100*Hz,80*Hz], pulse_start=[2000*ms,2000*ms], pulse_len=[500*ms,500*ms], weight0=[1*nS,1*nS], tau01=[1*ms,1*ms], tau02=[20*ms,20*ms], weight1=[0,0], tau11=[0*ms,0*ms], tau12=[1*ms,1*ms])
pop.connect_cells(conntype='inh', weight=0.001, tau=50)
params = Parameters()
params.add('amp', value=-0.1)
params.add('shift', value=10)
params.add('tau1', value=1, vary=False) # alpha!
params.add('tau2', value=20*ms)
if stimtype == "inh_gr":
trun = 9.9
tstart = 4.8
tstop = 8
cellimport = ["from GRANULE_Cell import Grc", "from templates.golgi.Golgi_template import Goc"]
celltype = ["Grc","Goc_noloop"]
cell_exe = ["cell = Grc(np.array([0.,0.,0.]))","cell = Goc(np.array([0.,0.,0.]))"]
N = [100,4]
#N = [4096, 27]
#N = [4096*5, 27*5]
pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt)
pop.method_interpol = np.array(["bin", "syn"])
pop.method_interpol = np.array(["bin"])
modulation_vec = pop.set_PulseStim(start_time=[100*ms,100*ms], dur=[9800*ms,9800*ms], steadyf=[60*Hz,10*Hz], pulsef=[60*Hz,22*Hz], pulse_start=[5000*ms,5000*ms], pulse_len=[1500*ms,1500*ms])
pop.connect_cells(conntype='inh_gr', weight = 0.3)
params = Parameters()
params.add('amp', value=-0.1)
params.add('shift', value=10)
params.add('tau1', value=1, vary=False) # alpha!
params.add('tau2', value=20*ms)
if stimtype == "inh_50ms_curr":
trun = 2.9
tstart = 1.8
tstop = 2.8
celltype = ["IfCell", "IfCell"]
cell_exe = ["cell = IfCell()", "cell = IfCell()"]
N = [1000,1000]
give_freq = True
istart = 0
istop = 0.2
di = 0.01
ihold = [100, 50]
ihold_sigma = [0.01, 0.01] # relative sigma
pop = Population(celltype = celltype, cell_exe = cell_exe, N = N, temperature = 0, ihold = ihold, ihold_sigma = ihold_sigma, give_freq = give_freq, do_run = do_run, pickle_prefix = pickle_prefix, istart = istart, istop = istop, di = di, dt = dt)
pop.method_interpol = np.array(["bin", "syn"])
pop.method_interpol = np.array(["bin"])
tstep = 2
tdur = 0.5
istep = [100,100]
current1 = np.concatenate(([ihold[1]*np.ones(round((tstep)/pop.dt)), istep[1]*np.ones(round(tdur/pop.dt)),ihold[1]*np.ones(round((trun-tstep-tdur)/pop.dt)) ]))
pop.set_IStim()
pop.set_IStep(istep = istep, istep_sigma = [0.01,0.01], tstep = tstep, tdur = tdur)
pop.connect_cells(conntype='inh', weight=0.0003, tau=50)
pop.fluct_s = [0.02,0.05]
pop.connect_fluct()
params = Parameters()
params.add('amp', value=-0.1)
params.add('shift', value=10)
params.add('tau1', value=1, vary=False) # alpha!
params.add('tau2', value=20*ms)
if stimtype == "inh_gr_curr":
trun = 9.9
tstart = 4.8
tstop = 8
cellimport = ["from GRANULE_Cell import Grc", "from templates.golgi.Golgi_template import Goc"]
celltype = ["Grc","Goc_noloop"]
cell_exe = ["cell = Grc(np.array([0.,0.,0.]))","cell = Goc(np.array([0.,0.,0.]))"]
N = [100,4]
N = [4096, 27]
N = [4096*10, 27*10]
give_freq = True
# GRC
#istart = 0
#istop = 0.1
#di = 0.01
#GOC
istart = 0
istop = 0.5
di = 0.02
ihold = [100, 10]
ihold_sigma = [0, 0] # relative sigma
pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, ihold = ihold, ihold_sigma = ihold_sigma, give_freq = give_freq, do_run = do_run, pickle_prefix = pickle_prefix, istart = istart, istop = istop, di = di, dt = dt)
pop.method_interpol = np.array(["bin", "syn"])
pop.method_interpol = np.array(["bin"])
tstep = 5
tdur = 2
istep = [100,50]
current1 = np.concatenate(([ihold[1]*np.ones(round((tstep)/pop.dt)), istep[1]*np.ones(round(tdur/pop.dt)),ihold[1]*np.ones(round((trun-tstep-tdur)/pop.dt)) ]))
pop.set_IStim()
pop.set_IStep(istep = istep, istep_sigma = [0,0], tstep = tstep, tdur = tdur)
pop.connect_cells(conntype='inh_gr', weight = 0.4)
pop.fluct_s = [0.05,2]
pop.connect_fluct()
params = Parameters()
params.add('amp', value=-0.1)
params.add('shift', value=10)
params.add('tau1', value=1, vary=False) # alpha!
params.add('tau2', value=20*ms)
pop.run_steps(trun)
        pop.no_fmean = True
results = pop.get()
time, voltage, current, fmean, gsyn = results.get('time'), results.get('voltage'), results.get('current'), results.get('fmean'), results.get('gsyn')
freq_times, spike_freq, t_all_vec_vec, id_all_vec_vec, gsyns = results.get('freq_times'), results.get('spike_freq'), results.get('t_all_vec_vec'), results.get('id_all_vec_vec'), results.get('gsyns')
if pop.id == 0:
bin_width = 1*ms
freq_times = arange(0, time[-1], bin_width)
[num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[0], bins = freq_times)
spike_freq = np.concatenate((zeros(1),num_spikes)) / bin_width / N[0]
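            # Spike counts per 1 ms bin are converted to the average single-cell rate in Hz by
            # dividing by the bin width and the population size N[0].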
if "inh" in stimtype: # generate input current, to complicated to get it out
if "curr" in stimtype:
time1 = np.arange(0, trun, pop.dt)
r_mod = interp(freq_times, time1, current1, left=0, right=0)
[num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[1], bins = freq_times)
spike_freq1 = np.concatenate((zeros(1),num_spikes)) / bin_width / N[1]
else:
r_mod = interp(freq_times, modulation_vec[1][0], modulation_vec[1][1], left=0, right=0)
[num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[1], bins = freq_times)
spike_freq1 = np.concatenate((zeros(1),num_spikes)) / bin_width / N[1]
elif "ex" in stimtype:
r_mod = interp(freq_times, modulation_vec[0][0], modulation_vec[0][1], left=0, right=0)
def modelfun(amp, shift, tau1, tau2, bin_width, r_mod):
tau1 = tau1
tau2 = tau2
t1 = np.arange(0,10*tau2,bin_width)
K = amp*syn_kernel(t1, tau1, tau2)
K = np.concatenate((np.zeros(len(K)-1),K))
t2 = np.arange(0,len(K)*bin_width,bin_width)
model = np.convolve(K, r_mod, mode='same') + shift
return model
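            # modelfun predicts the population rate as shift + (amp * alpha-like kernel) convolved
            # with the input rate r_mod; tau1 and tau2 set the kernel rise and decay times.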
def residual(params, r_mod, data=None, bin_width=1*ms, tstart=0, tstop=3):
amp = params['amp'].value
shift = params['shift'].value
tau1 = params['tau1'].value
tau2 = params['tau2'].value
model = modelfun(amp, shift, tau1, tau2, bin_width, r_mod)
return (data[int(tstart/bin_width):int(tstop/bin_width)]-model[int(tstart/bin_width):int(tstop/bin_width)])
result = minimize(residual, params, args=(r_mod, spike_freq, bin_width, tstart, tstop))
print "chisqr: ", result.chisqr
print 'Best-Fit Values:'
for name, par in params.items():
print ' %s = %.4f +/- %.4f ' % (name, par.value, par.stderr)
amp = params['amp'].value
shift = params['shift'].value
tau1 = params['tau1'].value
tau2 = params['tau2'].value
model = modelfun(amp, shift, tau1, tau2, bin_width = bin_width, r_mod = r_mod)
if "ex" in stimtype:
plt.figure(0)
plt.plot(freq_times[int(0.5/bin_width):int(trun/bin_width)], spike_freq[int(0.5/bin_width):int(trun/bin_width)], freq_times[int(0.5/bin_width):int(trun/bin_width)], model[int(0.5/bin_width):int(trun/bin_width)])
plt.figure(1)
                plt.plot(time, voltage[0], freq_times, r_mod, time, current)
#plt.figure(100)
#plt.plot(t_all_vec_vec[0],id_all_vec_vec[0],'k|')
#plt.savefig("./figs/dump/taufit_" + str(stimtype) + "_spikes.pdf", dpi = 300) # save it
else:
plt.figure(0)
plt.plot(freq_times[int(0.5/bin_width):int(trun/bin_width)], spike_freq1[int(0.5/bin_width):int(trun/bin_width)], freq_times[int(0.5/bin_width):int(trun/bin_width)], spike_freq[int(0.5/bin_width):int(trun/bin_width)], freq_times[int(0.5/bin_width):int(trun/bin_width)], model[int(0.5/bin_width):int(trun/bin_width)])
plt.figure(1)
plt.plot(time, voltage[0], time, voltage[1], freq_times, r_mod, time, current)
plt.figure(100)
#plt.plot(t_all_vec_vec[0],id_all_vec_vec[0],'k|')
#plt.plot(t_all_vec_vec[1],id_all_vec_vec[1],'b|')
#plt.savefig("./figs/dump/taufit_" + str(stimtype) + "_spikes.pdf", dpi = 300) # save it
plt.figure(0)
plt.title('Fit: ' + str(stimtype) + ', tau1=' + str(tau1) + ' tau2=' + str(tau2))
plt.savefig("./figs/dump/taufit_" + str(stimtype) + "_rate.png", dpi = 300) # save it
plt.figure(1)
plt.savefig("./figs/dump/taufit_" + str(stimtype) + "_voltage.png", dpi = 300) # save it
plt.show()
tau2 = conn['tau2']\n \n if 'mgr2' in conn.keys():\n mgr2 = conn['mgr2']\n mgr2_var = conn['mgr2_var']\n else:\n mgr2 = 0\n mgr2_var = 0\n \n if 'e_inh' in conn.keys(): \n e_inh = conn['e_inh']\n else:\n e_inh = -65\n \n if 'e_ex' in conn.keys(): \n e_ex = conn['e_ex']\n else:\n e_ex = 0\n \n wmax = stdp[i]['wmax']\n taupre = stdp[i]['taupre']\n taupost = stdp[i]['taupost']\n apre = stdp[i]['apre']\n apost = stdp[i]['apost']\n \n # Connect conv cells of celltype src to every cell of celltype tgt\n for ni, i in enumerate(self.cells[tgt]):\n \n rnd.seed(i.gid*10*self.seed)\n \n if conv >= len(self.global_gidlist[src]):\n gids = self.global_gidlist[src]\n if self.id == 0: print \"more or equal conv to len(self.global_gidlist[src])\"\n else:\n gids = rnd.sample(self.global_gidlist[src],conv) \n \n if self.id == 0: print conn['type'], \":\", ni, \":\", gids[0], \"\\n\"\n \n for ng, g in enumerate(gids):\n \n np.random.seed(g*12) \n #np.random.seed(int(g%10+1)*12) \n \n if len(shape(w0))>0: # array is given\n print \"w array is given\"\n \n if len(w0[ng]) == self.N[0]:\n w = w0[ng][ni]\n \n elif (var > 0) and (w0>0):\n w = np.random.normal(w0, w0*var, 1).clip(min=0)\n else:\n w = w0\n \n if (mgr2_var > 0) and (mgr2>0):\n mg = np.random.normal(mgr2, mgr2*mgr2_var, 1).clip(min=0)\n else:\n mg = mgr2\n \n \n #print conn['type'], \":\", i.gid, \":\", g, \", w:\", w, \"\\n\"\n \n if self.celltype[tgt] == 'IfCell':\n \n if typ == 'gogr':\n \n i.whatami = \"grc\"\n i.synlist_inh.append(Synapse('goc', i, i.soma, nrel=0, record_all=0, weight_gmax=w))\n i0 = int(len(i.synlist_inh)-1)\n \n i.nc_inh.append(self.pc.gid_connect(g, i.synlist_inh[i0].input))\n i.nc_inh[-1].delay = 1\n i.nc_inh[-1].weight[0] = 1\n \n if typ == 'grgo':\n \n i.whatami = \"goc\"\n i.synlist.append(Synapse('grc', i, i.soma, syntype = 'D', nrel=0, record_all=0, weight_gmax=w))\n e0 = int(len(i.synlist)-1)\n \n i.nc.append(self.pc.gid_connect(g, i.synlist[e0].input))\n i.nc[-1].delay = 1\n i.nc[-1].weight[0] = 1\n \n if typ == 'grgom':\n \n i.whatami = \"goc\"\n i.synlist.append(Synapse('grc', i, i.soma, syntype = 'DM', nrel=0, record_all=0, weight_gmax=w, mglufac = mg))\n e0 = int(len(i.synlist)-1)\n \n i.nc.append(self.pc.gid_connect(g, i.synlist[e0].input))\n i.nc[-1].delay = 1\n i.nc[-1].weight[0] = 1\n \n \n if typ == 'e2inh':\n \n i.create_synapses(n_inh=1, tau1_inh=tau1, tau2_inh=tau2, e_inh=e_inh, w = w, wmax = wmax, taupre = taupre, taupost = taupost, apre = apre, apost = apost, tend=tend)\n i0 = len(i.synlist_inh)-1\n \n if self.use_mpi:\n if wmax == 0:\n i.pconnect_target(self.pc, source=g, target=i0, syntype='inh', weight=w, delay=1)\n else:\n i.pconnect_target(self.pc, source=g, target=i0, syntype='inh', weight=1, delay=1)\n \n else: \n if wmax == 0:\n i.nc_inh.append(self.cells[1][g-self.N[0]].connect_target(target=i.synlist_inh[i0], weight=w, delay=1))\n else:\n i.nc_inh.append(self.cells[1][g-self.N[0]].connect_target(target=i.synlist_inh[i0], weight=1, delay=1))\n \n if typ == 'e2ex':\n \n i.create_synapses(n_ex = 1, tau1 = tau1, tau2 = tau2, e_ex=e_ex, w = w, wmax = wmax, taupre = taupre, taupost = taupost, apre = apre, apost = apost, tend=tend)\n e0 = len(i.synlist)-1\n \n if self.use_mpi:\n if wmax == 0:\n i.pconnect_target(self.pc, source=g, target=e0, syntype='ex', weight=w, delay=1) \n else:\n i.pconnect_target(self.pc, source=g, target=e0, syntype='ex', weight=1, delay=1) \n \n else: \n if wmax == 0:\n i.nc.append(self.cells[0][g].connect_target(target=i.synlist[e0], weight=w, delay=1))\n else:\n 
i.nc.append(self.cells[0][g].connect_target(target=i.synlist[e0], weight=1, delay=1))\n \n else: # No IfCell\n \n if typ == 'gogr':\n i.createsyn(ngoc = 1, weight_gmax=w) # multiplication factor\n i0 = len(i.GOC_L)-1 # get number of current synapse!\n i.pconnect(self.pc,g,i0,'goc')\n \n if typ == 'grgo':\n i.createsyn(ngrc = 1, weight_gmax=w) # multiplication factor\n i0 = len(i.GRC_L)-1 # get number of current synapse!\n i.pconnect(self.pc,g,i0,'grc',conduction_speed=0,grc_positions=[1])\n \n if typ == 'grgom':\n #print w, mg\n i.createsyn(ngrcm = 1, weight_gmax=w, mglufac = mg) # multiplication factor\n i0 = len(i.GRC_L)-1 # get number of current synapse!\n i.pconnect(self.pc,g,i0,'grc',conduction_speed=0,grc_positions=[1])\n \n if typ == 'grstl':\n i.createsyn(ngrc = 1, weight_gmax=w) # multiplication factor\n i0 = len(i.GRC_L)-1 # get number of current synapse!\n i.pconnect(self.pc,g,i0,'grc',conduction_speed=0,grc_positions=[1])\n \n \n if 'e2' in typ:\n \n if 'inh' in typ:\n Erev = -65\n elif 'ex' in typ:\n Erev = 0\n \n if tau1 == 0:\n syn = h.ExpSyn(i.soma(0.5))\n syn.tau = tau2/ms\n else: \n if wmax == 0:\n syn = h.Exp2Syn(i.soma(0.5))\n syn.tau1 = tau1/ms\n syn.tau2 = tau2/ms\n \n else: # STDP\n syn = h.stdpE2S(i.soma(0.5))\n syn.tau1 = tau1/ms\n syn.tau2 = tau2/ms\n \n syn.on = 1\n syn.thresh = -20\n \n syn.wmax = wmax\n syn.w = w\n \n syn.taupre = taupre/ms\t\n syn.taupost = taupost/ms\n syn.apre = apre\n syn.apost = apost\n \n syn.e = Erev/mV\n \n if self.celltype[tgt] == 'Grc':\n \n i.GOC_L.append(syn)\n i0 = int(len(i.GOC_L)-1) # get number of current synapse!\n \n i.gocncpc.append(self.pc.gid_connect(g, i.GOC_L[i0]))\n i.gocncpc[-1].delay = 1\n \n if wmax == 0:\n i.gocncpc[-1].weight[0] = w\n else:\n i.gocncpc[-1].weight[0] = 1\n \n elif self.celltype[tgt] == 'Goc':\n \n i.GRC_L.append(syn)\n e0 = int(len(i.GRC_L)-1) # get number of current synapse!\n \n i.pfncpc.append(self.pc.gid_connect(g, i.GRC_L[e0]))\n i.pfncpc[-1].delay = 1\n i.pfncpc[-1].weight[0] = w\n \n if wmax == 0:\n i.pfncpc[-1].weight[0] = w\n else:\n i.pfncpc[-1].weight[0] = 1\n \n #self.rec_s1 = h.Vector()\n #self.rec_s1.record(self.cells[0][0].synlist_inh[0]._ref_g) \n #self.rec_s2 = h.Vector()\n #self.rec_s2.record(self.cells[1][0].synlist_inh[0]._ref_g) \n \n \n def syn_output(self):\n \"\"\"\n Connect cell n to target cell sum(self.N) + 100.\n \"\"\"\n \n if self.id == 0: # create target cell\n\n tgt_gid = self.gid_count\n self.gid_count = self.gid_count + 1 \n \n # Synaptic integrated response\n self.rec_g = h.Vector()\n self.passive_target = PassiveCell()\n if self.use_mpi: self.pc.set_gid2node(tgt_gid, 0) # Tell this host it has this gid\n \n syn = self.passive_target.create_synapses(tau1 = self.syn_tau1, tau2 = self.syn_tau2) # if tau1=tau2: alpha synapse!\n \n for i in range(self.n_borders[self.a_celltype[0]],self.n_borders[self.a_celltype[0]+1]): # take all cells, corresponding to self.a_celltype, not just the ones in self.gidlist:\n \n src_gid = i\n \n if self.use_mpi:\n nc = self.pc.gid_connect(src_gid, syn)\n nc.weight[0] = 1\n nc.delay = self.nc_delay/ms #0.05 # MUST be larger than dt!!!\n \n else:\n nc = self.cells[self.a_celltype[0]][src_gid].connect_target(target=syn, weight=1, delay=self.nc_delay/ms)\n \n self.nclist.append(nc) \n \n self.rec_g.record(syn._ref_g)\n \n \n def syn_out_all(self, tau1 = 1*ms, tau2 = 30*ms):\n \n if self.do_run:\n \n for n in range(self.n_celltypes): \n for i, gid in enumerate(self.gidlist[n]):\n \n self.cells[n][i].start_record(tau1 = tau1/ms, tau2 = 
tau2/ms)\n \n self.called_syn_out_all = True\n \n \n def get_i(self, a, n, do_plot = True):\n \n import md5\n m = md5.new()\n \n if \", sigma\" in self.cell_exe[n]: \n cell_exe_new = self.cell_exe[n].split(\", sigma\")[0] + \")\"\n else:\n cell_exe_new = self.cell_exe[n]\n \n m.update(cell_exe_new)\n filename = self.data_dir + '/if_' + self.celltype[n] + '_' + m.hexdigest() + '.p'\n \n #print filename\n \n if self.id == 0:\n is_there = os.path.isfile(filename)\n else:\n is_there = None\n \n is_there = self.broadcast(is_there)\n \n if (is_there is not True) or (self.force_run is True): # run i/f estimation\n \n if self.id == 0: print '- running i/f estimation for ', self.celltype[n], ' id: ' , m.hexdigest() \n exec self.cellimport[n]\n exec cell_exe_new\n sim = Stimulation(cell, temperature = self.temperature, use_multisplit = self.use_multisplit)\n sim.spikes_from_neuron = False\n sim.celltype = self.celltype[n]\n current_vector, freq_vector, freq_onset_vector = sim.get_if(istart = self.istart, istop = self.istop, di = self.di, tstop = self.tstop_if) \n \n sim = None\n cell = None\n \n if self.id == 0:\n if do_plot:\n plt.figure(99)\n plt.plot(current_vector, freq_vector, 'r*-')\n plt.plot(current_vector, freq_onset_vector, 'b*-')\n plt.savefig(\"./figs/dump/latest_if_\" + self.celltype[n] + \".pdf\", dpi = 300) # save it \n plt.clf()\n #plt.show()\n \n ifv = {'i':current_vector,'f':freq_vector}\n print ifv\n \n pickle.dump(ifv, gzip.GzipFile(filename, \"wb\" ))\n \n self.barrier()\n \n else:\n \n if self.id == 0: \n ifv = pickle.load(gzip.GzipFile(filename, \"rb\" ))\n #print ifv\n \n self.barrier()\n \n if self.id == 0:\n \n f = ifv.get('f') \n i = ifv.get('i')\n \n i = i[~isnan(f)]\n f = f[~isnan(f)]\n \n iin = if_extrap(a, f, i)\n \n else:\n \n iin = [0]\n \n iin = self.broadcast(iin, root=0, fast = True)\n self.barrier()\n \n return iin\n\n\n def set_i(self, ihold = [0]):\n \n ihold = list(ihold)\n self.ihold_orig = list(ihold)\n \n self.barrier() # wait for other nodes\n \n # Ihold given as frequency, convert to current\n \n if ((self.give_freq)): \n \n ihold0 = [[] for _ in range(self.n_celltypes)]\n \n for n in range(self.n_celltypes):\n a = np.array([ihold[n]])\n #print \"a:\", a\n iin = self.get_i(a, n)\n #print \"iin:\", iin\n ihold0[n] = iin[0]\n \n if self.id == 0: print '- ihold: ', ihold, 'Hz, => ihold: ', ihold0, 'nA' \n \n # Modulation depth given, not always applied to current!\n for n in range(self.n_celltypes):\n \n if self.amod[n] is not None:\n \n if self.give_freq:\n \n # Apply to amplitude:\n a = np.array([ihold[n]]) + self.amod[n]*np.array([ihold[n]])\n self.amp[n] = self.get_i(a, n) - ihold0[n]\n \n if self.id == 0:\n print '- amp: ihold: ', ihold[n], 'Hz , amod: ', self.amod[n], ', => amp: ', self.amp[n], 'nA (' #, self.get_i(a, n), ')'\n \n elif self.n_syn_ex[n] > 0:\n \n if self.id == 0:\n print '- amp: ihold: ', ihold[n], 'Hz , amod: ', self.amod[n], ', => amp will be set for each spike generator'\n\n else:\n \n self.amp[n] = self.amod[n] * ihold[n] \n \n if self.id == 0:\n print '- amp: ihold: ', ihold[n], 'nA , amod: ', self.amod[n], ', => amp: ', self.amp[n], 'nA'\n \n # Noise depth given, not always applied to current!\n if self.anoise[n] is not None:\n \n if (self.give_freq is True) or (self.n_syn_ex[n] > 0):\n \n # Apply to amplitude:\n a = np.array([ihold[n]]) + self.anoise[n]*np.array([ihold[n]])\n self.fluct_s[n] = ((self.get_i(a, n) - ihold0[n]))/2. 
# adjust with /2 so that noise = +-2*std\n \n if self.id == 0:\n print '- noise: ihold: ', ihold[n], 'Hz , anoise: ', self.anoise[n], ', => fluct_s: ', self.fluct_s[n], 'nA'\n \n else:\n \n self.fluct_s[n] = self.anoise[n] * ihold[n] \n \n if self.id == 0:\n print '- noise: ihold: ', ihold[n], 'nA , anoise: ', self.anoise[n], ', => fluct_s: ', self.fluct_s[n], 'nA'\n \n \n if self.give_freq is True: \n ihold = ihold0\n \n return ihold\n \n \n def calc_fmean(self, t_vec, t_startstop):\n \n #t_startstop[0] = 1\n #t_startstop[1] = 5\n \n f_cells_mean = 0\n f_cells_cv = np.nan\n f_cells_std = np.nan\n \n if len(t_vec) > 0: \n \n f_start_in = mlab.find(t_vec >= t_startstop[0]) # 1\n f_stop_in = mlab.find(t_vec <= t_startstop[1]) # 5\n \n if (len(f_start_in) > 0) & (len(f_stop_in) > 0):\n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_vec[f_start:f_stop]*1e3\n \n if len(use_spikes) > 1:\n s1 = signals.SpikeTrain(use_spikes)\n isi = s1.isi()\n f_cells_mean = s1.mean_rate() # use mean of single cells\n f_cells_cv = np.std(isi)/np.mean(isi)\n f_cells_std = np.std(isi)\n \n #f_start_in = mlab.find(t_vec >= 1) \n #f_stop_in = mlab.find(t_vec <= 2) \n \n #if (len(f_start_in) > 0) & (len(f_stop_in) > 0):\n \n # f_start = f_start_in[0] \n # f_stop = f_stop_in[-1]+1 \n # use_spikes = t_vec[f_start:f_stop]*1e3\n \n # if len(use_spikes) > 1:\n # s1 = signals.SpikeTrain(use_spikes)\n # isi = s1.isi()\n # f_cells_cv = np.std(isi)/np.mean(isi)\n \n return f_cells_mean, f_cells_cv, f_cells_std \n \n \n def get_fmean(self, t_all_vec_vecn, id_all_vec_vecn, t_startstop, gidlist, facborder = 3): # 1e9\n \n f_cells_mean = zeros(len(gidlist))\n f_cells_base = zeros(len(gidlist))\n f_cells_std = nans(len(gidlist))\n f_cells_cv = nans(len(gidlist))\n f_cells_gid = nans(len(gidlist))\n \n fbase = np.nan\n fmean = np.nan\n fmax = np.nan\n fmstd = np.nan\n fcvm = np.nan\n fstdm = np.nan\n \n f_cells_mean_all = []\n f_cells_base_all = []\n f_cells_cv_all = []\n f_cells_std_all = []\n \n gid_del = np.array([])\n \n if self.no_fmean == False:\n \n if self.id == 0: print \"- sorting for fmean\"\n\n for i, l in enumerate(gidlist):\n \n t_0_vec = t_all_vec_vecn[where(id_all_vec_vecn==l)]\n f_cells_mean[i], f_cells_cv[i], f_cells_std[i] = self.calc_fmean(t_0_vec, t_startstop)\n f_cells_base[i], _, _ = self.calc_fmean(t_0_vec, [self.delay_baseline-4,self.delay_baseline])\n f_cells_gid[i] = l\n \n if self.id == 0: print \"- gather fmean\" \n f_cells_mean_all = self.do_gather(f_cells_mean)\n f_cells_base_all = self.do_gather(f_cells_base)\n f_cells_std_all = self.do_gather(f_cells_std)\n f_cells_cv_all = self.do_gather(f_cells_cv)\n f_cells_gid_all = self.do_gather(f_cells_gid)\n\n if self.id == 0:\n \n #print f_cells_mean_all\n \n f_cells_mean_all = np.nan_to_num(f_cells_mean_all)\n fmean = mean(f_cells_mean_all) # compute mean of mean rate for all cells\n fmstd = std(f_cells_mean_all) \n fmax = max(f_cells_mean_all)\n \n f_cells_base_all = np.nan_to_num(f_cells_base_all)\n fbase = mean(f_cells_base_all) # compute mean of mean rate for all cells\n \n f_cells_cv_all = f_cells_cv_all[~np.isnan(f_cells_cv_all)]\n f_cells_std_all = f_cells_std_all[~np.isnan(f_cells_std_all)]\n fcvm = mean(f_cells_cv_all)\n fstdm = mean(f_cells_std_all)\n \n print \"- get_fmean, fmean: \",fmean, \"fmax: \",fmax, \"Hz\", \"fmstd: \",fmstd, \"Hz\", \"fcvm: \",fcvm, \"fstdm: \",fstdm, \"Hz\" ,\"fbase: \", fbase, \"Hz\"\n \n if facborder < 1e9:\n \n fborder = fmean + facborder*fmstd\n i = mlab.find(f_cells_mean_all > fborder)\n 
gid_del = f_cells_gid_all[i]\n \n # f_cells_mean_all[i] = 0\n # f_cells_cv_all[i] = np.nan\n # f_cells_std_all[i] = np.nan\n \n # fmean2 = mean(np.nan_to_num(f_cells_mean_all)) # compute mean of mean rate for all cells\n # fmstd2 = std(np.nan_to_num(f_cells_mean_all)) \n # fmax2 = max(np.nan_to_num(f_cells_mean_all))\n \n # fcvm2 = mean(f_cells_cv_all[~np.isnan(f_cells_cv_all)])\n # fstdm2 = mean(f_cells_std_all[~np.isnan(f_cells_std_all)])\n \n # print \"- after facborder: get_fmean, fmean: \",fmean2, \"fmax: \",fmax2, \"Hz\", \"fmstd: \",fmstd2, \"Hz\", \"fcvm: \",fcvm2, \"fstdm: \",fstdm2, \"Hz, gid_del: \", gid_del\n \n\n return fmean, fmax, fmstd, fcvm, fstdm, gid_del, f_cells_mean_all, f_cells_cv_all, f_cells_std_all, fbase, f_cells_base_all \n\n \n def connect_fluct(self):\n \"\"\"\n Create fluctuating input onto every cell.\n \"\"\"\n \n if self.do_run:\n \n for m in self.flucts:\n del m \n del self.flucts\n \n for m in self.noises:\n del m \n del self.noises\n \n self.flucts = []\n self.noises = []\n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n #h.mcell_ran4_init(gid)\n \n noiseRandObj = h.Random() # provides NOISE with random stream\n self.noises.append(noiseRandObj) # has to be set here not inside the nmodl function!! \n \n # print str(gid) + \": \" + str(noiseRandObj.normal(0,1))\n \n fluct = h.Ifluct2(self.cells[n][i].soma(0.5))\n fluct.m = self.fluct_m/nA # [nA]\n fluct.s = self.fluct_s[n]/nA # [nA]\n fluct.tau = self.fluct_tau/ms # [ms]\n self.flucts.append(fluct) # add to list \n self.flucts[-1].noiseFromRandom(self.noises[-1]) # connect random generator!\n \n self.noises[-1].MCellRan4(1, gid+1) # set lowindex to gid+1, set highindex to > 0 \n self.noises[-1].normal(0,1)\n \n \n def connect_gfluct(self, E_e=0, E_i=-65):\n \"\"\"\n Create fluctuating conductance input onto every cell.\n \"\"\"\n if self.do_run:\n \n for m in self.flucts:\n del m \n del self.flucts\n \n for m in self.noises:\n del m \n del self.noises\n \n self.flucts = []\n self.noises = []\n \n for n in range(self.n_celltypes):\n \n fluct_g_i0_n = self.fluct_g_i0[n]\n \n if type(fluct_g_i0_n) is not ndarray: fluct_g_i0_n = np.array([fluct_g_i0_n])\n \n if len(fluct_g_i0_n) == len(self.global_gidlist[n]):\n pass\n else:\n fluct_g_i0_n = np.ones(int(len(self.global_gidlist[n])))*fluct_g_i0_n[0]\n if self.id == 0: print \"- single value in fluct_g_i0_n\"\n \n #print fluct_g_i0_n\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n #h.mcell_ran4_init(gid)\n \n noiseRandObj = h.Random() # provides NOISE with random stream\n self.noises.append(noiseRandObj) # has to be set here not inside the nmodl function!! 
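Storing the Random object in self.noises keeps a Python reference alive, so the noise stream persists for the whole run.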
\n \n # print str(gid) + \": \" + str(noiseRandObj.normal(0,1))\n \n fluct = h.Gfluct3(self.cells[n][i].soma(0.5))\n fluct.E_e = E_e/mV # [mV]\n fluct.E_i = E_i/mV # [mV]\n fluct.g_e0 = self.fluct_g_e0[n]/uS # [uS]\n fluct.g_i0 = fluct_g_i0_n[i]/uS # [uS]\n fluct.std_e = self.fluct_std_e[n]/uS # [uS] \n fluct.std_i = self.fluct_std_i[n]/uS # [uS] \n fluct.tau_e = self.fluct_tau_e/ms #tau_e/ms # [ms] \n fluct.tau_i = self.fluct_tau_i/ms #tau_i/ms # [ms]\n \n self.flucts.append(fluct) # add to list \n self.flucts[-1].noiseFromRandom(self.noises[-1]) # connect random generator!\n \n self.noises[-1].MCellRan4(1, gid+1) # set lowindex to gid+1, set highindex to > 0 \n self.noises[-1].normal(0,1)\n \n \n def connect_synfluct(self, PF_BG_rate=6, PF_BG_cv=1, STL_BG_rate=20, STL_BG_cv=1):\n \"\"\"\n Create fluctuating synaptic input onto every cell.\n \"\"\"\n \n if self.do_run:\n \n for m in self.ST_stims:\n del m \n del self.ST_stims\n \n for m in self.PF_stims:\n del m \n del self.PF_stims\n \n self.ST_stims = []\n self.PF_stims = []\n \n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n PF_syn_list = self.cells[n][i].createsyn_PF()\n \n for d in PF_syn_list:\n d.input.newnetstim.number = 1e9\n d.input.newnetstim.noise = PF_BG_cv\n d.input.newnetstim.interval = 1000.0 / PF_BG_rate\n d.input.newnetstim.start = 0\n \n self.PF_stims.append(PF_syn_list)\n \n ST_stim_list = self.cells[n][i].createsyn_ST(record_all=0)\n\n for d in ST_stim_list:\n d.newnetstim.number = 1e9\n d.newnetstim.noise = STL_BG_cv\n d.newnetstim.interval = 1000.0 / STL_BG_rate\n d.newnetstim.start = 0\n \n self.ST_stims.append(ST_stim_list)\n \n if self.id == 0: print \"- PF and ST stimulation added.\"\n \n \n\n def set_IStim(self, ihold = None, ihold_sigma = None, random_start = True, tstart_offset = 0):\n \"\"\"\n Add (random) ihold for each cell and offset!\n \"\"\"\n if self.do_run:\n \n # if not given, use the one in self\n if ihold == None:\n ihold = self.ihold\n if ihold_sigma == None:\n ihold_sigma = self.ihold_sigma\n \n if ihold[self.a_celltype[0]] != 0:\n ihold = self.set_i(ihold) \n \n for m in self.ic_holds:\n #m.destroy()\n del m \n del self.ic_holds\n \n for m in self.ic_starts:\n #m.destroy()\n del m \n del self.ic_starts\n \n for m in self.vc_starts:\n #m.destroy()\n del m \n del self.vc_starts\n \n self.ic_holds = []\n self.ic_starts = [] \n self.vc_starts = []\n self.i_holdrs = []\n self.i_holds = ihold\n \n for n in range(self.n_celltypes):\n self.i_holdrs.append([])\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n np.random.seed(gid*20)\n \n tis = 1\n \n if random_start == True:\n \n # random start time\n tstart = np.random.uniform(tstart_offset+0, tstart_offset+0.5)\n #if self.id == 0: print \"tstart:\", tstart\n vc_start = h.SEClamp(self.cells[n][i].soma(0.5))\n vc_start.dur1 = tstart/ms\n vc_start.amp1 = -80\n self.vc_starts.append(vc_start)\n tis = 0\n \n else:\n \n tis = 0 \n \n \n if ihold_sigma[n] != 0:\n #print ihold_sigma[n], ihold[n]\n ihold_r = np.random.normal(ihold[n], ihold[n]*ihold_sigma[n], 1).clip(min=0)\n #ihold_r = np.random.uniform(ihold[n]*ihold_sigma[n], ihold[n])\n \n elif self.CF_var is not False: # CF gets not adapted to current but final frequnecy!\n \n r_ok = False\n while r_ok == False:\n r_temp = np.random.normal(self.ihold_orig[n], self.CF_var[n][1], 1) \n if (r_temp <= self.CF_var[n][2]) and (r_temp >= self.CF_var[n][0]): # check borders!\n r_ok = True\n \n #print r_temp 
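(the drawn per-cell target rate r_temp is converted into a holding current via the i/f curve below)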
\n ihold_r = self.get_i(r_temp, n)\n #print ihold_r\n #if self.id == 0: \n print \"set self.CF_var\", r_temp, ihold_r\n \n else: # same ihold for all cells!\n ihold_r = ihold[n]\n \n self.i_holdrs[n].append(ihold_r)\n \n if ihold_r != 0:\n \n if hasattr(self.cells[n][i], 'input_vec'):\n \n ic_hold = []\n for vec in self.cells[n][i].input_vec:\n for inv in vec:\n #print ihold_r\n ic_hold.append(h.IClamp(inv(0.5))) \n ic_hold[-1].amp = self.cells[n][i].ifac * ihold_r / self.cells[n][i].n_input_spiny / nA\n ic_hold[-1].delay = tis/ms\n ic_hold[-1].dur = 1e9\n \n else: \n\n # holding current\n ic_hold = h.IClamp(self.cells[n][i].soma(0.5))\n ic_hold.delay = tis/ms\n ic_hold.dur = 1e9\n ic_hold.amp = ihold_r/nA\n \n self.ic_holds.append(ic_hold)\n \n if self.id == 0: print \"set_IStim finished. ihold: \", ihold, \", ihold_sigma: \", ihold_sigma\n \n \n def set_IStep(self, istep = [0], istep_sigma = [0], tstep = 5, tdur = 1e6, give_freq = True):\n \"\"\"\n Add istep for each cell and offset!\n \"\"\"\n if self.do_run:\n #for m in self.ic_steps:\n # m.destroy()\n # del m \n #del self.ic_steps\n \n #self.ic_steps = []\n \n istep = list(istep)\n neg = False\n \n for n in range(self.n_celltypes):\n \n if istep[n] < 0: \n neg = True\n istep[n] = abs(istep[n]) # make positive again\n \n if istep[n] != 0:\n if give_freq is True:\n a = np.array([istep[n]])\n iin = self.get_i(a, n)[0]\n if self.id == 0: print \"celltype: \", n, \" istep: \", istep[n], \"Hz => \", iin, \" nA\"\n istep[n] = iin \n \n for n in range(self.n_celltypes):\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n np.random.seed(gid*30)\n \n if self.i_holdrs == []:\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n], istep[n]*istep_sigma[n], 1).clip(min=0)\n else: # same ihold for all cells!\n istep_r = istep[n]\n \n else: # ihold has been set!\n \n if istep_sigma[n] != 0:\n istep_r = np.random.normal(istep[n]-self.i_holds[n], (istep[n]-self.i_holds[n])*istep_sigma[n], 1).clip(min=0) # delta now! put on top of hold!\n else: # same ihold for all cells!\n istep_r = istep[n]-self.i_holds[n] # delta now! put on top of hold!\n \n if neg:\n istep_r = -1*istep_r\n \n if istep[n] == 0:\n istep_r = -1*self.i_holdrs[n][i] \n \n #print 'is:' + str(istep_r) + 'was:' + str(self.i_holdrs[n][i])\n \n if istep_r != 0: \n # step current\n ic_step = h.IClamp(self.cells[n][i].soma(0.5))\n ic_step.delay = tstep/ms\n ic_step.dur = tdur/ms\n ic_step.amp = istep_r/nA\n self.ic_steps.append(ic_step)\n \n \n if self.id == 0: print \"set_IStep finished. istep: \", istep, \", istep_sigma: \", istep_sigma\n \n\n def set_IPlay(self, stimulus, t):\n \"\"\"\n Initializes values for current clamp to play a signal. 
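The signal is converted to nA and played into an IClamp at the soma of every cell of the analysed celltype.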
\n \"\"\"\n \n if self.do_run:\n \n for m in self.tvecs:\n #m.destroy()\n del m \n del self.tvecs\n \n for m in self.ivecs:\n #m.destroy()\n del m \n del self.ivecs\n \n for m in self.plays:\n #m.destroy()\n del m \n del self.plays\n \n self.tvecs = []\n self.ivecs = []\n self.plays = []\n \n for i, gid in enumerate(self.gidlist[self.a_celltype[0]]): # for every cell in the gidlist \n \n tvec = h.Vector(t/ms)\n ivec = h.Vector(stimulus/nA)\n \n play = h.IClamp(self.cells[self.a_celltype[0]][i].soma(0.5))\n play.delay = 0\n play.dur = 1e9\n \n ivec.play(play._ref_amp, tvec, 1)\n \n self.plays.append(play) # add to list\n self.tvecs.append(tvec) # add to list\n self.ivecs.append(ivec) # add to list \n \n if self.id == 0: print \"set_IPlay finished.\"\n \n \n def set_IPlay2(self, stimulus, t):\n \"\"\"\n Initializes values for current clamp to play a signal. \n \"\"\"\n \n if self.do_run:\n \n for m in self.tvecs:\n #m.destroy()\n del m \n del self.tvecs\n \n for m in self.ivecs:\n #m.destroy()\n del m \n del self.ivecs\n \n for m in self.plays:\n #m.destroy()\n del m \n del self.plays\n \n self.tvecs = []\n self.ivecs = []\n self.plays = []\n \n for j in self.a_celltype:\n \n tvec = h.Vector(t/ms)\n ivec = []\n for s in stimulus:\n if hasattr(self.cells[j][0], 'input_vec'):\n ivec.append(h.Vector(self.factor_celltype[j] * self.cells[j][0].ifac * s / self.cells[j][0].n_input_spiny / nA))\n else:\n ivec.append(h.Vector(self.factor_celltype[j]*s/nA))\n\n self.tvecs.append(tvec) # add to list\n self.ivecs.append(ivec) # add to list \n \n for i, gid in enumerate(self.gidlist[j]): # for every cell in the gidlist \n\n if hasattr(self.cells[j][i], 'input_vec'):\n \n play = []\n for iloc, vec in enumerate(self.cells[j][i].input_vec):\n isig = self.syn_ex_dist[j][iloc]-1\n #print isig\n for inv in vec:\n play.append(h.IClamp(inv(0.5))) \n play[-1].delay = 0\n play[-1].dur = 1e9\n ivec[isig].play(play[-1]._ref_amp, tvec, 1)\n \n else: \n #fluctuating current\n play = h.IClamp(self.cells[j][i].soma(0.5))\n play.delay = 0\n play.dur = 1e9\n ivec[0].play(play._ref_amp, tvec, 1)\n \n self.plays.append(play) # add to list\n\n \n if self.id == 0: print \"set_IPlay2 finished.\"\n \n \n def set_IPlay3(self, stimulus, t, amp = None):\n \"\"\"\n Initializes values for current clamp to play a signal. 
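The per-cell amplitude factor is taken from factor_celltype; it can be drawn from a normal distribution and sign-flipped with 50% probability.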
\n \"\"\"\n \n if self.do_run:\n \n for m in self.tvecs:\n #m.destroy()\n del m \n del self.tvecs\n \n for m in self.ivecs:\n #m.destroy()\n del m \n del self.ivecs\n \n for m in self.plays:\n #m.destroy()\n del m \n del self.plays\n \n self.tvecs = []\n self.ivecs = []\n self.plays = []\n \n for j in self.a_celltype:\n \n if amp is None:\n amp0 = 0\n else:\n amp0 = amp[j]\n \n tvec = h.Vector(t/ms)\n self.tvecs.append(tvec) # add to list\n \n for i, gid in enumerate(self.gidlist[j]): # for every cell in the gidlist \n \n if isinstance(self.factor_celltype[j], ( int, long ) ): \n ivec = h.Vector(self.factor_celltype[j]*(stimulus*amp0)/nA) \n else:\n np.random.seed(gid*40)\n rnd.seed(gid*40)\n if self.factor_celltype[j][1] > 0:\n f = np.random.normal(self.factor_celltype[j][0], self.factor_celltype[j][1], 1).clip(min=0)\n else:\n f = self.factor_celltype[j][0] \n if self.factor_celltype[j][2] > 0: # add inverted input with 50% probability, in future versions this will indicate the propability for -1 and 1\n f = rnd.sample([-1,1],1)[0] * f\n if self.id == 0: print \"- inverted input with 50% probability:\", f \n if self.id == 0: print \"- randomize play stimulus height\" \n ivec = h.Vector(f*(stimulus*amp0)/nA)\n \n self.ivecs.append(ivec) # add to list \n \n #fluctuating current\n play = h.IClamp(self.cells[j][i].soma(0.5))\n play.delay = 0\n play.dur = 1e9\n ivec.play(play._ref_amp, tvec, 1)\n \n self.plays.append(play) # add to list\n \n if self.id == 0: print \"set_IPlay3 finished.\"\n \n \n def set_PulseStim(self, start_time=[100*ms], dur=[1500*ms], steadyf=[100*Hz], pulsef=[150*Hz], pulse_start=[500*ms], pulse_len=[500*ms], weight0=1, tau01=[1*ms], tau02=[20*ms], weight1=1, tau11=[0*ms], tau12=[1*ms], noise = 1):\n \n if self.do_run:\n \n modulation_vec = []\n \n for n in range(self.n_celltypes):\n \n t_input = np.arange(0, dur[n], self.dt) # create stimulus time vector has to be in ms!! 
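The rate waveform below is zero until start_time, steadyf until pulse_start, pulsef for pulse_len, then steadyf again until dur.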
\n mod = np.concatenate(([np.zeros(round(start_time[n]/self.dt)), steadyf[n]*np.ones(round((pulse_start[n]-start_time[n])/self.dt)), pulsef[n]*np.ones(round(pulse_len[n]/self.dt)),steadyf[n]*np.ones(round((dur[n]-pulse_start[n]-pulse_len[n])/self.dt)) ])) \n modulation = (t_input, mod)\n \n #print shape(t_input), shape(mod), shape(modulation)\n \n for i, gid in enumerate(self.gidlist[n]): # for every cell in the gidlist \n \n if dur[n] > 0:\n \n if self.celltype[n] == 'Grc':\n \n nmf = 4\n \n for j in range(nmf):\n \n self.cells[n][i].createsyn(nmf = 1, ngoc = 0, weight = weight0) \n e0 = len(self.cells[n][i].MF_L)-1 # get number of current synapse!\n \n pulse_gid = int(self.gid_count + gid*1000 + j)\n \n train = mod_spike_train(modulation, noise = noise, seed = pulse_gid)\n \n self.setup_Play_train(train = train, input_gid = pulse_gid)\n \n self.cells[n][i].pconnect(self.pc,pulse_gid,int(e0),'mf') \n \n elif self.celltype[n] == 'Goc':\n \n nmf = 53\n \n for j in range(nmf):\n \n self.cells[n][i].createsyn(nmf = 1, weight = weight1)\n e0 = len(self.cells[n][i].MF_L)-1 # get number of current synapse!\n \n pulse_gid = int(self.gid_count + gid*1000 + j)\n \n train = mod_spike_train(modulation, noise = noise, seed = pulse_gid)\n \n self.setup_Play_train(train = train, input_gid = pulse_gid)\n \n self.cells[n][i].pconnect(self.pc,pulse_gid,int(e0),'mf') \n \n \n elif self.celltype[n] == 'Goc_noloop':\n \n ngrc = 100\n \n for j in range(ngrc):\n \n self.cells[n][i].createsyn(ngrc = 1, weight = weight0)\n e0 = len(self.cells[n][i].GRC_L)-1 # get number of current synapse!\n \n pulse_gid = int(self.gid_count + gid*1000 + j)\n \n train = mod_spike_train(modulation, noise = noise, seed=pulse_gid)\n \n self.setup_Play_train(train = train, input_gid = pulse_gid)\n \n self.cells[n][i].pconnect(self.pc,pulse_gid,int(e0),'grc') \n \n else:\n \n pulse_gid = int(self.gid_count + gid*1000 + 100)\n \n train = mod_spike_train(modulation, noise = noise, seed = pulse_gid)\n self.trains.append(train)\n \n setup_Play_train(train = train, input_gid = pulse_gid)\n \n # NMDA\n self.cells[n][i].create_synapses(n_ex=1, tau1=tau01[n], tau2=tau02[n])\n e0 = len(self.cells[n][i].synlist)-1\n \n weight=weight0[n]\n np.random.seed(gid*60)\n #weight = np.random.normal(weight, weight*0.5, 1).clip(min=0)\n self.cells[n][i].pconnect_target(self.pc, source=pulse_gid, target=e0, syntype='ex', weight=weight, delay=1)\n \n # AMPA\n self.cells[n][i].create_synapses(n_ex=1, tau1=tau11[n], tau2=tau12[n])\n e0 = len(self.cells[n][i].synlist)-1\n \n weight=weight1[n]\n np.random.seed(gid*60)\n #weight = np.random.normal(weight, weight*0.5, 1).clip(min=0)\n self.cells[n][i].pconnect_target(self.pc, source=pulse_gid, target=e0, syntype='ex', weight=weight, delay=1)\n \n \n modulation = (t_input, mod) # mack to s!\n modulation_vec.append(modulation) \n \n return modulation_vec\n \n \n def connect_Synapse(self, pulse_gid, nt, i, n, gid, j, syntype = \"ex\", nsyn=0): \n \n if self.do_run:\n \n if 'gsyn_in' in self.method_interpol: \n if isinstance(self.factor_celltype[nt], ( int, long ) ):\n f = self.factor_celltype[nt] \n else:\n f = self.factor_celltype[nt][0] \n \n if syntype == \"ex\":\n \n # each cell can receive different g_syn_ex ! 
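An array with one entry per cell of this type is indexed by the cell's position in the population; any other value is used as given.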
\n if type(self.g_syn_ex[nt]) is ndarray:\n if len(self.g_syn_ex[nt]) == len(self.global_gidlist[nt]):\n w = self.g_syn_ex[nt][n]\n else:\n w = self.g_syn_ex[nt] \n else:\n w = self.g_syn_ex[nt] \n \n seed = int(10000 + 10*gid + j)\n np.random.seed(seed*41)\n \n if self.g_syn_ex_s[nt] > 0:\n w = np.random.normal(w, w*self.g_syn_ex_s[nt], 1).clip(min=0) # self.g_syn_ex_s[nt] \n \n if self.celltype[nt] == 'Grc':\n \n # delete old\n if j == 0: \n self.cells[nt][i].MF_L = []\n self.cells[nt][i].mfncpc = []\n \n if \"gr\" not in str(self.tau1_ex[nt]):\n \n if \"amfit\" in str(self.tau1_ex[nt]):\n syn = h.ExpZSyn(self.cells[nt][i].soma(0.5)) \n \n syn.tau1_ampa = 0.254\n syn.tau2_ampa = 0.254\n syn.tau3_ampa = 0.363\n syn.tau4_ampa = 6.523\n syn.f1_ampa = 8.8376e-05\n syn.f2_ampa = 5.5257e-05\n \n syn.f1_nmda = 0\n \n elif \"nmfit\" in str(self.tau1_ex[nt]):\n syn = h.ExpYSyn(self.cells[nt][i].soma(0.5))\n \n syn.f1_ampa = 0\n syn.f2_ampa = 0\n \n syn.tau1_nmda = 1.902\n syn.tau2_nmda = 82.032\n syn.f1_nmda = 7.853857483005277e-05\n \n elif \"fit\" in str(self.tau1_ex[nt]): \n syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))\n \n syn.tau1_ampa = 0.254\n syn.tau2_ampa = 0.254\n syn.tau3_ampa = 0.363\n syn.tau4_ampa = 6.523\n syn.f1_ampa = 8.8376e-05\n syn.f2_ampa = 5.5257e-05\n \n syn.tau1_nmda = 1.902\n syn.tau2_nmda = 82.032\n syn.f1_nmda = 7.853857483005277e-05\n \n else:\n tau1 = self.tau1_ex[nt]\n tau2 = self.tau2_ex[nt]\n \n if tau1 == 0:\n syn = h.ExpSyn(self.cells[nt][i].soma(0.5))\n syn.tau = tau2/ms\n \n else: \n syn = h.Exp2Syn(self.cells[nt][i].soma(0.5))\n syn.tau1 = tau1/ms\n syn.tau2 = tau2/ms\n \n syn.e = 0/mV\n \n self.cells[nt][i].MF_L.append(syn)\n \n e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!\n \n syn_idx = int(e0)\n \n source = int(pulse_gid)\n self.cells[nt][i].mfncpc.append(self.pc.gid_connect(source, self.cells[nt][i].MF_L[syn_idx]))\n self.cells[nt][i].mfncpc[-1].delay = 1\n self.cells[nt][i].mfncpc[-1].weight[0] = w\n \n if 'gsyn_in' in self.method_interpol:\n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].MF_L[-1]._ref_g)\n self.gsyn_in_fac.append(f)\n \n else:\n \n nrel = 0\n \n if \"stoch\" in str(self.tau1_ex[nt]):\n nrel = 4\n \n self.cells[nt][i].createsyn(nmf = 1, ngoc = 0, weight_gmax = w, nrel=nrel) \n \n if \"ampa\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].gmax_factor = 0\n if \"nopre\" in str(self.tau1_ex[nt]):\n print \"- no pre\"\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_rec = 1e-9\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_facil = 1e-9\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_1 = 0\n \n if \"nostdampa\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].gmax_factor = 0\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_rec = 1e-9\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_facil = 1e-9\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].tau_1 = 0\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].r6FIX = 0\n \n if \"nostdnmda\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].gmax_factor = 0\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_rec = 1e-9\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_facil = 1e-9\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_1 = 0\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].RdRate = 0\t\n \n if \"nmda\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].gmax_factor = 0\n if \"nopre\" in 
str(self.tau1_ex[nt]):\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_rec = 1e-9\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_facil = 1e-9\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].tau_1 = 0\n \n if \"nostdgr\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0].r6FIX\t= 0 #1.12\t\n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].RdRate = 0 #12e-3\n print \"- no std\"\n \n if \"nomggr\" in str(self.tau1_ex[nt]): \n self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0].v0_block = -1e9\n print \"- no mg block\"\n \n e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!\n \n self.cells[nt][i].pconnect(self.pc,pulse_gid,int(e0),'mf') \n \n if 'gsyn_in' in self.method_interpol:\n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0]._ref_g)\n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0]._ref_g)\n self.gsyn_in_fac.append(f)\n self.gsyn_in_fac.append(f)\n \n \n elif self.celltype[nt] == 'Goc':\n \n # delete old\n if j == 0: \n self.cells[nt][i].MF_L = []\n self.cells[nt][i].mfncpc = []\n \n if \"go\" not in str(self.tau1_ex[nt]):\n \n tau1 = self.tau1_ex[nt]\n tau2 = self.tau2_ex[nt]\n \n if tau1 == 0:\n syn = h.ExpSyn(self.cells[nt][i].soma(0.5))\n syn.tau = tau2/ms\n \n else: \n syn = h.Exp2Syn(self.cells[nt][i].soma(0.5))\n syn.tau1 = tau1/ms\n syn.tau2 = tau2/ms\n \n syn.e = 0/mV\n \n self.cells[nt][i].MF_L.append(syn)\n \n e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!\n \n syn_idx = int(e0)\n \n source = int(pulse_gid)\n self.cells[nt][i].mfncpc.append(self.pc.gid_connect(source, self.cells[nt][i].MF_L[syn_idx]))\n self.cells[nt][i].mfncpc[-1].delay = 1\n self.cells[nt][i].mfncpc[-1].weight[0] = w\n \n if 'gsyn_in' in self.method_interpol:\n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].MF_L[-1]._ref_g)\n self.gsyn_in_fac.append(f)\n else:\n \n nrel = 0\n \n mg = self.mglufac_ex[0]\n if self.mglufac_ex[1] > 0:\n mg = np.random.normal(self.mglufac_ex[0], self.mglufac_ex[1]*self.mglufac_ex[0], 1).clip(min=0) # self.g_syn_ex_s[nt] \n \n if \"stoch\" in str(self.tau1_ex[nt]):\n nrel = 4\n \n self.cells[nt][i].createsyn(nmf = 1, weight_gmax = w, nrel=nrel, mglufac = mg) \n \n e0 = len(self.cells[nt][i].MF_L)-1 # get number of current synapse!\n \n self.cells[nt][i].pconnect(self.pc,pulse_gid,int(e0),'mf') \n \n if 'gsyn_in' in self.method_interpol:\n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['AMPA'][0]._ref_g)\n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].MF_L[-1].postsyns['NMDA'][0]._ref_g)\n self.gsyn_in_fac.append(f)\n self.gsyn_in_fac.append(f)\n \n elif self.celltype[nt] == 'IfCell': \n \n # delete old\n if j == 0: \n self.cells[nt][i].synlist = []\n self.cells[nt][i].nc = []\n \n if \"gr\" in str(self.tau1_ex[nt]):\n \n self.cells[nt][i].whatami = \"grc\"\n \n nrel = 0\n if \"stoch\" in str(self.tau1_ex[nt]):\n nrel = 4\n \n self.cells[nt][i].MF_L = self.cells[nt][i].synlist\n self.cells[nt][i].synlist.append(Synapse('glom', self.cells[nt][i], self.cells[nt][i].soma, nrel=nrel, record_all=0, weight_gmax = w))\n \n if \"ampa\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].gmax_factor = 0\n if \"nopre\" in str(self.tau1_ex[nt]):\n print \"- no pre\"\n self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_rec = 1e-9\n 
self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_facil = 1e-9\n self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_1 = 0\n \n if \"nmda\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].gmax_factor = 0\n if \"nopre\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_rec = 1e-9\n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_facil = 1e-9\n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_1 = 0\n \n if \"nostdampa\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_rec = 1e-9\n self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_facil = 1e-9\n self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].tau_1 = 0\n self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].r6FIX\t= 0 #1.12\t\n \n if \"nostdnmda\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_rec = 1e-9\n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_facil = 1e-9\n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].tau_1 = 0\n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].RdRate = 0\t\n \n if \"nostdgr\" in str(self.tau1_ex[nt]):\n self.cells[nt][i].synlist[-1].postsyns['AMPA'][0].r6FIX\t= 0 #1.12\t\n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].RdRate = 0 #12e-3\n print \"- no std\"\n \n if \"nomggr\" in str(self.tau1_ex[nt]): \n self.cells[nt][i].synlist[-1].postsyns['NMDA'][0].v0_block = -1e9 #.k_block = 1e-9\n print \"- no mg block\"\n \n e0 = len(self.cells[nt][i].synlist)-1\n syn_idx = int(e0)\n \n source = int(pulse_gid)\n self.cells[nt][i].nc.append(self.pc.gid_connect(source, self.cells[nt][i].synlist[syn_idx].input))\n self.cells[nt][i].nc[-1].delay = 1\n self.cells[nt][i].nc[-1].weight[0] = 1\n \n if 'gsyn_in' in self.method_interpol:\n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].synlist[syn_idx].postsyns['AMPA'][0]._ref_g)\n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].synlist[syn_idx].postsyns['NMDA'][0]._ref_g) \n self.gsyn_in_fac.append(f)\n self.gsyn_in_fac.append(f)\n else:\n \n if \"amfit\" in str(self.tau1_ex):\n \n syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5)) \n \n syn.tau1_ampa = 0.254\n syn.tau2_ampa = 0.254\n syn.tau3_ampa = 0.363\n syn.tau4_ampa = 6.523\n syn.f1_ampa = 8.8376e-05\n syn.f2_ampa = 5.5257e-05\n \n syn.f1_nmda = 0\n \n self.cells[nt][i].synlist.append(syn) # synlist is defined in Cell \n \n elif \"nmfit\" in str(self.tau1_ex):\n \n syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))\n \n syn.f1_ampa = 0\n syn.f2_ampa = 0\n \n syn.tau1_nmda = 1.902\n syn.tau2_nmda = 82.032\n syn.f1_nmda = 7.853857483005277e-05\n \n self.cells[nt][i].synlist.append(syn) # synlist is defined in Cell \n \n elif \"fit\" in str(self.tau1_ex): \n \n syn = h.ExpGrcSyn(self.cells[nt][i].soma(0.5))\n \n syn.tau1_ampa = 0.254\n syn.tau2_ampa = 0.254\n syn.tau3_ampa = 0.363\n syn.tau4_ampa = 6.523\n syn.f1_ampa = 8.8376e-05\n syn.f2_ampa = 5.5257e-05\n \n syn.tau1_nmda = 1.902\n syn.tau2_nmda = 82.032\n syn.f1_nmda = 7.853857483005277e-05 \n \n self.cells[nt][i].synlist.append(syn) # synlist is defined in Cell \n \n else:\n \n self.cells[nt][i].create_synapses(n_ex=1, tau1=self.tau1_ex[nt], tau2=self.tau2_ex[nt])\n \n \n e0 = len(self.cells[nt][i].synlist)-1\n syn_idx = int(e0)\n \n self.cells[nt][i].pconnect_target(self.pc, source=pulse_gid, target=int(e0), syntype='ex', weight=w, delay=1)\n \n if 'gsyn_in' in self.method_interpol:\n self.record_syn.append(h.Vector())\n 
self.record_syn[-1].record(self.cells[nt][i].synlist[syn_idx]._ref_g)\n self.gsyn_in_fac.append(f)\n \n elif self.celltype[nt] == 'Prk':\n \n # delete old\n if j == 0: \n self.cells[nt][i].PF_Lsync = []\n self.cells[nt][i].spk_nc_pfsync = []\n self.cells[nt][i].pfrand = []\n \n m = len(self.cells[nt][i].dendrange)\n \n seed = int(4*gid)\n np.random.seed(seed)\n \n for k in xrange(nsyn):\n m -= 1\n \t mi = np.random.randint(0, m)\t \n \t self.cells[nt][i].dendrange[mi], self.cells[nt][i].dendrange[m] = self.cells[nt][i].dendrange[m], self.cells[nt][i].dendrange[mi]\n \t self.cells[nt][i].pfrand.append(self.cells[nt][i].dendrange[m])\n \n #print self.cells[nt][i].pfrand\n\n if \"prk\" not in str(self.tau1_ex[nt]):\n pass\n else:\n self.cells[nt][i].PF_Lsync.append(Synapse2('pf',self.cells[nt][i],self.cells[nt][i].pfrand[j],record_all=0))\n\n e0 = len(self.cells[nt][i].PF_Lsync)-1 # get number of current synapse!\n syn_idx = int(e0)\n\n self.cells[nt][i].spk_nc_pfsync.append(self.pc.gid_connect(pulse_gid, self.cells[nt][i].PF_Lsync[syn_idx].input.newnetstim))\n self.cells[nt][i].spk_nc_pfsync[-1].delay = 1\n self.cells[nt][i].spk_nc_pfsync[-1].weight[0] = 1\n \n if 'gsyn_in' in self.method_interpol:\n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].PF_Lsync[-1].postsyns['AMPA'][0]._ref_g)\n self.gsyn_in_fac.append(f) \n \n elif syntype == \"inh\":\n \n w = self.g_syn_inh[nt]\n \n seed = int(10000 + 10*gid + j)\n np.random.seed(seed*42)\n \n if self.g_syn_inh_s[nt] > 0:\n w = np.random.normal(w, w*self.g_syn_inh_s[nt], 1).clip(min=w*0.1) # self.g_syn_inh_s[nt] \n \n if self.celltype[nt] == 'Grc':\n \n if j == 0: \n self.cells[nt][i].GOC_L = []\n self.cells[nt][i].gocncpc = []\n \n if \"gr\" not in str(self.tau1_inh[nt]):\n \n tau1 = self.tau1_inh[nt]\n tau2 = self.tau2_inh[nt]\n \n if tau1 == 0:\n syn = h.ExpSyn(self.cells[nt][i].soma(0.5))\n syn.tau = tau2/ms\n \n else: \n syn = h.Exp2Syn(self.cells[nt][i].soma(0.5))\n syn.tau1 = tau1/ms\n syn.tau2 = tau2/ms\n \n syn.e = -65\n \n self.cells[nt][i].GOC_L.append(syn)\n \n i0 = len(self.cells[nt][i].GOC_L)-1 # get number of current synapse!\n \n syn_idx = int(i0)\n source = int(pulse_gid)\n self.cells[nt][i].gocncpc.append(self.pc.gid_connect(source, self.cells[nt][i].GOC_L[syn_idx]))\n self.cells[nt][i].gocncpc[-1].delay = 1\n self.cells[nt][i].gocncpc[-1].weight[0] = w\n \n else:\n \n self.cells[nt][i].createsyn(nmf = 0, ngoc = 1, weight_gmax = w) \n i0 = len(self.cells[nt][i].GOC_L)-1 # get number of current synapse!\n self.cells[nt][i].pconnect(self.pc,pulse_gid,int(i0),'goc')\n \n \n if self.celltype[nt] == 'IfCell': \n \n if j == 0: \n self.cells[nt][i].synlist_inh = []\n self.cells[nt][i].nc_inh = []\n \n if \"gr\" in str(self.tau1_inh[nt]):\n \n nrel = 0\n if \"stoch\" in str(self.tau1_ex[nt]):\n nrel = 4\n \n self.cells[nt][i].GOC_L = self.cells[nt][i].synlist\n self.cells[nt][i].whatami = \"grc\"\n self.cells[nt][i].synlist_inh.append(Synapse('goc', self.cells[nt][i], self.cells[nt][i].soma, nrel=nrel, record_all=0, weight_gmax = w))\n \n i0 = len(self.cells[nt][i].synlist_inh)-1\n syn_idx = int(i0)\n \n source = int(pulse_gid)\n self.cells[nt][i].nc_inh.append(self.pc.gid_connect(source, self.cells[nt][i].synlist_inh[syn_idx].input))\n self.cells[nt][i].nc_inh[-1].delay = 1\n self.cells[nt][i].nc_inh[-1].weight[0] = 1\n \n if \"gaba\" in str(self.tau1_ex[nt]):\n \n if 'gsyn_in' in self.method_interpol:\n \n if \"nostdgaba\" in str(self.tau1_ex[nt]):\n \n 
self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].tau_rec = 1e-9 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].tau_facil = 1e-9 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].tau_1 = 0 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d3 = 0 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1d2 = 0 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1 = 0 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d2 = 0 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d3_a6 = 0 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1d2_a6 = 0 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d1_a6 = 0 \n self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0].d2_a6 = 0 \n \n self.record_syn.append(h.Vector())\n self.record_syn[-1].record(self.cells[nt][i].synlist_inh[syn_idx].postsyns['GABA'][0]._ref_g)\n self.gsyn_in_fac.append(f)\n \n else:\n \n self.cells[nt][i].create_synapses(n_inh=1, tau1_inh=self.tau1_inh[nt], tau2_inh=self.tau2_inh[nt], e_inh=-65)\n i0 = len(self.cells[nt][i].synlist_inh)-1\n syn_idx = int(i0)\n self.cells[nt][i].pconnect_target(self.pc, source=pulse_gid, target=int(i0), syntype='inh', weight=w, delay=1)\n \n \n elif syntype == \"intr\":\n \n if self.celltype[nt] == 'Prk':\n \n pass\n\n \n def set_SynPlay(self, farray, tarray, N = [], t_startstop = [], amode = 1):\n \n if self.do_run:\n \n delay = 1\n if (self.use_pc is False):\n delay = 0.1\n \n if N == []:\n N = self.N\n \n self.pulse_list = [] \n self.global_pulse_list = []\n self.global_pulse_list_inh = []\n self.global_pulse_list_intr = []\n \n f_cells_mean_local = []\n f_cells_cv_local = []\n f_cells_std_local = []\n \n for nt in range(self.n_celltypes): # loop over all cells\n \n if (self.n_syn_ex[nt] > 0) or (self.n_syn_inh[nt] > 0) or (self.n_syn_intr[nt] > 0):\n\n local_gid_count = 0\n local_gid_count_type = []\n \n \n # EXCITATION\n if str(type(self.g_syn_ex[nt] )) is not ndarray: self.g_syn_ex[nt] = np.array([self.g_syn_ex[nt] ]) # each cell can receive different g_syn_ex !\n \n if len(self.g_syn_ex[nt]) == len(self.global_gidlist[nt]):\n pass\n else:\n self.g_syn_ex[nt] = np.ones(len(self.global_gidlist[nt]))*self.g_syn_ex[nt][0]\n #print \"- single value in g_syn_ex, cells:\", len(self.global_gidlist[nt])\n \n self.global_pulse_list.append([])\n for ns in range(self.n_syn_ex[nt]): # loop over all excitatory synapses!\n self.global_pulse_list[-1].append([])\n for n in range(self.syn_max_mf[nt]): # number of cells of this celltype\n self.global_pulse_list[-1][-1].append(local_gid_count+self.gid_count)\n local_gid_count += 1\n local_gid_count_type.append([])\n local_gid_count_type[-1].append('ex')\n local_gid_count_type[-1].append(n) # number of cell within their population 0..N[nt]\n local_gid_count_type[-1].append(ns) # number of synapse \n \n \n # INHIBITION \n if np.array(self.inh_hold[nt]).size <= 1:\n self.inh_hold[nt] = np.ones(len(self.global_gidlist[nt]))*self.inh_hold[nt]\n #print \"- single value in inh_hold\", self.inh_hold[nt] \n \n \n self.global_pulse_list_inh.append([])\n for ns in range(self.n_syn_inh[nt]): # loop over all inhibitory synapses!\n self.global_pulse_list_inh[-1].append([])\n for n in range(self.syn_max_inh[nt]): # number of cells of this celltype\n self.global_pulse_list_inh[-1][-1].append(local_gid_count+self.gid_count)\n local_gid_count += 1\n local_gid_count_type.append([])\n local_gid_count_type[-1].append('inh')\n local_gid_count_type[-1].append(n) # 
number of cell within their population 0..N[nt]\n local_gid_count_type[-1].append(ns) # number of synapse \n\n \n # INTRUDER SYNAPSE\n if str(type(self.g_syn_intr[nt] )) is not ndarray: self.g_syn_intr[nt] = np.array([self.g_syn_intr[nt] ]) # each cell can receive different g_syn_intr !\n \n if len(self.g_syn_intr[nt]) == len(self.global_gidlist[nt]):\n pass \n else:\n self.g_syn_intr[nt] = np.ones(len(self.global_gidlist[nt]))*self.g_syn_intr[nt][0]\n #print \"- single value in g_syn_intr, cells:\", len(self.global_gidlist[nt])\n \n self.global_pulse_list_intr.append([])\n for ns in range(self.n_syn_intr[nt]): # loop over all intruding synapses!\n self.global_pulse_list_intr[-1].append([])\n for n in range(self.syn_max_intr[nt]): # number of generators for this celltype\n self.global_pulse_list_intr[-1][-1].append(local_gid_count+self.gid_count)\n local_gid_count += 1\n local_gid_count_type.append([])\n local_gid_count_type[-1].append('intr')\n local_gid_count_type[-1].append(n) # number of cell within their population 0..N[nt]\n local_gid_count_type[-1].append(ns) # number of synapse \n \n \n t_vec_input = np.array([]) # input trains \n id_vec_input = np.array([]) # input trains id\n fs = 1 / self.dt\n ih_use_v = []\n \n for i in range(int(self.id), local_gid_count, int(self.nhost)): # loop over all train generators and generate them\n \n self.pulse_list.append(i+self.gid_count)\n pulse_gid = self.pulse_list[-1] \n gid = local_gid_count_type[i][1] # should correspond to this gid when multiple values inserted\n \n if local_gid_count_type[i][0] == 'ex':\n \n seed = int(10001 + pulse_gid) # unique gid for generators! \n np.random.seed(seed*423)\n \n if self.ihold_sigma[nt] > 0:\n ih_use = np.random.normal(self.ihold[nt], self.ihold[nt]*self.ihold_sigma[nt], 1).clip(min=0) # self.ihold[nt]*self.ihold_sigma[nt] \n \n elif self.ihold_sigma[nt] < 0:\n ih_use = np.random.uniform(0.1, self.ihold[nt])\n \n else:\n ih_use = self.ihold[nt] \n \n ih_use_v.append(ih_use)\n \n if ih_use > 0:\n # train has to be contructed here, to insert different train into each \"dendrite\"\n ## different ihold has to be implemented here!!\n iholdvec = concatenate((zeros(round(fs)), ones(round(len(tarray) - 1 * fs)) * ih_use))\n \n if isinstance(self.syn_ex_dist[nt], ( tuple ) ): # distribution of amplitude, only one noise source!\n \n np.random.seed(pulse_gid*40)\n if self.syn_ex_dist[nt][1] > 0:\n f = np.random.normal(self.syn_ex_dist[nt][0], self.syn_ex_dist[nt][1], 1).clip(min=0)\n else:\n f = self.syn_ex_dist[nt][0]\n \n f2 = f\n rnd.seed(pulse_gid*40) # use gid so type 1, 2 is identical for each cell\n #rnd.seed(gid*40) # use gid so type 1, 2 is identical for each cell\n if self.syn_ex_dist[nt][2] > 0: # add inverted input with 50% probability, in future versions this will indicate the propability for -1 and 1 \n f2 = rnd.sample([-1,1],1)[0] * f\n #f2 = f\n \n if amode == 1:\n inamp = (f2 * self.amod[nt] * ih_use)\n elif amode == 2:\n inamp = (f2 * self.amod[nt] * self.ihold[nt]) \n \n modulation = (tarray, inamp * farray[0] + iholdvec)\n \n #if self.id == 0: print \"- randomize play stimulus height, pulse_gid=\", pulse_gid, \" gid=\", gid ,\" f=\", f \n if (gid==0): print \"- randomize play stimulus height, pulse_gid=\", pulse_gid, \" gid=\", gid ,\" f2=\", f2,\"inamp=\",inamp \n \n #rnd.seed(local_gid_count_type[i][1]*300) # pick seed based on number of cell\n #nj = rnd.sample(range(len(farray)),1)[0] \n nj = 1\n \n else: # different noise sources can be used at different synapses, linear combination test in 
openloop\n \n nj = self.syn_ex_dist[nt][local_gid_count_type[i][2]]\n \n if nj == 0:\n modulation = (tarray, iholdvec)\n else:\n if amode == 1:\n inamp = (self.factor_celltype[nt] * self.amod[nt] * ih_use)\n elif amode == 2:\n inamp = (self.factor_celltype[nt] * self.amod[nt] * self.ihold[nt]) \n\n modulation = (tarray, inamp * farray[nj-1] + iholdvec)\n if self.id == 0: print \"ex farray number:\", nj-1, \"ih_use:\", ih_use, \"self.amod[nt]:\", self.amod[nt], \"inamp: \", inamp\n \n \n # will be done n_syn_ex * number of cells!\n if self.noise_syn_tau[nt] < 0: # variable threshold\n no = self.noise_syn[nt]\n else: \n no = self.noise_syn[nt]*ih_use\n\n train, self.n_train_ex = mod_spike_train(modulation, noise = no, seed = seed, noise_tau = self.noise_syn_tau[nt], noise_a = self.noise_a[nt]) \n \n #plt.figure(\"input\")\n #plt.plot(train, train*0, '|')\n #plt.show()\n \n t_vec_input = np.append(t_vec_input, train*ms).flatten() # use ms to save!!\n id_vec_input = np.append(id_vec_input, np.ones(len(train))*pulse_gid).flatten()\n \n f_cells_mean_local0, f_cells_cv_local0, f_cells_std_local0 = self.calc_fmean(train*ms, t_startstop)\n f_cells_mean_local.append(f_cells_mean_local0); f_cells_cv_local.append(f_cells_cv_local0); f_cells_std_local.append(f_cells_std_local0)\n \n if self.id == 0: print \"TRAIN: requ. mean:\", ih_use ,\"eff. mean:\", f_cells_mean_local0, \"cv: \" , f_cells_cv_local0, \"std:\" , f_cells_std_local0\n \n else:\n train = []\n self.n_train_ex = []\n \n\n\n elif local_gid_count_type[i][0] == 'intr':\n \n # train has to be contructed here, to insert different train into each \"dendrite\"\n nj = 0\n \n seed = int(10001 + pulse_gid)\n np.random.seed(seed*4411)\n \n if self.intr_hold_sigma[nt] > 0: \n ih_use = np.random.normal(self.intr_hold[nt], self.intr_hold[nt]*self.intr_hold_sigma[nt], 1).clip(min=0) \n else:\n ih_use = self.intr_hold[nt]\n \n ih_use_v.append(ih_use)\n \n if ih_use > 0: \n \n iholdvec = concatenate((zeros(round(fs)), ones(round(len(tarray) - 1 * fs)) * ih_use))\n modulation = (tarray, iholdvec)\n \n # will be done n_syn_in * number of cells! \n if self.noise_syn_tau_intr[nt] < 0: # variable threshold\n no = self.noise_syn_intr[nt]\n else: \n no = self.noise_syn_intr[nt]*ih_use\n \n if self.noise_syn_tau_intr[nt] >= -1:\n train, _ = mod_spike_train(modulation, noise = no, seed = seed, noise_tau = self.noise_syn_tau_intr[nt], noise_a = self.noise_a_intr[nt]) # train in ms\n else:\n train = oscill_spike_train(sor = 4, spike_prob = 1/4, noise_fraction = 4, end_time = tarray[-1]/ms, seed = seed) \n \n \n elif local_gid_count_type[i][0] == 'inh':\n \n # train has to be contructed here, to insert different train into each \"dendrite\"\n \n seed = int(10001 + pulse_gid)\n \n np.random.seed(seed*44)\n \n if self.inh_hold_sigma[nt] > 0: \n ih_use = np.random.normal(self.inh_hold[nt][gid], self.inh_hold[nt][gid]*self.inh_hold_sigma[nt], 1).clip(min=0) \n else:\n ih_use = self.inh_hold[nt][gid]\n \n \n iholdvec = concatenate((zeros(round(fs)), ones(round(len(tarray) - 1 * fs)) * ih_use))\n \n nj = self.syn_inh_dist[nt][local_gid_count_type[i][2]]\n if nj == 0:\n modulation = (tarray, iholdvec)\n else:\n inamp = (self.amod[nt] * ih_use)\n modulation = (tarray, inamp * farray[nj-1] + iholdvec)\n #print \"inh farray number:\", nj-1, \"ih_use:\", ih_use, \"amp: \", inamp #old: nj-1+nemax\n \n # will be done n_syn_in * number of cells! 
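                # note: same sign convention as in the 'ex' branch above -- a negative
                # noise_syn_tau_inh[nt] ("variable threshold") uses the noise level as an
                # absolute value, otherwise it is scaled by the holding rate ih_use of
                # this generator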
\n if self.noise_syn_tau_inh[nt] < 0: # variable threshold\n no = self.noise_syn_inh[nt]\n else: \n no = self.noise_syn_inh[nt]*ih_use\n \n train, _ = mod_spike_train(modulation, noise = no, seed = seed, noise_tau = self.noise_syn_tau_inh[nt], noise_a = self.noise_a_inh[nt]) # train in ms\n #print train\n \n #print train\n if len(train) > 0:\n if self.id == 0: \n print \"-\", pulse_gid, local_gid_count_type[i], \"seed: \", seed, \"ih_use:\", ih_use, no, nj #, \"first spike: \", train[0] \n self.setup_Play_train(train = train+self.inh_delay, input_gid = pulse_gid, delay = delay) # train in ms\n \n \n self.gid_count += local_gid_count # increase gid count\n \n self.barrier()\n \n for i, gid in enumerate(self.gidlist[nt]): # for all input cells\n \n rnd.seed(gid*200)\n n = self.global_gidlist[nt].index(gid) # index of cell within their population 0..N[nt]\n # i is index on this node only!\n \n self.record_syn = []\n for j in range(self.n_syn_ex[nt]):\n if N[nt] == len(self.global_pulse_list[nt][j]):\n pulse_gid = self.global_pulse_list[nt][j][n] #every cell of this type receives one pulse gid \n if self.id == 0: print \"- gid:\", gid ,\" n:\", n ,\" one ex train for each synapse:\", pulse_gid, \"self.g_syn_ex[nt][n]:\", self.g_syn_ex[nt][n] \n else:\n pulse_gid = rnd.sample(self.global_pulse_list[nt][j],1)[0] # not enough, just pick one at random, for inh/f search only one synapse available!\n if self.id == 0: print \"- gid:\", gid ,\" n:\", n ,\" one ex train from\", len(self.global_pulse_list[nt][j]), \":\", pulse_gid, \"self.g_syn_ex[nt][n]:\", self.g_syn_ex[nt][n] \n \n if \"gaba\" in str(self.tau1_ex[nt]):\n self.connect_Synapse(pulse_gid, nt, i, n, gid, j, syntype = \"inh\") \n else:\n self.connect_Synapse(pulse_gid, nt, i, n, gid, j, syntype = \"ex\", nsyn = self.n_syn_ex[nt]) \n \n \n if self.n_syn_inh[nt] > 0:\n for j in range(self.n_syn_inh[nt]):\n \n if N[nt] == len(self.global_pulse_list_inh[nt][j]):\n pulse_gid = self.global_pulse_list_inh[nt][j][n] #every cell of this type receives one pulse gid \n if self.id == 0: print \"- one inh train for each synapse:\", pulse_gid\n else:\n pulse_gid = rnd.sample(self.global_pulse_list_inh[nt][j],1)[0] # not enough, just pick one at random \n if self.id == 0: print \"- one inh train from\", len(self.global_pulse_list_inh[nt][j]), \":\", pulse_gid\n \n self.connect_Synapse(pulse_gid, nt, i, n, gid, j, syntype = \"inh\") \n \n \n if self.n_syn_intr[nt] > 0:\n for j in range(self.n_syn_intr[nt]):\n \n if N[nt] == len(self.global_pulse_list_intr[nt][j]):\n pulse_gid = self.global_pulse_list_intr[nt][j][n] #every cell of this type receives one pulse gid \n if self.id == 0: print \"- one intruding train for each synapse:\", pulse_gid\n else:\n pulse_gid = rnd.sample(self.global_pulse_list_intr[nt][j],1)[0] # not enough, just pick one at random \n if self.id == 0: print \"- one intruding train from\", len(self.global_pulse_list_intr[nt][j]), \":\", pulse_gid\n \n if (self.use_pc is False):\n \n if self.celltype[nt] == 'Prk': self.cells[nt][i].delrerun() \n \n (msg,CF_input) = self.cells[nt][i].createsyn_CF(record_all=0,factor=self.g_syn_intr[nt][0],cf_setup_select='old')\n CF_input.number = 3 # three bursts\n CF_input.start = -0.3 # See synapsepfpurk.py\n CF_input.interval = 3 # 3 ms interval between bursts\n\n self.cells[nt][i].input_to_CF_nc.append(h.NetCon(self.vecstim[j], CF_input, 0, 0.1, 1))\n self.netcons.append(self.cells[nt][i].input_to_CF_nc[-1])\n \n else:\n print \"NOT IMPLEMENTED\"\n \n \n if self.id == 0: print \"trains connected\"\n 
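            # the spike times/ids of the generated input trains are local to each rank;
            # they are gathered to rank 0 below so that the statistics of the actual input
            # (fmean_input, fmax_input, fcvm_input, ...) can be reported; intruder
            # ('intr') trains are excluded from these statistics
            # e.g. (sketch of the calls used below):
            #   ids   = self.do_gather(id_vec_input, dtype='i')  # concatenated over ranks, valid on rank 0
            #   t_all = self.do_gather(t_vec_input)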
\n if local_gid_count_type[i][0] == 'intr':\n pass\n else:\n self.id_all_vec_input.append(self.do_gather(id_vec_input, dtype = 'i')) \n self.t_all_vec_input.append(self.do_gather(t_vec_input)) \n \n f_cells_mean = self.do_gather(f_cells_mean_local) \n f_cells_cv = self.do_gather(f_cells_cv_local) \n f_cells_std = self.do_gather(f_cells_std_local) \n \n self.fmean_input = np.nan\n self.fmax_input = np.nan\n self.fmstd_input = np.nan\n self.fcvm_input = np.nan\n self.fstdm_input = np.nan\n \n ih_use_v_all = self.do_gather(ih_use_v)\n \n if self.id == 0 and local_gid_count_type[i][0] != 'intr':\n \n self.fmean_input = mean(np.nan_to_num(f_cells_mean)) # compute mean of mean rate for all cells\n self.fmstd_input = std(np.nan_to_num(f_cells_mean)) \n self.fmax_input = max(np.nan_to_num(f_cells_mean))\n \n self.fcvm_input = mean(f_cells_cv[~np.isnan(f_cells_cv)])\n self.fstdm_input = mean(f_cells_std[~np.isnan(f_cells_std)])\n \n self.ih_use_max = max(ih_use_v_all)\n \n print \"- trains, fmean: \",self.fmean_input, \"fmax: \",self.fmax_input, \"Hz\", \"fmstd: \",self.fmstd_input, \"Hz\", \"fcvm: \",self.fcvm_input, \"fstdm: \",self.fstdm_input, \"Hz, ih_use_max:\", self.ih_use_max \n \n else:\n self.global_pulse_list.append([])\n self.global_pulse_list_inh.append([])\n \n\n\n def do_gather(self, v_local, dtype = 'd'):\n \n if self.use_mpi:\n \n self.barrier()\n \n #v_local = v_local.astype(dtype).flatten()\n v_local = np.array(v_local, dtype=dtype).flatten() \n \n if self.use_pc == False:\n\n v_global = None\n counts_local = np.array(len(v_local), dtype='i')\n \n counts = 0\n if self.id == 0:\n counts = np.empty(self.nhost, dtype='i')\n \n self.comm.Gather(sendbuf=[counts_local, MPI.INT], recvbuf=[counts, MPI.INT], root=0)\n \n if self.id == 0:\n v_global = np.empty(sum(counts), dtype=dtype)\n \n \n if dtype == 'd':\n self.comm.Gatherv(sendbuf=[v_local, MPI.DOUBLE], recvbuf=[v_global, (counts, None), MPI.DOUBLE], root=0)\n elif dtype == 'i':\n self.comm.Gatherv(sendbuf=[v_local, MPI.INT], recvbuf=[v_global, (counts, None), MPI.INT], root=0) \n \n #v_global = np.hstack(v_global)\n \n else:\n sendlist = [None]*self.nhost \n sendlist[0] = v_local\n getlist = self.pc.py_alltoall(sendlist)\n \n v_global = np.hstack(getlist) \n \n else:\n \n v_global = np.hstack(v_local)\n \n return v_global\n \n\n def setup_Play_train(self, train = [], input_gid = 0, delay = 1):\n \n self.trains.append(train)\n\n # possibility to play spikes into the cells!\n self.vecstim.append(h.VecStim(.5))\n self.nc_vecstim.append(h.NetCon(self.vecstim[-1],None))\n self.nc_vecstim[-1].delay = delay\n\n self.spike_vec.append(h.Vector(self.trains[-1]))\n self.vecstim[-1].play(self.spike_vec[-1]) \n\n if (self.use_mpi):\n self.pc.set_gid2node(input_gid, self.id) # associate gid with this host\n self.pc.cell(input_gid,self.nc_vecstim[-1]) # associate gid with spike detector\n \n\n def record(self):\n \"\"\"\n Initializes recording vectors. 
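        Records the somatic membrane potential of the first cell of each cell type
        (on whichever rank owns it) and, on rank 0 only, the stimulus, holding and
        step currents, the fluctuating noise input and the time vector.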
Internal function\n \"\"\"\n\n if self.n_celltypes > 1:\n #print \"self.n_borders:\",self.n_borders\n for n in range(self.n_celltypes):\n if self.n_borders[n] in self.gidlist[n]:\n #print \"np.shape(self.rec_v):\",np.shape(self.rec_v)\n #print \"np.shape(self.cells):\",np.shape(self.cells)\n self.rec_v[n].record(self.cells[n][0].soma(0.5)._ref_v) \n\n \n if self.id == 0: # only for first node and first cell\n \n # Voltage\n self.rec_v[0].record(self.cells[self.a_celltype[0]][0].soma(0.5)._ref_v) \n \n # Stimuli\n self.rec_i = h.Vector()\n\n if (self.plays != []): \n if (isinstance(self.plays[0], list) is False): \n self.rec_i.record(self.plays[0]._ref_i)\n else:\n self.rec_i.record(self.plays[0][0]._ref_i) \n \n self.rec_ich = h.Vector()\n if self.ic_holds != [] and (isinstance(self.ic_holds[0], list) is False): \n self.rec_ich.record(self.ic_holds[0]._ref_i)\n \n self.rec_ics = h.Vector()\n if self.ic_starts != []: \n self.rec_ics.record(self.ic_starts[0]._ref_i)\n \n self.rec_n = h.Vector() \n \n if self.fluct_s[0] > 0:\n # Fluctuating input \n self.rec_n.record(self.flucts[0]._ref_i)\n print \"recording noise\"\n elif (len(self.flucts) > 0) and (len(self.fluct_g_i0)>0):\n self.rec_n.record(self.flucts[0]._ref_g_i)\n print \"recording g noise\"\n else:\n print \"nonoise\"\n \n if hasattr(self.cells[self.a_celltype[0]][0], 'lkg2_noise'):\n if self.cells[self.a_celltype[0]][0].lkg2_noise > 0:\n self.rec_n.record(self.cells[self.a_celltype[0]][0].fluct._ref_il)\n print \"recording tonic gaba noise\" \n \n self.rec_step = h.Vector()\n if self.ic_steps != []: \n self.rec_step.record(self.ic_steps[0]._ref_i) \n \n # Time\n self.rec_t = h.Vector()\n self.rec_t.record(h._ref_t)\n \n \n def run(self, tstop = 10*s, do_loadstate = True):\n \"\"\"\n Starts the stimulation.\n \"\"\"\n self.record()\n \n if self.first_run:\n\n if self.use_mpi: self.pc.set_maxstep(100)\n #self.pc.spike_compress(1) #test\n \n if self.use_multisplit:\n import multiprocessing\n \n Hines = h.CVode()\n Hines.active(0)\n \n h.load_file(\"parcom.hoc\")\n p = h.ParallelComputeTool()\n \n if self.use_mpi:\n cpus = multiprocessing.cpu_count() #32 #self.pc.nhost()\n else:\n cpus = multiprocessing.cpu_count() #32 \n \n p.change_nthread(cpus,1) \n p.multisplit(1)\n print \"Using multisplit, cpus:\", cpus\n \n else:\n \n h.load_file(\"stdrun.hoc\")\n \n if self.use_local_dt:\n h.cvode.active(1) \n h.cvode.use_local_dt(1) \n \n h.celsius = self.temperature \n h.dt = self.dt/ms # Fixed dt\n h.steps_per_ms = 1 / (self.dt/ms)\n \n if self.cells[self.a_celltype[0]] != []: \n if hasattr(self.cells[self.a_celltype[0]][0], 'v_init'):\n h.v_init = self.cells[self.a_celltype[0]][0].v_init # v_init is supplied by cell itself!\n else: \n h.v_init = -60 \n \n h.stdinit() \n\n h.finitialize()\n \n if hasattr(self.cells[self.a_celltype[0]][0], 'load_states') and do_loadstate:\n m = md5.new()\n cell_exe_new = self.cell_exe[0]\n m.update(cell_exe_new)\n filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'\n self.cells[self.a_celltype[0]][0].load_states(filename)\n \n else:\n\n pass \n \n \n if self.id == 0:\n import time\n t0 = time.time()\n\n if self.simstep == 0:\n if self.id == 0: print \"Running without steps\",\n \n if self.use_mpi:\n self.pc.psolve(tstop/ms)\n else:\n h.init()\n h.tstop = tstop/ms\n h.run()\n\n else:\n \n h.finitialize()\n cnt = 1\n \n #if self.id == 50: \n # print len(self.cells[1][0].nc), self.cells[1][0].nc[0].weight[0]\n # print len(self.cells[0][0].nc_inh), 
self.cells[0][0].nc_inh[0].weight[0]\n \n h.t = 0\n while h.t < tstop/ms:\n \n if self.id == 0:\n print \"Running...\",\n if self.use_mpi:\n past_time = self.pc.time()\n \n h.continuerun(cnt*self.simstep/ms)\n if self.use_mpi: self.pc.barrier()\n \n if self.id == 0:\n if self.use_mpi:\n print \"Simulated time =\",h.t*ms, \"s, Real time = \", (self.pc.time()-past_time), 's'\n else:\n print \"Simulated time =\",h.t*ms, \"s\"\n \n #if self.id == 0:\n # print hpy.heap().byrcs\n cnt += 1\n\n if self.id == 0: print \"psolve took \", time.time() - t0, \"seconds\"\n \n self.first_run = False\n \n self.barrier() # wait for other nodes\n\n self.tstop = tstop \n \n \n def get(self, t_startstop=[], i_startstop=[], N = []):\n \"\"\"\n Gets the recordings.\n \"\"\"\n \n if N == []:\n N = self.N\n \n if t_startstop == []:\n t_startstop = np.array([2, self.tstop])\n \n t_all_vec = []\n id_all_vec = []\n \n fmean = []\n fbase = []\n fmax = []\n fmstd = []\n fcvm = []\n fstdm = []\n gid_del = []\n f_cells_mean_all = []\n f_cells_base_all = []\n f_cells_cv_all = [] \n f_cells_std_all = []\n \n fmeanA = []\n fmstdA = []\n fmaxA = []\n fcvmA = []\n fstdmA = []\n fbaseA = []\n fbstdA = []\n \n if self.id == 0: print \"start gathering spikes\"\n \n for n in range(self.n_celltypes):\n\n if self.use_mpi: \n \n self.barrier() # wait for other node\n t_vec = np.array(self.t_vec[n]).flatten()*ms - 1*ms # shift time because of output delay\n id_vec = np.array(self.id_vec[n]).flatten()\n \n else:\n \n t_vec = np.array([])\n id_vec = np.array([])\n print np.shape(self.t_vec)\n for i in self.gidlist[n]:\n t_vec0 = np.array(self.t_vec[n][i]).flatten()*ms \n t_vec = np.append(t_vec, t_vec0).flatten()\n id_vec = np.append(id_vec, np.ones(len(t_vec0))*i).flatten() \n\n fmean0, fmax0, fmstd0, fcvm0, fstdm0, gid_del0, f_cells_mean_all0, f_cells_cv_all0, f_cells_std_all0, fbase0, f_cells_base_all0 = self.get_fmean(t_vec, id_vec, t_startstop = t_startstop, gidlist = self.gidlist[n]) \n fmean.append(fmean0); fmax.append(fmax0), fmstd.append(fmstd0), fcvm.append(fcvm0), fstdm.append(fstdm0), gid_del.append(gid_del0), f_cells_mean_all.append(f_cells_mean_all0), f_cells_cv_all.append(f_cells_cv_all0), f_cells_std_all.append(f_cells_std_all0)\n fbase.append(fbase0); f_cells_base_all.append(f_cells_base_all0)\n \n t_all_vec.append(self.do_gather(t_vec))\n id_all_vec.append(self.do_gather(id_vec))\n \n if (self.id == 0) and (self.no_fmean == False): \n f_cells_mean_all = np.array(f_cells_mean_all).flatten()\n fmeanA = mean(f_cells_mean_all) # compute mean of mean rate for all cells\n fmstdA = std(f_cells_mean_all) \n fmaxA = max(f_cells_mean_all)\n \n f_cells_base_all = np.array(f_cells_base_all).flatten()\n fbaseA = mean(f_cells_base_all) # compute mean of mean rate for all cells\n fbstdA = std(f_cells_base_all)\n \n f_cells_cv_all = np.concatenate((np.array(f_cells_cv_all)))\n f_cells_std_all = np.concatenate((np.array(f_cells_std_all)))\n \n fcvmA = mean(f_cells_cv_all)\n fstdmA = mean(f_cells_std_all)\n \n print \"- ALL, fmean: \",fmeanA, \"fmax: \",fmaxA, \"Hz\", \"fmstd: \",fmstdA, \"Hz\", \"fcvm: \",fcvmA, \"fstdm: \",fstdmA, \"Hz\", \"fbase: \",fbaseA, \"Hz\", \"fbstd: \", fbstdA, \"Hz\"\n \n if self.id == 0: print \"all spikes have been gathered\"\n\n self.barrier()\n\n # do this here to have something to return\n voltage = []\n current = []\n time = []\n \n freq_times = []\n spike_freq = []\n gsyn = []\n \n if self.id == 0: # only for first node\n \n time = np.array(self.rec_t)*ms\n\n # use self.bin_width as bin width!\n 
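            # freq_times are the PSTH bin edges: all gathered spike times are histogrammed
            # into these bins further below and divided by bin_width to give the population
            # rate spike_freq (roughly: np.histogram(spikes, bins=freq_times)[0] / self.bin_width)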
freq_times = arange(0, time[-1], self.bin_width)\n\n voltage.append(np.array(self.rec_v[0])*mV)\n current = np.zeros(len(time))\n\n if len(np.array(self.rec_ics)) > 0:\n current = current + np.array(self.rec_ics) \n \n if len(np.array(self.rec_ich)) > 0:\n current = current + np.array(self.rec_ich)\n \n if len(np.array(self.rec_i)) > 0:\n current = current + np.array(self.rec_i) \n \n if len(np.array(self.rec_n)) > 0:\n current = current + np.array(self.rec_n) \n print np.array(self.rec_n) \n \n if len(np.array(self.rec_step)) > 0:\n current = current + np.array(self.rec_step) \n\n else:\n time = [0]\n \n self.barrier()\n time = self.broadcast(time, fast = True)\n\n gsyn_in = []\n gsyn_in0 = []\n \n if 'gsyn_in' in self.method_interpol:\n \n gsyn_in = None\n if self.id == 0: print \"- collecting gsyn_in\"\n gsyn_in0 = np.zeros(len(time), dtype='d')\n if self.record_syn is not []:\n for i, j in enumerate(self.record_syn):\n gsyn_in0 = gsyn_in0 + self.gsyn_in_fac[i] * np.array(j, dtype='d') \n \n if self.use_mpi:\n count = len(time)\n \n #if self.id == 0: gsyn_in = np.empty(count*self.nhost, dtype='d')\n #self.comm.Gatherv(sendbuf=[gsyn_in0, MPI.DOUBLE], recvbuf=[gsyn_in, MPI.DOUBLE], root=0)\n \n gsyn_in = self.do_gather(gsyn_in0)\n \n if self.id == 0:\n gsyn_in = np.reshape(gsyn_in, (self.nhost,count))\n gsyn_in = sum(gsyn_in,0)\n \n else:\n gsyn_in = gsyn_in0\n \n self.barrier() # wait for other nodes\n \n if self.n_celltypes > 1:\n if self.id == 0: print \"more than one celltype send voltage of first other cell to root\"\n \n for n in range(1, self.n_celltypes):\n \n if self.use_pc == True:\n \n srclist = [None]*self.nhost\n \n if (self.n_borders[n] in self.gidlist[n]):\n srclist[0] = np.array(self.rec_v[n])*mV\n \n destlist = self.pc.py_alltoall(srclist) \n \n if self.id == 0:\n idx = [i for i, x in enumerate(destlist) if x is not None]\n if len(idx) > 1: raise ValueError('Error, too many vectors sent, should be one at a time!')\n voltage.append(np.array(destlist[idx[0]]))\n \n else:\n \n if self.id == 0:\n if (self.n_borders[n] in self.gidlist[n]): # first node has it, do not wait to receive it!\n v_temp = np.array(self.rec_v[n])*mV\n else:\n v_temp = np.zeros(len(voltage[0]))\n self.comm.Recv([v_temp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(sum(N)+33))\n \n voltage.append(v_temp)\n else:\n if self.n_borders[n] in self.gidlist[n]:\n voltage = np.array(self.rec_v[n])*mV \n self.comm.Ssend([voltage, MPI.DOUBLE], dest=0, tag=int(sum(N)+33))\n\n self.barrier() # wait for other nodes \n\n times = arange(0, time[-1], 1*ms) \n gsyns = []\n if self.called_syn_out_all == True:\n \n for n in range(self.n_celltypes):\n gsyns.append([])\n \n if self.use_pc == True:\n\n for i, gid in enumerate(self.global_gidlist[n]): \n \n srclist = [None]*self.nhost\n \n if gid in self.gidlist[n]: #only one node does this\n a = np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])\n c = np.zeros(int((1*ms)/self.dt))\n temp = np.append(a, c).flatten()\n temp = temp[int((1*ms)/self.dt):len(temp)+1]\n gtemp = interp(times,time,temp)\n \n srclist[0] = gtemp # send to root only\n \n destlist = self.pc.py_alltoall(srclist) \n \n if self.id == 0:\n idx = [i for i, x in enumerate(destlist) if x is not None]\n if len(idx) > 1: raise ValueError('Error, too many vectors sent, should be one at a time!')\n gsyns[n].append(np.array(destlist[idx[0]]))\n \n else: \n \n for i, gid in enumerate(self.global_gidlist[n]): \n \n if self.id == 0:\n if gid in self.gidlist[n]:\n a = 
np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])\n c = np.zeros(int((1*ms)/self.dt))\n temp = np.append(a, c).flatten()\n temp = temp[int((1*ms)/self.dt):len(temp)+1]\n gtemp = interp(times,time,temp)\n \n else:\n gtemp = np.zeros(len(times))\n self.comm.Recv([gtemp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(gid))\n \n gsyns[n].append(np.array(gtemp))\n \n else:\n if gid in self.gidlist[n]:\n a = np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])\n c = np.zeros(int((1*ms)/self.dt))\n temp = np.append(a, c).flatten()\n temp = temp[int((1*ms)/self.dt):len(temp)+1]\n gtemp = interp(times,time,temp) \n #np.array(self.cells[n][self.gidlist[n].index(gid)].record['gsyn'])\n self.comm.Ssend([gtemp, MPI.DOUBLE], dest=0, tag=int(gid))\n \n if self.id == 0: print \"root gathered synaptic output conductance\" \n \n \n self.barrier() # wait for other nodes \n \n times = arange(0, time[-1], 10*ms)\n \n w_mat = []\n winh_mat = []\n \n if self.stdp_used == True:\n \n for n in range(self.n_celltypes):\n w_mat.append([]) \n \n for i, gid in enumerate(self.global_gidlist[n]): \n \n if self.id == 0:\n \n wall = []\n \n if gid in self.gidlist[n]:\n\n walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w'] \n if len(walltemp) > 0:\n for l in range(len(walltemp)):\n wtemp = np.array(walltemp[l])\n wtemp = interp(times,time,wtemp)\n wall.append(wtemp)\n \n else:\n \n while 1:\n wtemp = np.zeros(len(times))\n self.comm.Recv([wtemp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(gid))\n \n if wtemp[0] == -1:\n break\n else:\n wall.append(wtemp)\n \n w_mat[n].append(wall)\n \n else:\n if gid in self.gidlist[n]:\n walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w']\n \n if len(walltemp) > 0:\n for l in range(len(walltemp)):\n wtemp = np.array(walltemp[l])\n wtemp = interp(times,time,wtemp)\n self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))\n \n wtemp = np.ones(len(times))*-1\n self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid)) \n\n if self.id == 0: \n print \"root gathered synaptic input conductance\" \n\n\n self.barrier() # wait for other nodes \n\n \n for n in range(self.n_celltypes):\n winh_mat.append([])\n \n for i, gid in enumerate(self.global_gidlist[n]): \n \n if self.id == 0:\n \n wall = []\n \n if gid in self.gidlist[n]:\n \n walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w_inh'] \n if len(walltemp) > 0:\n for l in range(len(walltemp)):\n wtemp = np.array(walltemp[l])\n wtemp = interp(times,time,wtemp)\n wall.append(wtemp)\n \n else:\n \n while 1:\n wtemp = np.zeros(len(times))\n self.comm.Recv([wtemp, MPI.DOUBLE], source = MPI.ANY_SOURCE, tag=int(gid))\n \n if wtemp[0] == -1:\n break\n else:\n wall.append(wtemp)\n \n winh_mat[n].append(wall)\n \n else:\n if gid in self.gidlist[n]:\n walltemp = self.cells[n][self.gidlist[n].index(gid)].record['w_inh']\n \n if len(walltemp) > 0:\n for l in range(len(walltemp)):\n wtemp = np.array(walltemp[l])\n wtemp = interp(times,time,wtemp)\n self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))\n \n wtemp = np.ones(len(times))*-1\n self.comm.Ssend([wtemp, MPI.DOUBLE], dest=0, tag=int(gid))\n \n \n if self.id == 0: \n print \"root gathered synaptic input conductance\" \n \n\n self.barrier() # wait for other nodes \n \n\n t_all_vec_vec = []\n id_all_vec_vec = []\n f_cells_mean = []\n \n if self.id == 0: # only for first node\n \n for n in range(self.n_celltypes):\n \n ie = argsort(t_all_vec[n]) \n t_all_vec_vec.append( t_all_vec[n][ie] )\n id_all_vec_vec.append( 
id_all_vec[n][ie].astype(int) ) # \n \n print \"all spikes have been sorted\"\n\n if self.jitter > 0: # add jitter!\n np.random.seed(40)\n x = np.random.normal(0, self.jitter, len(t_all_vec_vec[self.a_celltype[0]])) \n t_all_vec_vec[self.a_celltype[0]] = t_all_vec_vec[self.a_celltype[0]] + x\n \n if self.delta_t > 0:\n t_all_vec_vec[self.a_celltype[0]] = t_all_vec_vec[self.a_celltype[0]] + self.delta_t\n \n gsyn = zeros(len(freq_times))\n \n if 'gsyn_in' in self.method_interpol:\n pass\n else: \n bvec = [\"syn\" in st for st in self.method_interpol]\n if np.any(bvec):\n \n if (not hasattr(self, 'passive_target')) | (self.jitter > 0): # if not already done in neuron via artificial cell\n \n [resp, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[self.a_celltype[0]], bins = freq_times)\n resp = np.concatenate((zeros(1),resp))\n \n Ksyn = syn_kernel(arange(0,10*self.syn_tau2,self.bin_width), self.syn_tau1, self.syn_tau2) \n Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))\n gsyn = np.convolve(Ksyn, resp, mode='same')\n print \"Generated gsyn by convolution with Ksyn\"\n self.nc_delay = 0 \n \n else:\n gsyn = interp(freq_times,time,np.array(self.rec_g)) \n \n spike_freq = np.zeros(len(freq_times))\n \n for j in self.a_celltype:\n \n #plt.figure('results_voltage') \n #ax99 = plt.subplot(2,1,1)\n #ax99.plot(time,voltage[j])\n \n #plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n #plt.savefig(\"./figs/Pub/Voltage_\" + str(self.pickle_prefix) + \"_cell\" + str(j) + \"_N\" + str(self.N[j]) + \".pdf\", dpi = 300, transparent=True) # save it \n #plt.show()\n #plt.clf() \n \n [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)\n \n if isinstance(self.factor_celltype[j], ( int, long ) ):\n f = self.factor_celltype[j] \n else:\n f = self.factor_celltype[j][0] \n \n spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width\n\n self.barrier() # wait for other nodes\n \n #figure('1')\n #plot(time,np.array(self.rec_s1),'b', time,np.array(self.rec_s2),'r')\n #plt.show()\n \n return {'time':time, 'voltage':voltage, 'current':current, 'fmean':fmean, 'f_cells_mean':f_cells_mean,\n 'gsyn':gsyn, 'freq_times':freq_times, 'spike_freq':spike_freq, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fstdmA':fstdmA, 'fbstdA':fbstdA,\n 't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec, 'gsyns':gsyns, 'w_mat':w_mat, 'winh_mat':winh_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'fbaseA':fbaseA, 'fbase':fbase}\n \n \n def clean(self):\n \n self.pc.runworker() \n self.pc.done() \n \n \n def compute_Transfer(self, stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor=[1]):\n\n stimulus0 = np.zeros(len(stimulus[0]))\n \n for a in self.a_celltype:\n # sum input to produce linear input that should be reconstructed!\n \n if (any(self.syn_inh_dist) > 0) and (any(self.syn_ex_dist) > 0):\n if max(self.syn_inh_dist) == max(self.syn_ex_dist): # same signal through ex and inh\n print \"inh_factor = [0,1]\"\n inh_factor = [0,1] \n \n for ni in self.syn_ex_dist[a]:\n if ni != 0:\n stimulus0 += inh_factor[ni-1] * stimulus[ni-1]\n print \"+ex:\", ni-1\n\n for ni in self.syn_inh_dist[a]:\n if ni != 0:\n stimulus0 -= inh_factor[ni-1] * stimulus[ni-1] #old: +nemax\n print \"-inh:\", ni-1 #old: +nemax\n \n if 
(max(self.n_syn_ex) == 0) and (max(self.n_syn_inh) == 0): \n stimulus0 += stimulus[0] \n print \"current\"\n \n #if self.n_syn_ex[self.celltype_syn[0]] == 0:\n # stimulus0 += stimulus[0] \n \n # amplitude should not matter since filter amplitude is simply adjusted \n #stimulus = stimulus0 #/len(self.syn_ex_dist)\n\n stimulus0 = stimulus0 / std(stimulus0) / 2\n \n # linear interpolation inside compute_Transfer !!!\n print \"max(stimulus0):\",max(stimulus0)\n results = compute_Transfer(spike_freq = spike_freq, freq_times = freq_times, \n stimulus = stimulus0, t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in, do_csd = do_csd, t_kernel = 1*s,\n method_interpol = self.method_interpol, nc_delay = self.nc_delay, w_length = 3, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, give_psd = self.give_psd) # freq_wp not defined, use all frequencies\n \n # TEST:\n #VAF = results.get('VAFf_mat')\n #freq_used = results.get('freq_used')\n \n #iend = mlab.find(freq_used >= self.xmax)[0] \n #err = 1-mean(VAF[1][0,1:iend-1])\n #print \"err: \", err \n \n return results\n \n \n def residuals_compute_Transfer(self, p, stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor):\n \n inh_factor_in = inh_factor[:]\n ip = 0\n for i, inhf in enumerate(inh_factor_in):\n if inhf < 0:\n inh_factor_in[i] = p[ip]\n ip += 1\n \n results = self.compute_Transfer(stimulus = stimulus, spike_freq = spike_freq, freq_times = freq_times, \n t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in, \n do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor = inh_factor_in) \n \n VAF = results.get('VAFf_mat')\n freq_used = results.get('freq_used')\n \n iend = mlab.find(freq_used >= self.xmax)[0] \n err = 1-mean(VAF[1][0,0:iend])\n print \"inh_factor:\", inh_factor_in, \"err: \", err \n \n return err \n \n #@profile \n def fun_cnoise_Stim(self, t_stim = 10*s, sexp = 0, cutf = 0, do_csd = 1, t_qual = 0, freq_used = np.array([]), K_mat_old = np.array([]), inh_factor = [1], onf = None, equi = 0):\n \"\"\"\n Stimulate cell with colored noise\n sexp = spectral exponent: Power ~ 1/freq^sexp\n cutf = frequency cutoff: Power flat (white) for freq <~ cutf \n do_csd = 1: use cross spectral density function for computation\n \"\"\"\n self.barrier() # wait for other nodes\n \n filename = str(self.pickle_prefix) + \"_results_pop_cnoise.p\"\n filepath = self.data_dir + \"/\" + filename\n \n if self.id == 0: print \"- filepath:\", filepath \n \n if self.do_run or (os.path.isfile(filepath) is False):\n\n tstart = 0; \n fs = 1 / self.dt # sampling rate \n fmax = fs / 2 # maximum frequency (nyquist)\n \n t_noise = arange(tstart, t_stim, self.dt) # create stimulus time vector, make sure stimulus is even!!!\n\n #print self.syn_ex_dist\n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_ex_dist == []):\n for nt in range(self.n_celltypes): # loop over all cells\n #print \"nt\", nt\n if hasattr(self.cells[nt][0], 'input_vec'):\n self.syn_ex_dist.append([1] * len(self.cells[nt][0].input_vec)) # default ex for all by default!!!\n else: \n self.syn_ex_dist.append([1] * self.n_syn_ex[nt]) # default ex for all by default!!!\n \n #print self.syn_ex_dist\n \n if (self.syn_ex_dist[0] == []):\n nemax = 1\n else:\n nemax = max([item for sublist in self.syn_ex_dist for item in sublist])\n \n if (self.syn_inh_dist == []): # and (any(self.n_syn_inh) > 0)\n for nt in range(self.n_celltypes): # loop 
over all cells\n self.syn_inh_dist.append([0] * self.n_syn_inh[nt]) # default no inh for all by default!!!\n \n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_inh_dist[0] == []):\n nimax = 0\n else:\n nimax = max([item for sublist in self.syn_inh_dist for item in sublist]) \n \n #print \"self.syn_inh_dist, self.syn_ex_dist\", self.syn_inh_dist, self.syn_ex_dist\n \n n_noise = max([nemax,nimax]) # number of noise sources\n #print n_noise,nemax,nimax\n # create reproduceable input\n noise_data = []\n\n for nj in range(n_noise):\n \n if self.id == 0: # make sure all have the same signal !!!\n if len(freq_used) == 0: \n noise_data0 = create_colnoise(t_noise, sexp, cutf, self.seed+nj, onf = onf)\n else:\n noise_data0, _, _, _ = create_multisines(t_noise, freq_used) # create multi sine signal\n else:\n noise_data0 = np.empty(len(t_noise), dtype=np.float64)\n\n noise_data0 = self.broadcast(noise_data0, fast = True) \n \n noise_data.append(noise_data0)\n noise_data0 = [] \n \n noise_data_points = len(noise_data[0]) \n\n # Create signal weight vector inh_factor if it is not fully given\n if len(noise_data) > len(inh_factor):\n inh_factor = [inh_factor[0]] * len(noise_data) \n print \"inh_factor:\", inh_factor\n\n #if equi:\n #pass\n # tstop = t_stim\n \n if max(self.n_syn_ex) == 0: # this means current input\n \n self.set_IStim() # sets amp\n \n if self.fluct_s != []:\n if self.fluct_s[self.a_celltype[0]] > 0:\n if self.id == 0: print \"- adding i fluct\"\n self.connect_fluct()\n \n for i, m in enumerate(self.method_interpol):\n if \"syn\" in m: self.method_interpol[i] = \"syn \" + str(self.syn_tau1/ms) + \"/\" + str(self.syn_tau2/ms) + \"ms\"\n if \"bin\" in m: self.method_interpol[i] = \"bin \" + str(self.bin_width/ms) + \"ms\"\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, self.amp[self.a_celltype[0]], ihold = 0, delay_baseline = self.delay_baseline) # , tail_points = 0\n stimulus.append(stimulus0)\n tstop = t[-1]\n \n self.set_IPlay2(stimulus, t)\n if self.id == 0: print \"- starting colored noise transfer function estimation! with amp = \" + str(np.round(self.amp[self.a_celltype[0]],4)) + \", ihold = \" + str(np.round(self.ihold[self.a_celltype[0]],4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n else:\n\n self.give_freq = False\n ihold = self.set_i(self.ihold) # just sets amp, ihold should not change! 
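            # synaptic-input case: the colored-noise signal is not injected as a current;
            # construct_Stimulus() is called with amp=1 below and the stimulus is handed to
            # set_SynPlay(), which modulates the rates of the presynaptic spike generators
            # (the effective modulation depth per synapse comes from amod / factor_celltype)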
\n\n if 'gsyn_in' not in self.method_interpol: \n pass\n else:\n self.g_syn_ex = [1]*len(self.N)\n \n \n if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):\n if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):\n if self.id == 0: print \"- adding g fluct\"\n self.connect_gfluct(E_i=-65)\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) # self.amp\n stimulus.append(stimulus0)\n \n noise_data = [] \n tstop = t[-1]\n \n if self.N[self.a_celltype[0]] > 1:\n self.set_IStim(ihold = [0]*self.n_celltypes, ihold_sigma = [0]*self.n_celltypes, random_start = True, tstart_offset = 1)\n if self.id == 0: print \"- add random start\"\n \n #print \"Enter Synplay()\"\n self.set_SynPlay(stimulus, t, t_startstop = t_startstop) \n #print \"Exit Synplay()\"\n\n if self.id == 0: print \"- starting colored noise transfer function estimation with synaptic input! with amp = \" + str(np.round(self.amp,4)) + \", ihold = \" + str(np.round(self.ihold,4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n amp_vec = []\n mag_vec = [] \n pha_vec = []\n freq_used = []\n ca = []\n SNR_mat = []\n VAFf_mat = []\n Qual_mat = []\n CF_mat = [] \n VAF_mat = []\n stim = []\n stim_re_mat = []\n resp_mat = []\n current_re = []\n ihold1 = []\n tk = []\n K_mat = []\n gsyn_in = []\n fmean = []\n fmax = [] \n fmstd = [] \n fcvm = [] \n fmeanA = []\n fmaxA = [] \n fmstdA = [] \n fcvmA = [] \n t_all_vec_input_sorted = []\n id_all_vec_input_sorted = []\n \n if (self.id == 0) and (max(self.n_syn_ex) > 0):\n print range(self.n_celltypes), np.shape(self.t_all_vec_input)\n for l in range(self.n_celltypes): \n ie = argsort(self.t_all_vec_input[l]) \n t_all_vec_input_sorted.append( self.t_all_vec_input[l][ie] )\n id_all_vec_input_sorted.append( self.id_all_vec_input[l][ie].astype(int) )\n \n #if (self.id == 0): \n # print self.g_syn_ex\n # print np.array(self.g_syn_ex)>= 0\n \n #print \"g_syn_ex:\",self.g_syn_ex\n if np.array(np.array(self.g_syn_ex)>= 0).any():\n \n if hasattr(self.cells[self.a_celltype[0]][0], 'get_states') and equi:\n print \"- Equilibrate!\"\n self.run(tstop, do_loadstate = False)\n m = md5.new()\n cell_exe_new = self.cell_exe[0]\n m.update(cell_exe_new)\n filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'\n self.cells[self.a_celltype[0]][0].get_states(filename)\n else:\n self.run(tstop, do_loadstate = False)\n \n i_startstop = []\n \n results = self.get(t_startstop, i_startstop) \n time = results.get('time')\n current = results.get('current') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n gsyn = results.get('gsyn') \n freq_times = results.get('freq_times')\n spike_freq = results.get('spike_freq')\n t_all_vec_vec = results.get('t_all_vec_vec')\n id_all_vec_vec = results.get('id_all_vec_vec')\n gsyns = results.get('gsyns')\n gsyn_in = results.get('gsyn_in')\n \n fmax = results.get('fmax')\n fmstd = results.get('fmstd')\n fcvm = results.get('fcvm')\n \n fmeanA = results.get('fmeanA') \n fmaxA = results.get('fmaxA')\n fmstdA = results.get('fmstdA')\n fcvmA = results.get('fcvmA')\n \n fbaseA = results.get('fbaseA') \n fbase = results.get('fbase')\n fbstdA = results.get('fbstdA')\n \n \n else: # do not run, analyse input!!!\n \n time = t\n voltage = []\n for l in range(self.n_celltypes): \n 
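                # no simulation is run in this branch (the generated input is analysed
                # directly), so the voltage traces are zero-filled placeholders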
voltage.append(np.zeros(len(t)))\n current = []\n \n freq_times = []\n spike_freq = []\n gsyn = []\n gsyn_in = []\n \n t_all_vec_vec = []\n id_all_vec_vec = []\n \n fmean = []\n fmax = []\n fmstd = []\n fcvm = []\n fstdm = []\n \n fmeanA = []\n fmaxA = []\n fmstdA = []\n fcvmA = []\n fbaseA = []\n fbase = []\n fbstdA = []\n \n if self.id == 0:\n \n current = self.n_train_ex\n \n #t_all_vec = self.t_all_vec_input\n #id_all_vec = self.id_all_vec_input\n\n #ie = argsort(t_all_vec) \n #t_all_vec_vec.append( t_all_vec[ie] )\n #id_all_vec_vec.append( id_all_vec[ie].astype(int) )\n \n t_all_vec_vec = t_all_vec_input_sorted\n id_all_vec_vec = id_all_vec_input_sorted\n \n freq_times = arange(0, tstop, self.bin_width)\n spike_freq = np.zeros(len(freq_times))\n \n for j in self.a_celltype:\n \n [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)\n\n if self.tau2_ex[0] > 0:\n spike_freq = np.concatenate((zeros(1),num_spikes)) \n print \"NOSYN TEST: start convolution with Ksyn\"\n Ksyn = syn_kernel(arange(0,10*self.tau2_ex[0],self.bin_width), self.tau1_ex[0], self.tau2_ex[0]) \n Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))\n spike_freq = np.convolve(Ksyn, spike_freq, mode='same')\n print \"NOSYN TEST: convolution finished\"\n else:\n\n if isinstance(self.factor_celltype[j], ( int, long ) ):\n f = self.factor_celltype[j] \n else:\n f = self.factor_celltype[j][0] \n \n spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width\n\n fmean.append(self.fmean_input)\n fmax.append(self.fmax_input) \n fmstd.append(self.fmstd_input) \n fcvm.append(self.fcvm_input) \n fstdm.append(self.fstdm_input)\n\n if self.no_fmean == True:\n fmean.append(ihold)\n \n #plt.figure('spike_freq') \n #plt.plot(freq_times, spike_freq)\n #plt.savefig(\"./figs/Pub/Spike_freq_\" + str(self.pickle_prefix) + \".pdf\", dpi = 300, transparent=True) # save it \n #plt.clf()\n \n fmeanA = fmean[0]\n fmaxA = fmax[0]\n fmstdA = fmstd [0] \n fcvmA = fcvm[0]\n fstdmA = fstdm[0]\n \n \n if self.id == 0: \n \n if any([i<0 for i in inh_factor]):\n \n p0 = []\n inhf_idx = []\n for i, inhf in enumerate(inh_factor):\n if inhf < 0: \n p0.append(0) \n inhf_idx.append(i)\n \n plsq = fmin(self.residuals_compute_Transfer, p0, args=(stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor))\n p = plsq\n \n ip = 0\n for i in inhf_idx:\n inh_factor[i] = p[ip]\n ip += 1\n \n\n print \"Final inh_factor: \", inh_factor\n \n \n results = self.compute_Transfer(stimulus, spike_freq = spike_freq, freq_times = freq_times, \n t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in, \n do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor=inh_factor)\n \n mag_vec, pha_vec, ca, freq, freq_used, fmean_all = results.get('mag_mat'), results.get('pha_mat'), results.get('ca_mat'), results.get('freq'), results.get('freq_used'), results.get('fmean') \n SNR_mat, VAFf_mat, Qual_mat, CF_mat, VAF_mat = results.get('SNR_mat'), results.get('VAFf_mat'), results.get('Qual_mat'), results.get('CF_mat'), results.get('VAF_mat') \n stim, resp_mat, stim_re_mat, tk, K_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat'), results.get('tk'), results.get('K_mat') \n \n \n self.barrier() # wait for other nodes\n \n \n if self.id == 0:\n \n if t_qual > 0:\n #print t_startstop[0], t_startstop[0]/self.dt, (t_startstop[0]+t_qual)/self.dt\n current_re = 
current[int(t_startstop[0]/self.dt):int((t_startstop[0]+t_qual)/self.dt)]\n current_re = current_re[int(len(K_mat[self.a_celltype[0]])):int(len(current_re))-int(len(K_mat[self.a_celltype[0]]))]\n \n if len(self.i_holdrs) > 0:\n ihold1 = self.i_holdrs[self.a_celltype[0]][0]\n else:\n ihold1 = []\n \n for l in range(len(self.method_interpol)): # unwrap \n pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase\n \n # only return fraction of actual signal, it is too long!!! \n if time[-1] > self.tmax: \n imax = -1*int(self.tmax/self.dt)\n time = time[imax:]; current = current[imax:]; gsyn = gsyn[imax:]; gsyn_in = gsyn_in[imax:]\n for n in range(self.n_celltypes): \n voltage[n] = voltage[n][imax:]\n \n if freq_times != []: \n if freq_times[-1] > self.tmax:\n imax2 = where(freq_times > self.tmax)[0][0] # for spike frequency \n freq_times = freq_times[0:imax2]; spike_freq = spike_freq[0:imax2] \n \n bvec = [\"_syn\" in st for st in self.method_interpol]\n if np.any(bvec):\n # normalize synaptic integration with others \n mag_vec[1,:]= mag_vec[0,0]*mag_vec[1,:]/mag_vec[1,0] \n \n if self.id == 0: print \"start pickle\"\n \n results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage,'tk':tk,'K_mat':K_mat, 'ihold1': ihold1, 't_startstop':t_startstop, #'stimulus':stimulus,\n 'current':current,'t1':time,'freq_times':freq_times,'spike_freq':spike_freq, 'stim':stim, 'stim_re_mat':stim_re_mat, 'resp_mat':resp_mat, 'current_re':current_re, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fbaseA':fbaseA, 'fbase':fbase, 'fbstdA':fbstdA,\n 'fmean':fmean,'method_interpol':self.method_interpol, 'SNR':SNR_mat, 'VAF':VAFf_mat, 'Qual':Qual_mat, 'CF':CF_mat, 'VAFs':VAF_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'inh_factor':inh_factor, 't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec} \n \n if self.id == 0:\n if self.dumpsave == 1:\n pickle.dump( results, gzip.GzipFile( filepath, \"wb\" ) )\n print \"pickle done\" \n \n \n if self.plot_train:\n \n for a in self.a_celltype:\n\n #i_start = mlab.find(t_all_vec_vec[a] >= 0)[0]\n #i_stop = mlab.find(t_all_vec_vec[a] >= 5)[0]\n \n #t_all_cut = t_all_vec_vec[a][i_start:i_stop]\n #id_all_cut = id_all_vec_vec[a][i_start:i_stop]\n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(use_spikes,use_id,'|', ms=2)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n \n plt.clf()\n \n if len(t_all_cut) > 0:\n \n tbin = 100*ms\n tb = np.arange(0,t[-1],tbin)\n [all_rate, _] = neuronpy.util.spiketrain.get_histogram(t_all_cut, bins = tb)\n all_rate = np.concatenate((np.zeros(1),all_rate)) / self.N[a] / tbin\n \n plt.figure('results_train2') \n plt.plot(tb,all_rate)\n plt.savefig(\"./figs/Pub/PSTH_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n plt.figure('results_noise') 
\n plt.plot(time,current)\n plt.savefig(\"./figs/Pub/Noise_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n \n if self.plot_input:\n \n if len(t_all_vec_input_sorted[0]) > 0:\n \n i_start = mlab.find(t_all_vec_input_sorted[0] >= 0)[0]\n i_stop = mlab.find(t_all_vec_input_sorted[0] >= 5)[0]\n \n t_all_cut = t_all_vec_input_sorted[0][i_start:i_stop]\n id_all_cut = id_all_vec_input_sorted[0][i_start:i_stop]\n \n plt.figure('results_input') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(t_all_cut,id_all_cut,'|', ms=2)\n plt.text(0.5, 1.1, r'fmean=' + str(round(self.fmean_input,1)) + ',fmax=' + str(round(self.fmax_input,1)) + ',fmstd=' + str(round(self.fmstd_input,1)) + ',fcvm=' + str(round(self.fcvm_input,1)) + ',fstdm=' + str(round(self.fstdm_input,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Input_\" + str(self.pickle_prefix) + \"_N\" + str(self.N[self.a_celltype[0]]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n\n else:\n \n if self.id == 0:\n results = pickle.load( gzip.GzipFile( filepath, \"rb\" ) )\n \n #print results\n #print {key:np.shape(value) for key,value in results.iteritems()}\n \n if self.minimal_dir: # save only info needed for plot\n \n print {key:np.shape(value) for key,value in results.iteritems()}\n \n if \"Fig6_pop_transfer_grc_syngr_nsyn4_cn_a1_noisesynlow_inhlow_adjfinh_varih_N100_CFo6.0_results_pop_cnoise.p\" in filename:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = [] \n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_is0.14_CFo9.0_results_pop_cnoise.p\" in filename) \\\n :\n\n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_is0.14_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo14.0_results_pop_cnoise.p\" in filename) \\\n or 
(\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo14.0_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig4_pop_transfer_grc_cn_addn100_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4_pop_transfer_grc_cn_addn1_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_twopop_N[50, 50]_CF[0.0055, 0.0055]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_N[100]_CF[0.0055]_amod[None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_twopop_N[50, 50]_CF[0.0051, 0.0051]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_N[100]_CF[0.0051]_amod[None]_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig2_pop_transfer_\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n \n else:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n\n print {key:np.shape(value) for key,value in results.iteritems()}\n\n pickle.dump( results, gzip.GzipFile( self.minimal_dir + \"/\" + filename, \"wb\" ) ) \n \n else:\n results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 'tk':[],'K_mat':[], 'ihold1':[], 't_startstop':[], #'stimulus':[],\n 'current':[],'t1':[],'freq_times':[],'spike_freq':[], 'stim':[], 'stim_re_mat':[], 'current_re':[], 'gsyn_in':[], 'fmeanA':[], 'fmaxA':[], 'fmstdA':[], 'fcvmA':[], 'fbaseA':[], 'fbase':[], 'fbstdA':[],\n 'fmean':[],'method_interpol':self.method_interpol, 'SNR':[], 'VAF':[], 'Qual':[], 'CF':[], 'VAFs':[], 'fmax':[], 'fmstd':[], 'fcvm':[], 'inh_factor':[], 't_all_vec_vec':[], 'id_all_vec_vec':[]} \n \n if self.id == 0: \n\n if self.plot_train: \n\n for a in self.a_celltype:\n \n t1 = results.get('t1') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n fmax = results.get('fmax') \n fmstd = results.get('fmstd') \n \n \n if results.has_key('t_all_vec_vec'):\n \n if len(results['t_all_vec_vec']) > 0: \n t_all_vec_vec = results.get('t_all_vec_vec') \n id_all_vec_vec = results.get('id_all_vec_vec') \n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n 
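                                # keep only spikes in the first 10 s for the raster plot
                                # (mlab.find returns the indices where the condition holds)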
f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax97 = plt.subplot(1,1,1)\n ax97.plot(use_spikes,use_id,'|', ms=6)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax97.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n\n \n plt.figure('results_voltage') \n ax99 = plt.subplot(2,1,1)\n ax99.plot(t1,voltage[a])\n \n t_noise = arange(0, t_stim, self.dt)\n noise_data = create_colnoise(t_noise, sexp, cutf, 50, onf = onf)\n stimulus, t, t_startstop = construct_Stimulus(noise_data, 1/self.dt, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) \n ax98 = plt.subplot(2,1,2)\n ax98.plot(t[0:10/self.dt],stimulus[0:10/self.dt],color='k')\n \n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Voltage_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.show()\n plt.clf()\n \n if (self.id == 0) and (do_csd == 1):\n Qual = results.get('Qual') \n for i, ii in enumerate(self.method_interpol):\n print \"\\n[QUAL:] Interpol:\", ii, \"SNR0:\", Qual[i,0,0], \"SNR_cutff:\", Qual[i,0,1], \"SNR_mean:\", Qual[i,0,2], \"\\n VAF0:\", Qual[i,1,0], \"VAF_cutff:\", Qual[i,1,1], \"VAF_mean:\", Qual[i,1,2], \"\\n CF(subtracted):\", Qual[i,2,0], \"VAF(subtracted):\", Qual[i,2,1] \n \n VAF = results.get('VAF')\n freq_used = results.get('freq_used') \n iend = mlab.find(freq_used >= self.xmax)[0] \n print 'm(VAF)=' + str(np.mean(VAF[1][0,0:iend])) \n \n self.barrier() # wait for other nodes\n \n return results\n\n\n# def fun_ssine_Stim(self, freq_used = np.array([1, 10, 100, 1000])*Hz):\n# \"\"\"\n# Compute impedance and/or transfer function using Single sine stimulation\n# Only compute transfer function if there is a steady state (resting) firing rate!\n# \"\"\"\n# self.barrier() # wait for other nodes\n# \n# filepath = \"./data/\" + str(self.pickle_prefix) + \"_results_pop_ssine.p\"\n# \n# if self.do_run or (os.path.isfile(filepath) is False):\n# \n# fs = 1 / self.dt # sampling rate \n# fmax = fs / 2 # maximum frequency (nyquist)\n# \n# if self.id == 0: print \"- starting single sine transfer function estimation! with amp = \" + str(np.round(self.amp[a_celltype[0]],4)) + \", ihold = \" + str(np.round(self.ihold[self.a_celltype[0]],4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n# \n# if max(self.n_syn_ex) == 0:\n# self.set_IStim() \n# \n# if self.fluct_s != []:\n# if self.fluct_s[self.a_celltype[0]] > 0:\n# if self.id == 0: print \"- adding i fluct\"\n# self.connect_fluct()\n# \n# for i, m in enumerate(self.method_interpol):\n# if \"syn\" in m: self.method_interpol[i] = \"syn \" + str(self.syn_tau1/ms) + \"/\" + str(self.syn_tau2/ms) + \"ms\"\n# if \"bin\" in m: self.method_interpol[i] = \"bin \" + str(self.bin_width/ms) + \"ms\"\n# \n# else:\n# self.give_freq = False\n# ihold = self.set_i(self.ihold) # just sets amp, ihold should not change! 
\n# \n# if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):\n# if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):\n# if self.id == 0: print \"- adding g fluct\"\n# self.connect_gfluct(E_i=-65)\n# \n# #if ((self.fluct_std_e[self.a_celltype[0]] != []) or (self.fluct_std_i[self.a_celltype[0]] != [])):\n# # if ((self.fluct_std_e[self.a_celltype[0]] > 0) or (self.fluct_std_i[self.a_celltype[0]] > 0)):\n# # if self.id == 0: print \"- adding g fluct\"\n# # self.connect_gfluct(E_i=-65)\n# \n# if 'gsyn_in' not in self.method_interpol: \n# pass\n# else:\n# self.g_syn_ex = 1\n# \n# \n# for i, fu in enumerate(freq_used):\n# \n# if self.id == 0: print \"- single sine processing frequency = \" + str(fu)\n# \n# t, stimulus, i_startstop, t_startstop = create_singlesine(fu = fu, amp = self.amp[a_celltype[0]], ihold = 0, dt = self.dt, periods = 20, minlength = 2*s, t_prestim = 1*s)\n# tstop = t[-1]\n# \n# if i == 0: t_startstop_plot = t_startstop\n# \n# if max(self.n_syn_ex) == 0:\n# self.set_IPlay(stimulus, t)\n# else:\n# self.set_SynPlay(stimulus, t) \n# \n# if self.g_syn_ex >= 0: # should also be true for current input!!!\n# \n# self.run(tstop)\n# \n# if i == 0: # do this here to have something to return\n# \n# # select first sinusoidal to plot, later\n# voltage_plot = []\n# current_plot = []\n# time_plot = []\n# freq_times_plot = []\n# spike_freq_plot = []\n# gsyn_plot = []\n# \n# # construct vectors\n# amp_vec = zeros(len(freq_used)) # amplitude vector\n# fmean_all = zeros(len(freq_used)) # mean firing frequency (all cells combined)\n# fmean = zeros(len(freq_used)) # mean firing frequency (one cell)\n# ca = zeros(len(freq_used), dtype=complex)\n# \n# # create matrix to hold all different interpolation methods:\n# mag_vec = zeros((len(self.method_interpol),len(freq_used))) # magnitude vector\n# pha_vec = zeros((len(self.method_interpol),len(freq_used))) # phase vector \n# NI_vec = zeros((len(self.method_interpol),len(freq_used))) # NI vector\n# VAF_vec = zeros((len(self.method_interpol),len(freq_used))) # VAF vector\n# \n# results = self.get(t_startstop, i_startstop) # t1 should be equal to t!!!\n# time, voltage, current, fmean0, gsyn = results.get('time'), results.get('voltage'), results.get('current'), results.get('fmean'), results.get('gsyn')\n# freq_times, spike_freq, t_all_vec_vec, id_all_vec_vec, gsyns = results.get('freq_times'), results.get('spike_freq'), results.get('t_all_vec_vec'), results.get('id_all_vec_vec'), results.get('gsyns')\n# \n# else:\n# \n# time = t\n# voltage = []\n# voltage.append(np.zeros(len(t)))\n# current = stimulus\n# \n# freq_times = []\n# spike_freq = []\n# fmean0 = ihold\n# gsyn = []\n# gsyn_in = []\n# \n# t_all_vec_vec = []\n# id_all_vec_vec = []\n# \n# \n# if self.id == 0:\n# \n# t_all_vec = []\n# t_all_vec.append([])\n# t_all_vec[0] = np.concatenate(self.t_all_vec_input)\n# \n# id_all_vec = []\n# id_all_vec.append([])\n# id_all_vec[0] = np.concatenate(self.id_all_vec_input)\n# \n# ie = argsort(t_all_vec[0]) \n# t_all_vec_vec.append( t_all_vec[0][ie] )\n# id_all_vec_vec.append( id_all_vec[0][ie].astype(int) ) # \n# \n# \n# freq_times = arange(0, tstop, self.bin_width)\n# [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[0], bins = freq_times)\n# spike_freq = np.concatenate((zeros(1),num_spikes)) / self.bin_width\n#\n# \n# if self.id == 0:\n#\n# fmean[i] = fmean0[0]\n#\n# if i == 0: \n# \n# # select first sinusoidal to plot\n# voltage_plot = voltage\n# current_plot = current\n# time_plot = time\n# 
freq_times_plot = freq_times\n# spike_freq_plot = spike_freq\n# gsyn_plot = gsyn\n# \n# \n# for l in range(len(self.method_interpol)):\n# \n# if \"bin\" in self.method_interpol[l]:\n# \n# # binning and linear interpolation\n# stimulus_signal = stimulus[i_startstop[0]:i_startstop[1]] # cut out relevant signal\n# t_input_signal = t[i_startstop[0]:i_startstop[1]] - t[i_startstop[0]]\n# \n# spike_freq_interp = interp(t, freq_times, spike_freq, left=0, right=0) # interpolate to be eqivalent with input, set zero at beginning and end!\n# freq_out_signal_interp = spike_freq_interp[i_startstop[0]:i_startstop[1]] # cut out relevant signal\n# vamp, mag_vec[l,i], pha_vec[l,i], fmean_all[i], _ = get_magphase(stimulus_signal, t_input_signal, freq_out_signal_interp, t_input_signal, method = \"fft\", f = fu)\n# \n# results = est_quality(t_input_signal, fu, freq_out_signal_interp, self.amp[a_celltype[0]]*mag_vec[l,i], pha_vec[l,i]/ (180 / pi), fmean_all[i]) \n# NI_vec[l,i], VAF_vec[l,i] = results.get('NI'), results.get('VAF')\n# print \"-[bin] NI: \" + str(NI_vec[l,i]) + \", VAF: \" + str(VAF_vec[l,i])\n# \n# if \"syn\" in self.method_interpol[l]:\n# \n# # synaptic integration \n# dt_out = t_input_signal[2] - t_input_signal[1]\n# shift = self.nc_delay/dt_out # shift response by the nc delay to remove offset\n# freq_out_signal_syn = gsyn[i_startstop[0]+shift:i_startstop[1]+shift] # cut out relevant signal\n# \n# vamp, mag_vec[l,i], pha_vec[l,i], fm, _ = get_magphase(stimulus_signal, t_input_signal, freq_out_signal_syn, t_input_signal, method = \"fft\", f = fu)\n# \n# results = est_quality(t_input_signal, fu, freq_out_signal_syn, self.amp[a_celltype[0]]*mag_vec[l,i], pha_vec[l,i]/ (180 / pi), fm) \n# NI_vec[l,i], VAF_vec[l,i] = results.get('NI'), results.get('VAF')\n# print \"-[syn] NI: \" + str(NI_vec[l,i]) + \", VAF: \" + str(VAF_vec[l,i])\n# \n# \n# self.barrier() # wait for other nodes\n# \n# #print \"rest: \" + str(vrest) + \" freq_used:\" + str(freq_used) + \" amp_vec:\" + str(amp_vec) + \" mag_vec:\" + str(mag_vec) + \" pha_vec:\" + str(pha_vec)\n# \n# if self.id == 0:\n# \n# for l in range(len(self.method_interpol)): # unwrap \n# pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase\n# \n# # only return fraction of actual signal, it is too long!!! \n# if time_plot[-1] > self.tmax: \n# imax = where(time_plot > self.tmax)[0][0] # for voltage, current and time\n# time_plot = time_plot[0:imax]; current_plot = current_plot[0:imax]; gsyn_plot = gsyn_plot[0:imax]\n# for n in range(self.n_celltypes): \n# voltage_plot[n] = voltage_plot[n][0:imax]\n# \n# if freq_times_plot != []: \n# if freq_times_plot[-1] > self.tmax:\n# imax2 = where(freq_times_plot > self.tmax)[0][0] # for spike frequency \n# freq_times_plot = freq_times_plot[0:imax2]; spike_freq_plot = spike_freq_plot[0:imax2] \n# \n# # normalize synaptic integration with with first magnitude, may by syn itself! 
\n# bvec = [\"syn\" in st for st in self.method_interpol]\n# if np.any(bvec):\n# k = where(bvec) \n# mag_vec[k,:]= mag_vec[0,0]*mag_vec[k,:]/mag_vec[k,0]\n# \n# NI_vec = (freq_used, NI_vec)\n# VAF_vec = (freq_used, VAF_vec)\n# results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage_plot, 't_startstop':t_startstop_plot,\n# 'current':current_plot,'t1':time_plot,'freq_times':freq_times_plot,'spike_freq':spike_freq_plot,\n# 'fmean':mean(fmean),'method_interpol':self.method_interpol, 'NI':NI_vec, 'VAF':VAF_vec}\n# \n# if self.id == 0:\n# pickle.dump( results, gzip.GzipFile( filepath, \"wb\" ) )\n# \n# else:\n# \n# if self.id == 0:\n# results = pickle.load( gzip.GzipFile( filepath, \"rb\" ) )\n# else:\n# results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 't_startstop':[],\n# 'current':[],'t1':[],'freq_times':[],'spike_freq':[],\n# 'fmean':[],'method_interpol':self.method_interpol,'NI':[],'VAF':[]} \n# \n# return results\n \n def get_RC(self, opt_plot):\n \n if self.id == 0:\n if \"analytical\" in opt_plot: # simplest case, only uses rm and tau, scaling necessary \n exec self.cell_exe[self.a_celltype[0]]\n sim = Stimulation(cell, temperature = self.temperature)\n rm, cm, taum = sim.get_RCtau()\n else:\n rm = cm = taum = 0\n \n if \"if\" in opt_plot:\n Vrest = cell.soma(0.5).pas.e*mV\n Vth = cell.spkout.thresh*mV \n Vreset = cell.spkout.vrefrac*mV\n else:\n Vreset = 0*mV; Vth = 1*mV; Vrest = 0*mV\n \n sim = None\n cell = None \n else:\n rm = cm = taum = 0\n Vreset = 0*mV; Vth = 1*mV; Vrest = 0*mV\n \n return rm, cm, taum, Vreset, Vth, Vrest\n\n\n def fun_plot(self, currlabel=\"control\", dowhat=\"cnoise\", freq_used=np.array([]), cutf=10, sexp=0, t_stim=100*s, ymax=0, ax=None, SNR=None, VAF=None, t_qual=0, opt_plot=np.array([]), method_interpol_plot=[], do_csd = 1):\n\n SNR_switch = SNR\n VAF_switch = VAF\n \n rm, cm, taum, Vreset, Vth, Vrest = self.get_RC(opt_plot)\n \n if dowhat == \"cnoise\":\n \n if do_csd == 0:\n t_qual = 0; SNR_switch = 0; VAF_switch = 0\n\n results = self.fun_cnoise_Stim(t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = t_qual, freq_used = freq_used, do_csd = do_csd)\n \n freq_used, amp_vec, mag, pha, ca, voltage, current, t1 = results.get('freq_used'), results.get('amp'), results.get('mag'), results.get('pha'), results.get('ca'), results.get('voltage'), results.get('current'), results.get('t1') \n freq_times, spike_freq, fmean, method_interpol, SNR, VAF, Qual = results.get('freq_times'), results.get('spike_freq'), results.get('fmean'), results.get('method_interpol'), results.get('SNR'), results.get('VAF'), results.get('Qual') \n stim, stim_re_mat, current_re, tk, K_mat_old = results.get('stim'), results.get('stim_re_mat'), results.get('current_re'), results.get('tk'), results.get('K_mat')\n \n elif dowhat == \"ssine\":\n \n results = self.fun_ssine_Stim(freq_used = freq_used0)\n \n freq_used, amp_vec, mag, pha, ca, voltage, current, t1 = results.get('freq_used'), results.get('amp'), results.get('mag'), results.get('pha'), results.get('ca'), results.get('voltage'), results.get('current'), results.get('t1') \n freq_times, spike_freq, fmean, method_interpol, VAF = results.get('freq_times'), results.get('spike_freq'), results.get('fmean'), results.get('method_interpol'), results.get('VAF') \n tk = []\n K_mat_old = []\n\n # analyse\n if self.id == 0:\n \n print \"Mean rate: \" + str(fmean)\n \n # Turn it off if set to zero\n if SNR_switch == 0: SNR = None\n if VAF_switch == 0: VAF = None \n\n \n if t_qual 
> 0:\n \n plt.figure(\"Reconstruct\")\n \n ax1 = subplot(2,1,1)\n \n ax1.plot(np.arange(len(stim))*dt-1, current_re*1e3, 'b', linewidth=1) \n ax1.plot(np.arange(len(stim))*dt-1, (stim)*1e3, 'k-', linewidth=1)\n ax1.plot(np.arange(len(stim))*dt-1, (stim_re_mat[0,:])*1e3, 'r', linewidth=1, alpha=1)\n \n #adjust_spines(ax1, ['left','bottom'], d_out = 10) \n #ax1.axis(xmin=0, xmax=1) \n \n #ax1.axis(ymin=8.3, ymax=10.7)\n #ax1.yaxis.set_ticks(array([8.5,9,9.5,10,10.5]))\n #ax1.set_title(\"Reconstruction\") \n \n #ax1.set_xlabel(\"s\") \n #ax1.set_ylabel(\"pA\")\n \n #ax1.text(0.15, 10.7, \"Input current\", color=color3, fontsize = 8)\n #ax1.text(0.8, 10.7, \"Signal\", color=\"#000000\", fontsize = 8)\n #ax1.text(0.0, 8.2, \"Reconstruction\", color=color2, fontsize = 8)\n \n ax2 = subplot(2,1,2)\n ax2.plot(tk, K_mat_old[0], 'k', linewidth=1) \n \n \n self.save_plot(directory = \"./figs/dump/\", prefix = \"reconstruct\")\n \n plt.figure(\"Transfer\")\n \n currtitle = currlabel + \" pop \" + dowhat + \", \" + self.celltype[self.a_celltype[0]] \n \n ax = plot_transfer(currtitle, freq_used, mag, pha, t1, current, voltage[self.a_celltype[0]], freq_times, spike_freq, taum, fmean, self.ihold, rm, Vreset, Vth, Vrest, method_interpol, method_interpol_plot, SNR = SNR, VAF = VAF, ymax = self.ymax, ax = self.ax, linewidth = self.linewidth, color_vec = self.color_vec, alpha = self.alpha, opt_plot = opt_plot) \n \n suptitle(\"Population transfer function of \" + str(self.N[self.a_celltype[0]]) + \" \" + self.celltype[self.a_celltype[0]] + \", amp: \" + str(np.round(self.amp[self.a_celltype[0]],4)) + \", amod: \" + str(self.amod) + \", ih: \" + str(np.round(self.ihold,4)) + \", ih_s: \" + str(np.round(self.ihold_sigma,4)) + \", fm: \" + str(np.round(fmean,2)) + \", fl_s: \" + str(self.fluct_s)) \n \n return VAF, SNR, ax, tk, K_mat_old \n \n\n def save_plot(self, directory = \"./figs/dump/\", prefix = \" \"):\n \n if pop.id == 0:\n \n from datetime import datetime\n idate = datetime.now().strftime('%Y%m%d_%H%M') # %S\n savefig(directory + idate + \"-pop_transfer_\" + prefix + \"_\" + self.celltype[self.a_celltype[0]] + \"_N\" + str(self.N[self.a_celltype[0]]) + \"_ihold\" + str(np.round(self.ihold,4)) + \"_amp\" + str(np.round(self.amp[self.a_celltype[0]],4)) + \".pdf\", dpi = 300) # save it\n\n \n def do_pca_ica(self, t_analysis_delay=0, t_analysis_stop=1, time=0, signals=0, output_dim=10, n_processes=32, n_chunks=32, do_ica=1, n_celltype = 0):\n \n if self.use_mpi:\n \n filepath = self.data_dir + \"/\" + str(self.pickle_prefix) + \"_results_pop_pca_ica.p\"\n \n if self.do_run or (os.path.isfile(filepath) is False):\n \n # PCA\n \n # remove beginning\n dt = time[2]-time[1]\n t = time[int(t_analysis_delay/dt):int(t_analysis_stop/dt)] \n pca_mat = np.array(signals[n_celltype]).T[int(t_analysis_delay/dt):int(t_analysis_stop/dt),:]\n \n node = mdp.nodes.PCANode(output_dim=output_dim, svd=True)\n \n # pad with zeros to be able to split into chunks!\n n_add = n_chunks-np.remainder(np.shape(pca_mat)[0],n_chunks)\n mat_add = np.zeros((n_add, np.shape(pca_mat)[1]))\n pca_mat_add = np.concatenate((pca_mat, mat_add))\n pca_mat_iter = np.split(pca_mat_add, n_chunks) \n \n flow = mdp.parallel.ParallelFlow([node])\n \n start_time = ttime.time()\n \n with mdp.parallel.ProcessScheduler(n_processes=n_processes, verbose=True) as scheduler:\n flow.train([pca_mat_iter], scheduler=scheduler) # input has to be list, why??\n \n process_time = ttime.time() - start_time\n \n s = np.array(flow.execute(pca_mat_iter))\n s = 
s[0:len(t),:] # resize to length of t!\n \n #print \"node.d: \",node.d\n var_vec = node.d/sum(node.d)\n print 'Explained variance (', 0, ') : ', round(node.explained_variance,4)\n print 'Variance (' , 0, ') : ', var_vec\n print 'Time to run (' , 0, ') : ', process_time\n \n s2 = []\n if do_ica:\n # ICA\n #s2 = mdp.fastica(s)\n ica = mdp.nodes.FastICANode() #CuBICANode()\n ica.train(s)\n s2 = ica(s)\n \n results = {'t':t, 'pca':s,'pca_var':var_vec,'pca_var_expl':round(node.explained_variance,4), 'ica':s2}\n \n if self.id == 0:\n if self.dumpsave == 1:\n pickle.dump( results, gzip.GzipFile( filepath, \"wb\" ) )\n \n else:\n \n if self.id == 0:\n results = pickle.load( gzip.GzipFile( filepath, \"rb\" ) ) \n \n else:\n \n # remove beginning\n dt = time[2]-time[1]\n t = time[int(t_analysis_delay/dt):int(t_analysis_stop/dt)] \n pca_mat = np.array(signals[n_celltype]).T[int(t_analysis_delay/dt):int(t_analysis_stop/dt),:]\n \n node = mdp.nodes.PCANode(output_dim=output_dim, svd=True)\n\n start_time = ttime.time()\n \n node.train(pca_mat)\n s = node(pca_mat)\n \n process_time = ttime.time() - start_time \n #print \"node.d: \",node.d\n var_vec = node.d/sum(node.d)\n print 'Explained variance (', 0, ') : ', round(node.explained_variance,4)\n print 'Variance (' , 0, ') : ', var_vec\n print 'Time to run (' , 0, ') : ', process_time\n \n s2 = []\n if do_ica:\n # ICA\n #s2 = mdp.fastica(s)\n ica = mdp.nodes.FastICANode() #CuBICANode()\n ica.train(s)\n s2 = ica(s)\n \n results = {'t':t, 'pca':s,'pca_var':var_vec,'pca_var_expl':round(node.explained_variance,4), 'ica':s2}\n\n return results\n \n \n def net_run(self, tstop, simprop = \"default\", t_analysis_delay=0, t_analysis_stop=1, stim_start=0):\n\n freq_times = []\n t_all_vec_vec = []\n id_all_vec_vec = []\n gsyns = []\n w_mat = []\n winh_mat = []\n time = []\n voltage = []\n current = []\n \n filepath = self.data_dir + \"/\" + str(self.pickle_prefix) + \"_results_pop_randomnet.hdf5\"\n \n if self.do_run or (os.path.isfile(filepath) is False):\n \n self.run(tstop)\n \n self.no_fmean = True\n results = self.get()\n \n time, voltage, current, fmean, gsyn = results.get('time'), results.get('voltage'), results.get('current'), results.get('fmean'), results.get('gsyn')\n freq_times, spike_freq, t_all_vec_vec, id_all_vec_vec, gsyns, w_mat, winh_mat = results.get('freq_times'), results.get('spike_freq'), results.get('t_all_vec_vec'), results.get('id_all_vec_vec'), results.get('gsyns'), results.get('w_mat'), results.get('winh_mat')\n \n if self.id == 0:\n if self.dumpsave == 1:\n #pickle.dump( results, open( filepath, \"wb\" ) ) # gzip.GzipFile\n \n print \"- Saving\", filepath\n \n f = h5py.File(filepath, 'w')\n f.create_dataset('time', data=time, compression='gzip', shuffle=True)\n f.create_dataset('voltage', data=np.array(voltage), compression='gzip', shuffle=True)\n f.create_dataset('current', data=current, compression='gzip', shuffle=True)\n f.create_dataset('freq_times', data=freq_times, compression='gzip', shuffle=True)\n \n #f.create_dataset('t_all_vec_vec', data=np.array(t_all_vec_vec), compression='lzf', shuffle=True)\n #f.create_dataset('id_all_vec_vec', data=np.array(id_all_vec_vec), compression='lzf', shuffle=True)\n #f.create_dataset('gsyns', data=np.array(gsyns), compression='lzf', shuffle=True)\n\n for i in range(len(self.N)):\n subgroup = f.create_group(\"cell\" + str(i))\n subgroup.create_dataset('t_all_vec_vec', data=t_all_vec_vec[i], compression='gzip', shuffle=True)\n subgroup.create_dataset('id_all_vec_vec', data=id_all_vec_vec[i], 
compression='gzip', shuffle=True)\n subgroup.create_dataset('g', data=gsyns[i], compression='gzip', shuffle=True)\n\n #for j in range(len(gsyns[i])):\n # subsubgroup = subgroup.create_group(\"gsyn\" + str(j))\n # subsubgroup.create_dataset('g', data=gsyns[i][j], compression='lzf', shuffle=True)\n \n f.close() \n print \"- Save finished\"\n \n #filename = slugify(simprop)\n\n #syn_grc = np.array(gsyns[0])\n \n #import scipy\n #from scipy import io\n \n #print \"Saving .mat\"\n #data = {}\n #data['syn_grc'] = syn_grc[:,int(t_analysis_delay/self.bin_width):int(t_analysis_stop/self.bin_width)]\n #data['time'] = freq_times[int(t_analysis_delay/self.bin_width):int(t_analysis_stop/self.bin_width)]-stim_start\n #scipy.io.savemat('./figs/' + filename + '.mat',data)\n \n else:\n \n if self.id == 0:\n #results = pickle.load( open( filepath, \"rb\" ) ) #gzip.GzipFile\n f = h5py.File(filepath, 'r')\n \n time = np.array(f['time'])\n voltage = np.array(f['voltage'])\n current = np.array(f['current'])\n freq_times = np.array(f['freq_times'])\n \n \n for i in range(len(self.N)):\n t_all_vec_vec.append(np.array(f['/cell' + str(i) + '/t_all_vec_vec'])) \n id_all_vec_vec.append(np.array(f['/cell' + str(i) + '/id_all_vec_vec'])) \n gsyns.append(np.array(f['/cell' + str(i) + '/g'])) \n \n #gsyns.append([])\n #for j in range(self.N[i]):\n # gsyns[i].append(np.array(f['/cell' + str(i) + '/gsyn' + str(j) + '/g' ])) \n\n f.close()\n \n return time, voltage, current, t_all_vec_vec, id_all_vec_vec, gsyns, freq_times, w_mat, winh_mat \n\n \n def delall(self): \n \n if self.use_mpi: \n self.pc.gid_clear()\n print \"- clearing gids\"\n else:\n pass\n #h.topology() \n #for sec in h.allsec():\n # print \"- deleting section:\", sec.name()\n # #h(\"%s{delete_section()}\"%sec.name())\n # sec.push()\n # h.delete_section()\n #h.topology()\n \n for n in range(self.n_celltypes): \n for m in self.cells[n]:\n m.destroy()\n del m \n del self.cells\n del self.nc_vecstim\n del self.netcons\n del self.nclist\n print h.topology() \n \n \n def delrerun(self): \n \n del self.nc_vecstim\n del self.netcons\n del self.nclist\n del self.vecstim\n del self.spike_vec\n del self.ST_stims\n del self.PF_stims\n \n self.netcons = [] \n self.nclist = []\n self.nc_vecstim = []\n self.vecstim = []\n self.spike_vec = []\n self.ST_stims = []\n self.PF_stims = []\n \n self.t_vec = []\n self.id_vec = []\n self.rec_v = []\n \n for n in range(self.n_celltypes):\n if self.use_mpi:\n self.t_vec.append(h.Vector()) # np.array([0])\n self.id_vec.append(h.Vector()) # np.array([-1], dtype=int)\n else:\n self.t_vec.append([])\n \n self.rec_v.append(h.Vector())\n\n for cell in self.cells[n]:\n self.t_vec[n].append(h.Vector())\n cell.nc_spike.record(self.t_vec[n][-1]) \n\n self.flucts = [] # Fluctuating inputs on this host\n self.noises = [] # Random number generators on this host\n self.plays = [] # Play inputs on this host\n self.rec_is = []\n self.trains = [] \n \n self.ic_holds = []\n self.i_holdrs = []\n self.i_holds = []\n self.ic_starts = [] \n self.vc_starts = []\n self.ic_steps = []\n self.tvecs = []\n self.ivecs = [] \n self.noises = []\n self.record_syn = []\n self.id_all_vec_input = []\n self.t_all_vec_input = []\n self.syn_ex_dist = []\n self.syn_inh_dist = []\n\n \n# test code\nif __name__ == '__main__':\n \n # mpiexec -f ~/machinefile -enable-x -n 96 python Population.py --noplot\n \n from Stimulation import *\n from Plotter import *\n from Stimhelp import *\n\n from cells.IfCell import *\n import scipy\n from scipy import io\n \n dt = 0.1*ms\n dt = 
0.025*ms\n \n do_run = 1\n if results.norun: # do not run again use pickled files!\n print \"- Not running, using saved files\"\n do_run = 0\n \n \n do = np.array([\"transfer\"])\n opts = np.array([\"if_cnoise\", \"grc_cnoise\"]) #ssine \n #opts = np.array([\"if_cnoise\"]) #ssine\n #opts = np.array([\"if_recon\"]) #ssine\n opts = np.array([\"if_syn_CFvec\"]) \n #opts = np.array([\"prk_cnoise\"])\n opts = np.array([\"if_cnoise\", \"if_ssine\"]) #ssine \n opts = np.array([\"if_ssine\"]) #ssine \n opts = np.array([\"grc_cnoise_addn_cn_\", \"grc_cnoise_cn_\", \"grc_cnoise_addn_cn_a01\"]) \n opts = np.array([\"grc_cnoise_addn100_cn_\", \"grc_cnoise_addn_cn_\", \"grc_cnoise_cn_\"]) \n opts = np.array([\"grc_cnoise_addn100_cn_\"])\n opts = np.array([\"grc_cnoise_addn100_\"])\n opts = np.array([\"grc_cnoise_addn_cn_\"])\n #opts = np.array([\"grc_cnoise\"])\n #opts = np.array([\"grc_cnoise_cn\", \"grc_cnoise_addn_cn\"]) \n #opts = np.array([\"if_cnoise_addn\", \"if_cnoise\"]) \n \n do = np.array([\"timeconst\"])\n \n #do = np.array([\"transfer\"])\n #opts = np.array([\"grc_cnoise_syn\"])\n #opts = np.array([\"grc_recon_syn\"])\n \n #do = np.array([\"prk_test\"])\n \n \n if \"prk_test\" in do:\n \n import multiprocessing\n from Purkinje import Purkinje\n cell = Purkinje() \n\n # set up recording\n # Time\n rec_t = h.Vector()\n rec_t.record(h._ref_t)\n \n # Voltage\n rec_v = h.Vector()\n rec_v.record(cell.soma(0.5)._ref_v)\n\n tstop = 500\n v_init = -60\n \n stim = h.IClamp(cell.soma(0.5))\n stim.amp = 0.0/nA\n stim.delay = 1\n stim.dur = 1000\n \n cpu = multiprocessing.cpu_count()\n h.load_file(\"parcom.hoc\")\n p = h.ParallelComputeTool()\n p.change_nthread(cpu,1)\n p.multisplit(1)\n print 'cpus:', cpu\n \n h.load_file(\"stdrun.hoc\")\n h.celsius = 37 \n h.init()\n h.tstop = tstop\n dt = 0.025 # ms\n h.dt = dt\n h.steps_per_ms = 1 / dt \n h.v_init = v_init \n \n h.finitialize()\n h.run()\n \n t1 = np.array(rec_t)\n voltage = np.array(rec_v)\n s, spike_times = get_spikes(voltage, -20, t1)\n\n print 1000/diff( spike_times)\n\n plt.figure()\n plt.subplot(2,1,1)\n plt.plot(t1, voltage)\n \n plt.show()\n\n\n if \"transfer\" in do:\n \n # SET DEFAULT VALUES FOR THIS PLOT\n fig_size = [11.7, 8.3]\n params = {'backend': 'ps', 'axes.labelsize': 9, 'axes.linewidth' : 0.5, 'title.fontsize': 8, 'text.fontsize': 9,\n 'legend.borderpad': 0.2, 'legend.fontsize': 8, 'legend.linewidth': 0.1, 'legend.loc': 'best', # 'lower right' \n 'legend.ncol': 4, 'xtick.labelsize': 8, 'ytick.labelsize': 8, 'text.usetex': False, 'figure.figsize': fig_size}\n rcParams.update(params) \n \n \n freq_used0 = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 35, 40, 45, 50, 55, 60, 65, 70, 80, 100, 1000])*Hz\n #freq_used0 = np.concatenate((arange(0.1, 1, 0.1), arange(1, 501, 1) ))\n freq_used0 = np.array([1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 200, 400, 600, 800, 1000])\n \n SNR = None \n NI = None\n VAF = None\n \n t_stim = 1000*s # only for cnoise \n \n opt_plot = np.array([\"only_mag\",\"normalize\", \"dB\"]) # \n #opt_plot = np.array([\"normalize\", \"dB\"]) # \n \n color_vec = (np.array([\"Red\", \"Blue\", \"HotPink\", \"Indigo\"]), np.array([\"Blue\", \"Orange\", \"HotPink\", \"Indigo\"]))\n #color=cm.jet(1.*i/x)\n \n method_interpol = np.array(['bin','syn']) \n method_interpol = np.array(['bin']) \n 
\n for i, o in enumerate(opts):\n \n dt = 0.025*ms\n bin_width = 5*ms\n bin_width = dt\n jitter = 0*ms\n \n n_syn_ex = [0] \n g_syn_ex = [1]\n noise_syn = 0 \n inh_hold = 0 \n n_syn_inh = [0] \n g_syn_inh = [1]\n tau1_ex = 0\n tau2_ex = 10*ms\n tau1_inh = 0\n tau2_inh = 100*ms\n \n cutf = 20\n sexp = -1\n\n cutf = 0\n sexp = 0\n \n ihold = [10]\n amod = 0.1 # relative value\n give_freq = True\n \n anoise = [0]\n fluct_tau = 0*ms \n \n N = [100]\n \n amp = 0 # absolute value\n fluct_s = [0] # absolute value 0.0008\n ihold_sigma = [0] # 0.01 absolute value\n \n CF_var = [[5,10,20]]\n CF_var = False\n \n syn_tau1 = 5*ms\n syn_tau2 = 5*ms\n \n do_csd = 1\n \n if \"if\" in o:\n \n do_csd = 1\n \n color_vec = (np.array([\"Blue\"]), np.array([\"Blue\"]))\n #color_vec = (np.array([\"Red\"]), np.array([\"Red\"]))\n \n cellimport = []\n celltype = [\"IfCell\"]\n #cell_exe = [\"cell = IfCell()\"]\n #cell_exe = [\"cell = IfCell(e = -70*mV, thresh = -69*mV, vrefrac = -70*mV)\"] \n #cell_exe = [\"cell = IfCell(e = 0*mV, thresh = 1*mV, vrefrac = 0*mV)\"]\n \n # Brunel\n #cell_exe = [\"cell = IfCell(C = 0.0005 *uF, R = 40*MOhm, e = -70*mV, thresh = -50*mV, vrefrac = -56*mV); cell.add_resonance(tau_r = 100*ms, gr = 0.025*uS)\"] \n \n #cell_exe = [\"cell = IfCell(C = 0.0001*uF, R = 40*MOhm, sigma_C = 0.2, sigma_R = 0.2)\"] \n #cell_exe = [\"cell = IfCell(C = 0.0001*uF, R = 40*MOhm)\"] # tau = 4 ms\n #cell_exe = [\"cell = IfCell(C = 0.0001*uF, R = 40*MOhm, s_reset_noise = 0*mV)\"] # tau = 4 ms\n \n #GrC resting: 737 MOhm, 2.985e-06 uF tau: 0.0022 s\n #GrC transfer fit: tau: 0.027 s => with 2.985e-06 uF, R = 0.027/2.985e-12 = 9045 MOhm\n \n #cell_exe = [\"cell = IfCell(C = 2.985e-06*uF, R = 9045*MOhm)\"] \n \n thresh = -41.8 \n R = 5227*MOhm\n #tau_passive = 3e-06*5227 = 15.7ms\n \n cell_exe = [\"cell = IfCell(C = 3.0e-06*uF, R = \" + str(R) + \", e = -71.5*mV, thresh =\" + str(thresh) + \", vrefrac = -71.5*mV)\"]\n \n prefix = \"if_tf\"\n \n istart = 0 \n istop = 0.01\n di = 0.00001\n \n \n syn_tau1 = 10*ms\n syn_tau2 = 10*ms\n \n # Indirect\n give_freq = True\n ihold = [40]\n amod = 1 # relative value\n \n anoise = [0] \n fluct_tau = 0*ms \n \n #anoise = 0.1\n #fluct_tau = 100*ms\n \n# # Direct\n# give_freq = False\n# ihold = [0.00569223341176]\n# amod = None\n# amp = 7.31353725e-06\n# \n# anoise = None\n# fluct_s = [3.65676863e-06] \n# fluct_tau = 0*ms\n# \n# # Low CF, No low noise\n# N = [10000]\n# give_freq = False\n# ihold = [0.004]\n# ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value\n# amod = None\n# amp = 0.0021\n# \n# anoise = None\n# fluct_s = [0.00] # .005\n# fluct_tau = 0*ms\n \n \n# # Low CF, With low noise\n# N = [10000]\n# give_freq = False\n# ihold = [0.002]\n# ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value\n# amod = None\n# amp = 0.001\n# \n# anoise = None\n# fluct_s = [0.002] # .005\n# fluct_tau = 100*ms\n \n if \"resif\" in o:\n \n do_csd = 1\n \n color_vec = (np.array([\"Blue\"]), np.array([\"Blue\"]))\n #color_vec = (np.array([\"Red\"]), np.array([\"Red\"]))\n \n cellimport = []\n celltype = [\"IfCell\"]\n \n gr = 5.56e-05*uS \n tau_r = 19.6*ms\n R = 5227*MOhm\n delta_t = 4.85*ms\n thresh = (0.00568*nA * R) - 71.5*mV # \n thresh = -41.8 \n \n cellimport = []\n celltype = \"IfCell\"\n cell_exe = \"cell = IfCell(C = 3e-06*uF, R = \" + str(R) + \", e = -71.5*mV, thresh =\" + str(thresh) + \", vrefrac = -71.5*mV, dgk =\" + str(gr) + \", egk = -71.5*mV, ctau =\" + str(tau_r) + \")\"\n\n prefix = \"resif_tf\"\n \n istart = 0 \n istop = 0.01\n di = 0.00001\n \n \n syn_tau1 = 10*ms\n syn_tau2 
= 10*ms\n \n # Indirect\n give_freq = True\n ihold = [40]\n amod = 1 # relative value\n \n anoise = [0] \n fluct_tau = 0*ms \n dt = 0.1*ms\n \n \n \n if \"if_syn\" in o:\n \n N = [1] \n ihold = [40] \n amod = 1 # relative value\n \n prefix = \"if_syntf\" \n \n n_syn_ex = 1 \n\n g_syn_ex = 0 \n \n noise_syn = 0\n\n fluct_tau = 0*ms \n \n freq_used = np.array([])\n \n tau1_ex=0*ms\n tau2_ex=10*ms\n \n anoise = [0]\n\n \n if \"grc\" in o:\n \n color_vec = (np.array([\"Blue\"]), np.array([\"Blue\"]))\n\n cellimport = [\"from GRANULE_Cell import Grc\"]\n celltype = [\"Grc\"]\n cell_exe = [\"cell = Grc(np.array([0.,0.,0.]))\"] \n \n prefix = \"grc_tf\" \n\n istart = 0 \n istop = 0.1\n di = 0.01\n \n syn_tau1 = 10*ms\n syn_tau2 = 10*ms\n \n # Indirect\n give_freq = True\n ihold = [40]\n amod = 1 # relative value\n \n anoise = [0]\n fluct_tau = 0*ms \n \n #anoise = 0.1\n #fluct_tau = 100*ms\n \n# # Direct\n# give_freq = False\n# ihold = [0.0058021085712642992]\n# amod = None\n# amp = 7.31353725e-06\n# \n# anoise = None\n# fluct_s = [3.65676863e-06] \n# fluct_tau = 0*ms\n# \n# # Low CF, No low noise\n# N = [50]\n# give_freq = False\n# ihold = [0.0049]\n# ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value\n# amod = None\n# amp = 0.0021\n# \n# anoise = None\n# fluct_s = [0.00] # .005\n# fluct_tau = 0*ms\n# \n# \n# # Low CF, With low noise\n# N = [10000]\n# give_freq = False\n# ihold = [0.003]\n# ihold_sigma = [0.1/2] # 0.1/2 0.01 realtive value\n# amod = None\n# amp = 0.001\n# \n# anoise = None\n# fluct_s = [0.002] # .005\n# fluct_tau = 100*ms\n \n \n use_multisplit = False\n use_mpi = True\n simstep = 1*s\n \n if \"prk\" in o:\n \n N = [1] \n ihold = [60] \n \n color_vec = (np.array([\"Blue\"]), np.array([\"Blue\"]))\n\n cellimport = [\"from Purkinje import Purkinje\"]\n celltype = [\"Prk\"]\n cell_exe = [\"cell = Purkinje()\"] \n \n prefix = \"prk_tf\" \n\n temperature = 37\n\n istart = 0 \n istop = 0.1\n di = 0.005\n \n use_multisplit = True\n use_mpi = False\n \n t_stim = 5*s # only for cnoise \n simstep = 1*s\n\n\n if \"grc_syn\" in o:\n \n N = [1] \n ihold = [125] \n amod = 1 # relative value\n \n prefix = \"grc_syntf\" \n \n cutf = 20\n sexp = -1\n \n cutf = 0\n sexp = 0\n \n n_syn_ex = 1 \n g_syn_ex = -1\n noise_syn = 1\n\n n_syn_inh = -1\n inh_hold = 0\n g_syn_inh = 0\n \n fluct_tau = 0*ms \n \n freq_used = np.array([])\n \n anoise = 0\n \n \n if \"_addn\" in o:\n \n anoise = [6] # RESPONSIBLE FOR FILTERING EFFECT!!!\n fluct_tau = 1*ms \n prefix = prefix + \"_addn\"\n color_vec = (np.array([\"Red\"]), np.array([\"Red\"]))\n \n if \"_addn100\" in o:\n \n anoise = [2] # RESPONSIBLE FOR FILTERING EFFECT!!!\n fluct_tau = 100*ms \n prefix = prefix + \"100\"\n color_vec = (np.array([\"Green\"]), np.array([\"Green\"]))\n \n if \"_cn_\" in o:\n \n cutf = 20\n sexp = -1\n prefix = prefix + \"_cn\"\n \n if \"_a01\" in o:\n \n amod=0.1\n prefix = prefix + \"_a01\"\n \n\n \n plt.figure(i)\n pickle_prefix = \"Population.py_\" + prefix\n \n #comm = MPI.COMM_WORLD\n #comm.Barrier() # wait for other nodes\n \n pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, ihold = ihold, ihold_sigma = ihold_sigma, amp = amp, amod = amod, give_freq = give_freq, do_run = do_run, pickle_prefix = pickle_prefix, istart = istart, istop = istop, di = di, dt = dt) \n pop.bin_width = bin_width\n pop.jitter = jitter\n pop.anoise = anoise\n pop.fluct_s = fluct_s \n pop.fluct_tau = fluct_tau \n pop.method_interpol = method_interpol \n pop.no_fmean = False\n pop.CF_var = 
CF_var\n \n pop.tau1_ex=tau1_ex\n pop.tau2_ex=tau2_ex\n pop.tau1_inh=tau1_inh\n pop.tau2_inh=tau2_inh\n \n pop.n_syn_ex = n_syn_ex \n pop.g_syn_ex = g_syn_ex \n \n pop.noise_syn = noise_syn \n pop.inh_hold = inh_hold \n pop.n_syn_inh = n_syn_inh \n pop.g_syn_inh = g_syn_inh\n \n pop.force_run = False\n pop.use_multisplit = use_multisplit\n pop.use_mpi = use_mpi\n pop.simstep = simstep\n pop.use_local_dt = False\n pop.syn_tau1 = syn_tau1\n pop.syn_tau2 = syn_tau2\n pop.plot_input = False\n \n \n if n_syn_inh == -1:\n pop.connect_gfluct(g_i0=g_syn_inh)\n \n #pop.test_mod(n_syn_ex = n_syn_ex, g_syn_ex = g_syn_ex, noise_syn = noise_syn, inh_hold = inh_hold, n_syn_inh = n_syn_inh, g_syn_inh = g_syn_inh, do_plot = True)\n \n if \"ssine\" in o:\n pop.color_vec = color_vec\n #pop.color_vec = (np.array([\"Red\", \"Orange\", \"HotPink\", \"Indigo\"]), np.array([\"Red\", \"Orange\", \"HotPink\", \"Indigo\"])) \n pop.fun_plot(currlabel = \"control\", dowhat = \"ssine\", freq_used = freq_used0, opt_plot = opt_plot)\n\n pop.save_plot(directory = \"./figs/dump/\") \n \n if \"cnoise\" in o:\n \n freq_used = np.array([])\n pop.color_vec = color_vec\n #pop.color_vec = (np.array([\"Blue\", \"Green\", \"DimGray\", \"DarkGoldenRod\"]), np.array([\"Blue\", \"Green\", \"DimGray\", \"DarkGoldenRod\"])) \n pop.fun_plot(currlabel = \"control\", dowhat = \"cnoise\", t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = 0, opt_plot = opt_plot, freq_used = freq_used, do_csd = do_csd)\n \n pop.save_plot(directory = \"./figs/dump/\") \n \n \n if \"recon\" in o:\n \n pop.color_vec = color_vec \n #VAF, SNR, ax, tk, K_mat_old = pop.fun_plot(currlabel = \"control\", dowhat = \"cnoise\", t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = 0, opt_plot = opt_plot, n_syn_ex = n_syn_ex, g_syn_ex = g_syn_ex, noise_syn = noise_syn, inh_hold = inh_hold, n_syn_inh = n_syn_inh, g_syn_inh = g_syn_inh, SNR=0, freq_used = freq_used)\n \n # RECONSTRUCT!\n freq_used = np.array([9, 47, 111, 1000])*Hz\n t_stim = 10*s\n\n tk = arange(0,0.8192*2,pop.dt)\n K_mat_old = zeros((len(method_interpol),len(tk)), dtype=complex)\n \n if pop.id == 0:\n\n sigma = 0.1e-3\n a=0.1\n t0 = tk[floor(len(tk)/2)]\n K_mat_old[0] = gauss_func(tk, a, t0, sigma)\n \n K_mat_old = np.array([])\n\n results = pop.fun_cnoise_Stim(t_stim = t_stim, cutf = cutf, sexp = sexp, t_qual = 5, n_syn_ex = n_syn_ex, g_syn_ex = g_syn_ex, noise_syn = noise_syn, inh_hold = inh_hold, n_syn_inh = n_syn_inh, g_syn_inh = g_syn_inh, freq_used = freq_used, K_mat_old = K_mat_old, seed = 311)\n \n freq_used, amp_vec, mag, pha, ca, voltage, current, t1 = results.get('freq_used'), results.get('amp'), results.get('mag'), results.get('pha'), results.get('ca'), results.get('voltage'), results.get('current'), results.get('t1') \n freq_times, spike_freq, fmean, method_interpol, SNR, VAF, Qual = results.get('freq_times'), results.get('spike_freq'), results.get('fmean'), results.get('method_interpol'), results.get('SNR'), results.get('VAF'), results.get('Qual') \n stim, resp_mat, stim_re_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat')\n \n if pop.id == 0:\n \n plt.figure('Reconstruct')\n axR0 = plt.subplot(4,1,1)\n axR1 = plt.subplot(4,1,2)\n axR2 = plt.subplot(4,1,3)\n axR3 = plt.subplot(4,1,4)\n \n axR0.plot(np.arange(len(stim))*pop.dt, resp_mat[0,:])\n axR0.axis(xmin=0.9, xmax=1)\n #axR0.plot(t1, voltage[0])\n axR1.plot(np.arange(len(stim))*pop.dt, stim, 'b')\n axR1.axis(xmin=0.9, xmax=1)\n axR2.plot(np.arange(len(stim))*pop.dt, stim_re_mat[0,:], 'r')\n 
axR2.axis(xmin=0.9, xmax=1)\n axR3.plot(tk, K_mat_old[0])\n plt.savefig(\"./figs/dump/Reconstruct.pdf\", dpi = 300, transparent=True) # save it\n \n pop = None\n \n \n plt.show()\n \n \n if \"timeconst\" in do:\n \n from lmfit import minimize, Parameters\n \n # SET DEFAULT VALUES FOR THIS PLOT\n fig_size = [11.7, 8.3]\n params = {'backend': 'ps', 'axes.labelsize': 9, 'axes.linewidth' : 0.5, 'title.fontsize': 8, 'text.fontsize': 9,\n 'legend.borderpad': 0.2, 'legend.fontsize': 8, 'legend.linewidth': 0.1, 'legend.loc': 'best', # 'lower right' \n 'legend.ncol': 4, 'xtick.labelsize': 8, 'ytick.labelsize': 8, 'text.usetex': False, 'figure.figsize': fig_size}\n rcParams.update(params) \n \n dt = 0.025*ms\n \n prefix = \"timeconst\"\n pickle_prefix = \"Population.py_\" + prefix\n \n stimtype = \"inh_50ms_20ms\"\n \n if stimtype == \"ex_20ms\":\n \n trun = 2.9\n tstart = 1.8\n tstop = 2.7\n\n celltype = [\"IfCell\"]\n cell_exe = [\"cell = IfCell(C = 0.0001*uF, R = 200*MOhm)\"]\n N = [5000]\n \n pop = Population(celltype = celltype, cell_exe = cell_exe, N = N, temperature = 0, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt) \n \n pop.method_interpol = np.array([\"bin\", \"syn\"])\n pop.method_interpol = np.array([\"bin\"])\n \n modulation_vec = pop.set_PulseStim(start_time=[100*ms], dur=[3000*ms], steadyf=[100*Hz], pulsef=[150*Hz], pulse_start=[2000*ms], pulse_len=[500*ms], weight0=[1*nS], tau01=[0*ms], tau02=[20*ms], weight1=[0*nS], tau11=[0*ms], tau12=[1*ms])\n \n params = Parameters()\n params.add('amp', value=0.1)\n params.add('shift', value=10)\n params.add('tau1', value=1, vary=False) # alpha! \n params.add('tau2', value=20*ms) \n \n \n if stimtype == \"ex_gr\":\n \n trun = 6.9\n tstart = 4.8\n tstop = 6.5\n\n cellimport = [\"from GRANULE_Cell import Grc\"]\n celltype = [\"Grc\"]\n cell_exe = [\"cell = Grc(np.array([0.,0.,0.]))\"]\n N = [4096*10]\n \n pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt) \n \n pop.method_interpol = np.array([\"bin\", \"syn\"])\n pop.method_interpol = np.array([\"bin\"])\n \n modulation_vec = pop.set_PulseStim(start_time=[100*ms], dur=[7000*ms], steadyf=[20*Hz], pulsef=[30*Hz], pulse_start=[5000*ms], pulse_len=[500*ms])\n \n params = Parameters()\n params.add('amp', value=0.1)\n params.add('shift', value=10)\n params.add('tau1', value=1, vary=False) # alpha! \n params.add('tau2', value=20*ms) \n \n \n if stimtype == \"inh_50ms_20ms\":\n \n trun = 2.9\n tstart = 1.8\n tstop = 2.7\n \n celltype = [\"IfCell\", \"IfCell\"]\n cell_exe = [\"cell = IfCell()\", \"cell = IfCell()\"]\n \n N = [10000,10000]\n \n pop = Population(celltype = celltype, cell_exe = cell_exe, N = N, temperature = 0, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt) \n \n pop.method_interpol = np.array([\"bin\", \"syn\"])\n pop.method_interpol = np.array([\"bin\"])\n \n modulation_vec = pop.set_PulseStim(start_time=[100*ms,100*ms], dur=[3000*ms,3000*ms], steadyf=[100*Hz,50*Hz], pulsef=[100*Hz,80*Hz], pulse_start=[2000*ms,2000*ms], pulse_len=[500*ms,500*ms], weight0=[1*nS,1*nS], tau01=[1*ms,1*ms], tau02=[20*ms,20*ms], weight1=[0,0], tau11=[0*ms,0*ms], tau12=[1*ms,1*ms])\n\n pop.connect_cells(conntype='inh', weight=0.001, tau=50)\n \n params = Parameters()\n params.add('amp', value=-0.1)\n params.add('shift', value=10)\n params.add('tau1', value=1, vary=False) # alpha! 
\n params.add('tau2', value=20*ms)\n \n \n if stimtype == \"inh_gr\":\n\n trun = 9.9 \n tstart = 4.8\n tstop = 8\n \n cellimport = [\"from GRANULE_Cell import Grc\", \"from templates.golgi.Golgi_template import Goc\"]\n celltype = [\"Grc\",\"Goc_noloop\"]\n cell_exe = [\"cell = Grc(np.array([0.,0.,0.]))\",\"cell = Goc(np.array([0.,0.,0.]))\"]\n N = [100,4]\n #N = [4096, 27]\n #N = [4096*5, 27*5]\n\n pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, do_run = do_run, pickle_prefix = pickle_prefix, dt = dt) \n \n pop.method_interpol = np.array([\"bin\", \"syn\"])\n pop.method_interpol = np.array([\"bin\"])\n \n modulation_vec = pop.set_PulseStim(start_time=[100*ms,100*ms], dur=[9800*ms,9800*ms], steadyf=[60*Hz,10*Hz], pulsef=[60*Hz,22*Hz], pulse_start=[5000*ms,5000*ms], pulse_len=[1500*ms,1500*ms])\n\n pop.connect_cells(conntype='inh_gr', weight = 0.3)\n \n params = Parameters()\n params.add('amp', value=-0.1)\n params.add('shift', value=10)\n params.add('tau1', value=1, vary=False) # alpha! \n params.add('tau2', value=20*ms)\n \n \n if stimtype == \"inh_50ms_curr\":\n \n trun = 2.9\n tstart = 1.8\n tstop = 2.8\n \n celltype = [\"IfCell\", \"IfCell\"]\n cell_exe = [\"cell = IfCell()\", \"cell = IfCell()\"]\n \n N = [1000,1000]\n \n give_freq = True\n \n istart = 0 \n istop = 0.2\n di = 0.01\n \n ihold = [100, 50] \n ihold_sigma = [0.01, 0.01] # relative sigma\n \n pop = Population(celltype = celltype, cell_exe = cell_exe, N = N, temperature = 0, ihold = ihold, ihold_sigma = ihold_sigma, give_freq = give_freq, do_run = do_run, pickle_prefix = pickle_prefix, istart = istart, istop = istop, di = di, dt = dt) \n \n pop.method_interpol = np.array([\"bin\", \"syn\"])\n pop.method_interpol = np.array([\"bin\"])\n \n tstep = 2 \n tdur = 0.5\n \n istep = [100,100]\n current1 = np.concatenate(([ihold[1]*np.ones(round((tstep)/pop.dt)), istep[1]*np.ones(round(tdur/pop.dt)),ihold[1]*np.ones(round((trun-tstep-tdur)/pop.dt)) ])) \n \n pop.set_IStim()\n pop.set_IStep(istep = istep, istep_sigma = [0.01,0.01], tstep = tstep, tdur = tdur)\n \n pop.connect_cells(conntype='inh', weight=0.0003, tau=50)\n \n pop.fluct_s = [0.02,0.05]\n pop.connect_fluct() \n \n params = Parameters()\n params.add('amp', value=-0.1)\n params.add('shift', value=10)\n params.add('tau1', value=1, vary=False) # alpha! 
\n params.add('tau2', value=20*ms)\n \n \n if stimtype == \"inh_gr_curr\":\n \n trun = 9.9 \n tstart = 4.8\n tstop = 8\n \n cellimport = [\"from GRANULE_Cell import Grc\", \"from templates.golgi.Golgi_template import Goc\"]\n celltype = [\"Grc\",\"Goc_noloop\"]\n cell_exe = [\"cell = Grc(np.array([0.,0.,0.]))\",\"cell = Goc(np.array([0.,0.,0.]))\"]\n N = [100,4]\n N = [4096, 27]\n N = [4096*10, 27*10] \n\n give_freq = True\n \n # GRC \n #istart = 0 \n #istop = 0.1\n #di = 0.01\n \n #GOC\n istart = 0 \n istop = 0.5\n di = 0.02\n \n ihold = [100, 10] \n ihold_sigma = [0, 0] # relative sigma\n \n pop = Population(cellimport = cellimport, celltype = celltype, cell_exe = cell_exe, N = N, temperature = 37, ihold = ihold, ihold_sigma = ihold_sigma, give_freq = give_freq, do_run = do_run, pickle_prefix = pickle_prefix, istart = istart, istop = istop, di = di, dt = dt) \n \n pop.method_interpol = np.array([\"bin\", \"syn\"])\n pop.method_interpol = np.array([\"bin\"])\n \n tstep = 5 \n tdur = 2\n \n istep = [100,50]\n current1 = np.concatenate(([ihold[1]*np.ones(round((tstep)/pop.dt)), istep[1]*np.ones(round(tdur/pop.dt)),ihold[1]*np.ones(round((trun-tstep-tdur)/pop.dt)) ])) \n \n pop.set_IStim()\n pop.set_IStep(istep = istep, istep_sigma = [0,0], tstep = tstep, tdur = tdur)\n \n pop.connect_cells(conntype='inh_gr', weight = 0.4)\n \n pop.fluct_s = [0.05,2]\n pop.connect_fluct() \n \n params = Parameters()\n params.add('amp', value=-0.1)\n params.add('shift', value=10)\n params.add('tau1', value=1, vary=False) # alpha! \n params.add('tau2', value=20*ms) \n \n \n pop.run_steps(trun)\n \n self.no_fmean = True\n results = pop.get()\n time, voltage, current, fmean, gsyn = results.get('time'), results.get('voltage'), results.get('current'), results.get('fmean'), results.get('gsyn')\n freq_times, spike_freq, t_all_vec_vec, id_all_vec_vec, gsyns = results.get('freq_times'), results.get('spike_freq'), results.get('t_all_vec_vec'), results.get('id_all_vec_vec'), results.get('gsyns')\n \n if pop.id == 0:\n \n bin_width = 1*ms\n freq_times = arange(0, time[-1], bin_width)\n [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[0], bins = freq_times)\n spike_freq = np.concatenate((zeros(1),num_spikes)) / bin_width / N[0]\n \n \n if \"inh\" in stimtype: # generate input current, to complicated to get it out\n \n if \"curr\" in stimtype:\n time1 = np.arange(0, trun, pop.dt)\n \n r_mod = interp(freq_times, time1, current1, left=0, right=0)\n \n [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[1], bins = freq_times)\n spike_freq1 = np.concatenate((zeros(1),num_spikes)) / bin_width / N[1]\n else:\n r_mod = interp(freq_times, modulation_vec[1][0], modulation_vec[1][1], left=0, right=0)\n \n [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[1], bins = freq_times)\n spike_freq1 = np.concatenate((zeros(1),num_spikes)) / bin_width / N[1]\n \n elif \"ex\" in stimtype:\n r_mod = interp(freq_times, modulation_vec[0][0], modulation_vec[0][1], left=0, right=0) \n\n\n def modelfun(amp, shift, tau1, tau2, bin_width, r_mod):\n \n tau1 = tau1\n tau2 = tau2\n \n t1 = np.arange(0,10*tau2,bin_width)\n K = amp*syn_kernel(t1, tau1, tau2) \n K = np.concatenate((np.zeros(len(K)-1),K))\n t2 = np.arange(0,len(K)*bin_width,bin_width)\n \n model = np.convolve(K, r_mod, mode='same') + shift\n \n return model\n\n \n def residual(params, r_mod, data=None, bin_width=1*ms, tstart=0, tstop=3):\n \n amp = params['amp'].value\n shift = params['shift'].value\n tau1 = params['tau1'].value\n 
tau2 = params['tau2'].value\n \n model = modelfun(amp, shift, tau1, tau2, bin_width, r_mod)\n \n return (data[int(tstart/bin_width):int(tstop/bin_width)]-model[int(tstart/bin_width):int(tstop/bin_width)])\n \n \n result = minimize(residual, params, args=(r_mod, spike_freq, bin_width, tstart, tstop))\n \n print \"chisqr: \", result.chisqr\n print 'Best-Fit Values:'\n for name, par in params.items():\n print ' %s = %.4f +/- %.4f ' % (name, par.value, par.stderr)\n \n amp = params['amp'].value\n shift = params['shift'].value\n tau1 = params['tau1'].value\n tau2 = params['tau2'].value\n \n model = modelfun(amp, shift, tau1, tau2, bin_width = bin_width, r_mod = r_mod) \n \n \n if \"ex\" in stimtype:\n plt.figure(0)\n plt.plot(freq_times[int(0.5/bin_width):int(trun/bin_width)], spike_freq[int(0.5/bin_width):int(trun/bin_width)], freq_times[int(0.5/bin_width):int(trun/bin_width)], model[int(0.5/bin_width):int(trun/bin_width)])\n plt.figure(1)\n plt.plot(time, voltage[0]), freq_times, r_mod, time, current\n #plt.figure(100) \n #plt.plot(t_all_vec_vec[0],id_all_vec_vec[0],'k|')\n #plt.savefig(\"./figs/dump/taufit_\" + str(stimtype) + \"_spikes.pdf\", dpi = 300) # save it \n \n else:\n plt.figure(0)\n plt.plot(freq_times[int(0.5/bin_width):int(trun/bin_width)], spike_freq1[int(0.5/bin_width):int(trun/bin_width)], freq_times[int(0.5/bin_width):int(trun/bin_width)], spike_freq[int(0.5/bin_width):int(trun/bin_width)], freq_times[int(0.5/bin_width):int(trun/bin_width)], model[int(0.5/bin_width):int(trun/bin_width)])\n plt.figure(1)\n plt.plot(time, voltage[0], time, voltage[1], freq_times, r_mod, time, current)\n plt.figure(100) \n #plt.plot(t_all_vec_vec[0],id_all_vec_vec[0],'k|')\n #plt.plot(t_all_vec_vec[1],id_all_vec_vec[1],'b|')\n #plt.savefig(\"./figs/dump/taufit_\" + str(stimtype) + \"_spikes.pdf\", dpi = 300) # save it \n \n \n plt.figure(0)\n plt.title('Fit: ' + str(stimtype) + ', tau1=' + str(tau1) + ' tau2=' + str(tau2))\n plt.savefig(\"./figs/dump/taufit_\" + str(stimtype) + \"_rate.png\", dpi = 300) # save it \n \n plt.figure(1)\n plt.savefig(\"./figs/dump/taufit_\" + str(stimtype) + \"_voltage.png\", dpi = 300) # save it \n \n \n plt.show()\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import re
import random
import requests
from bs4 import BeautifulSoup
import js2py
from fake_useragent import UserAgent
def _get_request_key(session):
res = session.post("https://spys.one/en/socks-proxy-list/")
soup = BeautifulSoup(res.text, 'html.parser')
return soup.find("input", {"name": "xx0"}).get("value")
def _get_proxy_list(session, xx0):
res = session.post("https://spys.one/en/socks-proxy-list/",
data=f"xx0={xx0}&xpp={0}&xf1={0}&xf2={0}&xf4={0}&xf5={2}",
headers={
"Content-Type": "application/x-www-form-urlencoded",
})
soup = BeautifulSoup(res.text, 'html.parser')
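    # The port numbers on spys.one are produced by inline JavaScript; evaluate the
    # page's obfuscation script with js2py so the per-row port snippets can be decoded.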
js = js2py.EvalJs({"document": {"write": lambda a: a}})
js.execute(soup.select_one("body > script").string)
addrs = soup.select("tr[onmouseover] > td:first-child")
ports = [js.eval(i.find("script").string) for i in addrs]
addrs = [i.get_text() for i in addrs]
ports = [re.sub(r"<[^<]*>", "", i) for i in ports]
return list(map(''.join, zip(addrs, ports)))
class ProxyScrapper:
def __init__(self):
self._proxies = []
def refresh(self):
session = requests.Session()
session.headers["User-Agent"] = UserAgent().random
print("Rotating proxy list")
xx0 = _get_request_key(session)
print(f"Got proxy request key xx0={xx0}")
addrs = _get_proxy_list(session, xx0)
self._proxies = [f"socks5://{i}" for i in addrs]
print(f"Got {len(self._proxies)} proxies")
def random(self):
assert(len(self._proxies) > 0)
return random.choice(self._proxies)
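

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; assumes spys.one is reachable
    # and that its page layout has not changed).
    scrapper = ProxyScrapper()
    scrapper.refresh()
    print(scrapper.random())  # e.g. "socks5://1.2.3.4:1080"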
|
normal
|
{
"blob_id": "647dde6e3288ded29336062b78baacc3a92908a7",
"index": 478,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProxyScrapper:\n\n def __init__(self):\n self._proxies = []\n\n def refresh(self):\n session = requests.Session()\n session.headers['User-Agent'] = UserAgent().random\n print('Rotating proxy list')\n xx0 = _get_request_key(session)\n print(f'Got proxy request key xx0={xx0}')\n addrs = _get_proxy_list(session, xx0)\n self._proxies = [f'socks5://{i}' for i in addrs]\n print(f'Got {len(self._proxies)} proxies')\n\n def random(self):\n assert len(self._proxies) > 0\n return random.choice(self._proxies)\n",
"step-3": "<mask token>\n\n\ndef _get_request_key(session):\n res = session.post('https://spys.one/en/socks-proxy-list/')\n soup = BeautifulSoup(res.text, 'html.parser')\n return soup.find('input', {'name': 'xx0'}).get('value')\n\n\n<mask token>\n\n\nclass ProxyScrapper:\n\n def __init__(self):\n self._proxies = []\n\n def refresh(self):\n session = requests.Session()\n session.headers['User-Agent'] = UserAgent().random\n print('Rotating proxy list')\n xx0 = _get_request_key(session)\n print(f'Got proxy request key xx0={xx0}')\n addrs = _get_proxy_list(session, xx0)\n self._proxies = [f'socks5://{i}' for i in addrs]\n print(f'Got {len(self._proxies)} proxies')\n\n def random(self):\n assert len(self._proxies) > 0\n return random.choice(self._proxies)\n",
"step-4": "import re\nimport random\nimport requests\nfrom bs4 import BeautifulSoup\nimport js2py\nfrom fake_useragent import UserAgent\n\n\ndef _get_request_key(session):\n res = session.post('https://spys.one/en/socks-proxy-list/')\n soup = BeautifulSoup(res.text, 'html.parser')\n return soup.find('input', {'name': 'xx0'}).get('value')\n\n\ndef _get_proxy_list(session, xx0):\n res = session.post('https://spys.one/en/socks-proxy-list/', data=\n f'xx0={xx0}&xpp={0}&xf1={0}&xf2={0}&xf4={0}&xf5={2}', headers={\n 'Content-Type': 'application/x-www-form-urlencoded'})\n soup = BeautifulSoup(res.text, 'html.parser')\n js = js2py.EvalJs({'document': {'write': lambda a: a}})\n js.execute(soup.select_one('body > script').string)\n addrs = soup.select('tr[onmouseover] > td:first-child')\n ports = [js.eval(i.find('script').string) for i in addrs]\n addrs = [i.get_text() for i in addrs]\n ports = [re.sub('<[^<]*>', '', i) for i in ports]\n return list(map(''.join, zip(addrs, ports)))\n\n\nclass ProxyScrapper:\n\n def __init__(self):\n self._proxies = []\n\n def refresh(self):\n session = requests.Session()\n session.headers['User-Agent'] = UserAgent().random\n print('Rotating proxy list')\n xx0 = _get_request_key(session)\n print(f'Got proxy request key xx0={xx0}')\n addrs = _get_proxy_list(session, xx0)\n self._proxies = [f'socks5://{i}' for i in addrs]\n print(f'Got {len(self._proxies)} proxies')\n\n def random(self):\n assert len(self._proxies) > 0\n return random.choice(self._proxies)\n",
"step-5": "import re\nimport random\nimport requests\nfrom bs4 import BeautifulSoup\nimport js2py\nfrom fake_useragent import UserAgent\n\n\ndef _get_request_key(session):\n res = session.post(\"https://spys.one/en/socks-proxy-list/\")\n soup = BeautifulSoup(res.text, 'html.parser')\n return soup.find(\"input\", {\"name\": \"xx0\"}).get(\"value\")\n\n\ndef _get_proxy_list(session, xx0):\n res = session.post(\"https://spys.one/en/socks-proxy-list/\",\n data=f\"xx0={xx0}&xpp={0}&xf1={0}&xf2={0}&xf4={0}&xf5={2}\",\n headers={\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n })\n\n soup = BeautifulSoup(res.text, 'html.parser')\n js = js2py.EvalJs({\"document\": {\"write\": lambda a: a}})\n js.execute(soup.select_one(\"body > script\").string)\n\n addrs = soup.select(\"tr[onmouseover] > td:first-child\")\n ports = [js.eval(i.find(\"script\").string) for i in addrs]\n addrs = [i.get_text() for i in addrs]\n ports = [re.sub(r\"<[^<]*>\", \"\", i) for i in ports]\n\n return list(map(''.join, zip(addrs, ports)))\n\n\nclass ProxyScrapper:\n def __init__(self):\n self._proxies = []\n\n def refresh(self):\n session = requests.Session()\n session.headers[\"User-Agent\"] = UserAgent().random\n print(\"Rotating proxy list\")\n\n xx0 = _get_request_key(session)\n print(f\"Got proxy request key xx0={xx0}\")\n\n addrs = _get_proxy_list(session, xx0)\n self._proxies = [f\"socks5://{i}\" for i in addrs]\n print(f\"Got {len(self._proxies)} proxies\")\n\n def random(self):\n assert(len(self._proxies) > 0)\n return random.choice(self._proxies)\n",
"step-ids": [
0,
4,
5,
7,
8
]
}
|
[
0,
4,
5,
7,
8
] |
a = [1, 2, 3, 4, 5]
# Start both extremes from the first element so the comparisons below are valid.
max = a[0]
min = a[0]
for i in a:
    if i >= max:
        max = i
    elif i <= min:
        min = i
print(max)
print(min)
|
normal
|
{
"blob_id": "65da68d33aa382ed6deeff3c66a063ee299c2567",
"index": 1448,
"step-1": "a=[1,2,3,4,5]\nmax=0\nfor i in a:\n\tif i>=max:\n\t\tmax=i\n\telif i<=min:\n\t\tmin=i\nprint max\nprint min\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from setup_env import *
#from mmlibrary import *
from astropy.coordinates import SkyCoord
import astropy.units as u
from mmlibrary import *
import numpy as np
import lal
from scipy.special import logsumexp
import cpnest, cpnest.model
# Test object: GW170817
#GW = SkyCoord('13h07m05.49s', '23d23m02.0s', unit=(u.hourangle, u.deg))
DL=33.4
dDL=3.34
GW = SkyCoord(ra = '13h07m05.49s', dec = '23d23m02.0s',
unit=('hourangle','deg'))
def Mstar(omega):
'''
    Compute the cut-off magnitude M* of the Schechter function
'''
return -20.47 + 5.0*np.log10(omega.h)
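# Schechter luminosity function in magnitudes (as implemented below):
#   phi(M) dM  ∝  10**(-0.4*(M - M*)*(alpha + 1)) * exp(-10**(-0.4*(M - M*))) dM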
def Schechter_unnormed(M, omega, alpha):
'''
    Unnormalized Schechter function
'''
Ms = Mstar(omega)
tmp = 10**(-0.4*(M-Ms))
return tmp**(alpha+1.0)*np.exp(-tmp)
def normalise(omega, alpha, Mmin = -30,Mmax = -10):
'''
    Normalization of the Schechter function (todo: compute it analytically)
'''
M = np.linspace(Mmin, Mmax, 100)
return np.sum([Schechter_unnormed(Mi, omega, alpha = alpha)*np.diff(M)[0] for Mi in M])
def Schechter(M, omega, alpha = -1.07):
'''
    Normalized Schechter function
'''
return Schechter_unnormed(M, omega, alpha = alpha)/normalise(omega, alpha = alpha)
def Mthreshold(DL, mth = 27.0):
'''
    Threshold absolute magnitude
'''
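    # Distance modulus with DL in Mpc: m - M = 5*log10(DL*1e6 pc / 10 pc) = 5*log10(1e5*DL)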
return mth - 5.0*np.log10(1e5*DL)
def mabs(m, DL):
return m - 5.0*np.log10(1e5*DL)
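# First-order Hubble law: z ≈ H0*D_L/c with H0 = 100*h km/s/Mpc and c ≈ 3e5 km/s,
# hence the factor 3e3 below (D_L in Mpc).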
def HubbleLaw(D_L, omega): # To be revised: first-order approximation only
    return D_L*omega.h/(3e3) # Sure about this number?
def gaussian(x,x0,sigma):
return np.exp(-(x-x0)**2/(2*sigma**2))/(sigma*np.sqrt(2*np.pi))
class completeness(cpnest.model.Model):
def __init__(self, catalog):
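        # Sampled parameters: GW redshift z plus the cosmology h, Omega_m, Omega_Lambda.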
self.names=['z', 'h', 'om', 'ol']
self.bounds=[[0.001,0.012],
[0.5,1.],
[0.04,1.],
[0.,1.]]
self.omega = lal.CreateCosmologicalParameters(0.7,0.5,0.5,-1.,0.,0.)
self.catalog = catalog
def log_prior(self, x):
        # check finiteness and the theta(M - Mth) selection cut
if not(np.isfinite(super(completeness, self).log_prior(x))):
return -np.inf
else:
self.omega.h = x['h']
self.omega.om = x['om']
self.omega.ol = x['ol']
zgw = x['z']
logP = 0.0
for zi,mi in zip(self.catalog['z'],self.catalog['Bmag']):
DL = lal.LuminosityDistance(self.omega, zi)
Mabsi = mabs(mi,DL)
if Mthreshold(DL) < Mabsi:
return -np.inf
else:
                    # Update the cosmological parameters with the sampled values
                    # Compute the prior. Each coordinate is weighted by the
                    # probabilities of the GW sky localisation ('bananas'), as is z.
                    # For the time being, a Gaussian around a single event is assumed.
logP += np.log(Schechter(Mabsi, self.omega))
#log_P_RA = np.log(gaussian(x['ra'],Gal.ra.rad,Gal.ra.rad/100.))
#log_P_DEC = np.log(gaussian(x['dec'],Gal.dec.rad,Gal.dec.rad/100.))
logP += np.log(lal.ComovingVolumeElement(zi, self.omega))
return logP
    # PROBLEM! How do I introduce the delta(ra,dec) terms?
def log_likelihood(self, x):
logL = 0.0
zgw = x['z']
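        # GW luminosity-distance measurement term, then a marginalisation over
        # candidate host-galaxy redshifts from the catalog.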
logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,dDL))
        logL += logsumexp([np.log(gaussian(zgw, zgi, zgi/10.0)) for zgi in self.catalog['z']])
#logL += np.log(gaussian(x['ra'],GW.ra.rad,GW.ra.rad/10.))
#logL += np.log(gaussian(x['dec'],GW.dec.rad,GW.dec.rad/10.))
return logL
if __name__ == '__main__':
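    # GLADE galaxies in a box around the GW170817 localisation (RA 190-200 deg,
    # Dec -25 to -15 deg), coarsely subsampled (every 100th entry) for speed.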
Gal_cat = GalInABox([190,200],[-25,-15], u.deg, u.deg, catalog='GLADE')[::100]
M = completeness(Gal_cat)
job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)
job.run()
# GLADE galaxy catalog
|
normal
|
{
"blob_id": "fa5468741e9884f6c8bcacaf9d560b5c93ee781a",
"index": 8906,
"step-1": "<mask token>\n\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names = ['z', 'h', 'om', 'ol']\n self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]\n self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0, \n 0.0, 0.0)\n self.catalog = catalog\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef Schechter_unnormed(M, omega, alpha):\n \"\"\"\n Funzione di Schechter non normalizzata\n \"\"\"\n Ms = Mstar(omega)\n tmp = 10 ** (-0.4 * (M - Ms))\n return tmp ** (alpha + 1.0) * np.exp(-tmp)\n\n\ndef normalise(omega, alpha, Mmin=-30, Mmax=-10):\n \"\"\"\n Normalizzazione funzione di Schechter (todo: fare analitica)\n \"\"\"\n M = np.linspace(Mmin, Mmax, 100)\n return np.sum([(Schechter_unnormed(Mi, omega, alpha=alpha) * np.diff(M)\n [0]) for Mi in M])\n\n\n<mask token>\n\n\ndef Mthreshold(DL, mth=27.0):\n \"\"\"\n Magnitudine assoluta di soglia\n \"\"\"\n return mth - 5.0 * np.log10(100000.0 * DL)\n\n\ndef mabs(m, DL):\n return m - 5.0 * np.log10(100000.0 * DL)\n\n\ndef HubbleLaw(D_L, omega):\n return D_L * omega.h / 3000.0\n\n\n<mask token>\n\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names = ['z', 'h', 'om', 'ol']\n self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]\n self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0, \n 0.0, 0.0)\n self.catalog = catalog\n\n def log_prior(self, x):\n if not np.isfinite(super(completeness, self).log_prior(x)):\n return -np.inf\n else:\n self.omega.h = x['h']\n self.omega.om = x['om']\n self.omega.ol = x['ol']\n zgw = x['z']\n logP = 0.0\n for zi, mi in zip(self.catalog['z'], self.catalog['Bmag']):\n DL = lal.LuminosityDistance(self.omega, zi)\n Mabsi = mabs(mi, DL)\n if Mthreshold(DL) < Mabsi:\n return -np.inf\n else:\n logP += np.log(Schechter(Mabsi, self.omega))\n logP += np.log(lal.ComovingVolumeElement(zi, self.omega))\n return logP\n\n def log_likelihood(self, x):\n logL = 0.0\n zgw = x['z']\n logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,\n dDL))\n logL += logsumexp([gaussian(zgw, zgi, zgi / 10.0) for zgi in self.\n catalog['z']])\n return logL\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef Mstar(omega):\n \"\"\"\n Calcolo magnitudine di taglio Schechter function\n \"\"\"\n return -20.47 + 5.0 * np.log10(omega.h)\n\n\ndef Schechter_unnormed(M, omega, alpha):\n \"\"\"\n Funzione di Schechter non normalizzata\n \"\"\"\n Ms = Mstar(omega)\n tmp = 10 ** (-0.4 * (M - Ms))\n return tmp ** (alpha + 1.0) * np.exp(-tmp)\n\n\ndef normalise(omega, alpha, Mmin=-30, Mmax=-10):\n \"\"\"\n Normalizzazione funzione di Schechter (todo: fare analitica)\n \"\"\"\n M = np.linspace(Mmin, Mmax, 100)\n return np.sum([(Schechter_unnormed(Mi, omega, alpha=alpha) * np.diff(M)\n [0]) for Mi in M])\n\n\ndef Schechter(M, omega, alpha=-1.07):\n \"\"\"\n Funzione di Schechter normalizzata\n \"\"\"\n return Schechter_unnormed(M, omega, alpha=alpha) / normalise(omega,\n alpha=alpha)\n\n\ndef Mthreshold(DL, mth=27.0):\n \"\"\"\n Magnitudine assoluta di soglia\n \"\"\"\n return mth - 5.0 * np.log10(100000.0 * DL)\n\n\ndef mabs(m, DL):\n return m - 5.0 * np.log10(100000.0 * DL)\n\n\ndef HubbleLaw(D_L, omega):\n return D_L * omega.h / 3000.0\n\n\ndef gaussian(x, x0, sigma):\n return np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 *\n np.pi))\n\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names = ['z', 'h', 'om', 'ol']\n self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]\n self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0, \n 0.0, 0.0)\n self.catalog = catalog\n\n def log_prior(self, x):\n if not np.isfinite(super(completeness, self).log_prior(x)):\n return -np.inf\n else:\n self.omega.h = x['h']\n self.omega.om = x['om']\n self.omega.ol = x['ol']\n zgw = x['z']\n logP = 0.0\n for zi, mi in zip(self.catalog['z'], self.catalog['Bmag']):\n DL = lal.LuminosityDistance(self.omega, zi)\n Mabsi = mabs(mi, DL)\n if Mthreshold(DL) < Mabsi:\n return -np.inf\n else:\n logP += np.log(Schechter(Mabsi, self.omega))\n logP += np.log(lal.ComovingVolumeElement(zi, self.omega))\n return logP\n\n def log_likelihood(self, x):\n logL = 0.0\n zgw = x['z']\n logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,\n dDL))\n logL += logsumexp([gaussian(zgw, zgi, zgi / 10.0) for zgi in self.\n catalog['z']])\n return logL\n\n\nif __name__ == '__main__':\n Gal_cat = GalInABox([190, 200], [-25, -15], u.deg, u.deg, catalog='GLADE')[\n ::100]\n M = completeness(Gal_cat)\n job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)\n job.run()\n",
"step-4": "from astropy.coordinates import SkyCoord\nimport astropy.units as u\nfrom mmlibrary import *\nimport numpy as np\nimport lal\nfrom scipy.special import logsumexp\nimport cpnest, cpnest.model\nDL = 33.4\ndDL = 3.34\nGW = SkyCoord(ra='13h07m05.49s', dec='23d23m02.0s', unit=('hourangle', 'deg'))\n\n\ndef Mstar(omega):\n \"\"\"\n Calcolo magnitudine di taglio Schechter function\n \"\"\"\n return -20.47 + 5.0 * np.log10(omega.h)\n\n\ndef Schechter_unnormed(M, omega, alpha):\n \"\"\"\n Funzione di Schechter non normalizzata\n \"\"\"\n Ms = Mstar(omega)\n tmp = 10 ** (-0.4 * (M - Ms))\n return tmp ** (alpha + 1.0) * np.exp(-tmp)\n\n\ndef normalise(omega, alpha, Mmin=-30, Mmax=-10):\n \"\"\"\n Normalizzazione funzione di Schechter (todo: fare analitica)\n \"\"\"\n M = np.linspace(Mmin, Mmax, 100)\n return np.sum([(Schechter_unnormed(Mi, omega, alpha=alpha) * np.diff(M)\n [0]) for Mi in M])\n\n\ndef Schechter(M, omega, alpha=-1.07):\n \"\"\"\n Funzione di Schechter normalizzata\n \"\"\"\n return Schechter_unnormed(M, omega, alpha=alpha) / normalise(omega,\n alpha=alpha)\n\n\ndef Mthreshold(DL, mth=27.0):\n \"\"\"\n Magnitudine assoluta di soglia\n \"\"\"\n return mth - 5.0 * np.log10(100000.0 * DL)\n\n\ndef mabs(m, DL):\n return m - 5.0 * np.log10(100000.0 * DL)\n\n\ndef HubbleLaw(D_L, omega):\n return D_L * omega.h / 3000.0\n\n\ndef gaussian(x, x0, sigma):\n return np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 *\n np.pi))\n\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names = ['z', 'h', 'om', 'ol']\n self.bounds = [[0.001, 0.012], [0.5, 1.0], [0.04, 1.0], [0.0, 1.0]]\n self.omega = lal.CreateCosmologicalParameters(0.7, 0.5, 0.5, -1.0, \n 0.0, 0.0)\n self.catalog = catalog\n\n def log_prior(self, x):\n if not np.isfinite(super(completeness, self).log_prior(x)):\n return -np.inf\n else:\n self.omega.h = x['h']\n self.omega.om = x['om']\n self.omega.ol = x['ol']\n zgw = x['z']\n logP = 0.0\n for zi, mi in zip(self.catalog['z'], self.catalog['Bmag']):\n DL = lal.LuminosityDistance(self.omega, zi)\n Mabsi = mabs(mi, DL)\n if Mthreshold(DL) < Mabsi:\n return -np.inf\n else:\n logP += np.log(Schechter(Mabsi, self.omega))\n logP += np.log(lal.ComovingVolumeElement(zi, self.omega))\n return logP\n\n def log_likelihood(self, x):\n logL = 0.0\n zgw = x['z']\n logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,\n dDL))\n logL += logsumexp([gaussian(zgw, zgi, zgi / 10.0) for zgi in self.\n catalog['z']])\n return logL\n\n\nif __name__ == '__main__':\n Gal_cat = GalInABox([190, 200], [-25, -15], u.deg, u.deg, catalog='GLADE')[\n ::100]\n M = completeness(Gal_cat)\n job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)\n job.run()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#from setup_env import *\n#from mmlibrary import *\n\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\n\nfrom mmlibrary import *\n\nimport numpy as np\nimport lal\n\nfrom scipy.special import logsumexp\nimport cpnest, cpnest.model\n\n# Oggetto per test: GW170817\n#GW = SkyCoord('13h07m05.49s', '23d23m02.0s', unit=(u.hourangle, u.deg))\nDL=33.4\ndDL=3.34\n\nGW = SkyCoord(ra = '13h07m05.49s', dec = '23d23m02.0s',\n unit=('hourangle','deg'))\n\n\ndef Mstar(omega):\n '''\n Calcolo magnitudine di taglio Schechter function\n '''\n return -20.47 + 5.0*np.log10(omega.h)\n\ndef Schechter_unnormed(M, omega, alpha):\n '''\n Funzione di Schechter non normalizzata\n '''\n Ms = Mstar(omega)\n tmp = 10**(-0.4*(M-Ms))\n return tmp**(alpha+1.0)*np.exp(-tmp)\n\ndef normalise(omega, alpha, Mmin = -30,Mmax = -10):\n '''\n Normalizzazione funzione di Schechter (todo: fare analitica)\n '''\n M = np.linspace(Mmin, Mmax, 100)\n return np.sum([Schechter_unnormed(Mi, omega, alpha = alpha)*np.diff(M)[0] for Mi in M])\n\ndef Schechter(M, omega, alpha = -1.07):\n '''\n Funzione di Schechter normalizzata\n '''\n return Schechter_unnormed(M, omega, alpha = alpha)/normalise(omega, alpha = alpha)\n\ndef Mthreshold(DL, mth = 27.0):\n '''\n Magnitudine assoluta di soglia\n '''\n return mth - 5.0*np.log10(1e5*DL)\n\ndef mabs(m, DL):\n return m - 5.0*np.log10(1e5*DL)\n\n\ndef HubbleLaw(D_L, omega): # Da rivedere: test solo 1 ordine\n return D_L*omega.h/(3e3) # Sicuro del numero?\n\ndef gaussian(x,x0,sigma):\n return np.exp(-(x-x0)**2/(2*sigma**2))/(sigma*np.sqrt(2*np.pi))\n\nclass completeness(cpnest.model.Model):\n\n def __init__(self, catalog):\n self.names=['z', 'h', 'om', 'ol']\n self.bounds=[[0.001,0.012],\n [0.5,1.],\n [0.04,1.],\n [0.,1.]]\n self.omega = lal.CreateCosmologicalParameters(0.7,0.5,0.5,-1.,0.,0.)\n self.catalog = catalog\n\n\n def log_prior(self, x):\n # controllo finitezza e theta(M-Mth)\n\n if not(np.isfinite(super(completeness, self).log_prior(x))):\n return -np.inf\n else:\n self.omega.h = x['h']\n self.omega.om = x['om']\n self.omega.ol = x['ol']\n zgw = x['z']\n logP = 0.0\n for zi,mi in zip(self.catalog['z'],self.catalog['Bmag']):\n DL = lal.LuminosityDistance(self.omega, zi)\n Mabsi = mabs(mi,DL)\n if Mthreshold(DL) < Mabsi:\n\n return -np.inf\n else:\n # Update parametri cosmologici con simulazione\n\n # Calcolo prior. Ciascuna coordinata è pesata con le probabilità\n # delle coordinate ('banane') GW, così come z.\n # Temporaneamente, è assunta gaussiana intorno a un evento.\n logP += np.log(Schechter(Mabsi, self.omega))\n #log_P_RA = np.log(gaussian(x['ra'],Gal.ra.rad,Gal.ra.rad/100.))\n #log_P_DEC = np.log(gaussian(x['dec'],Gal.dec.rad,Gal.dec.rad/100.))\n logP += np.log(lal.ComovingVolumeElement(zi, self.omega))\n\n return logP\n # PROBLEMA! Come introduco le delta(ra,dec)?\n\n def log_likelihood(self, x):\n logL = 0.0\n zgw = x['z']\n\n logL += np.log(gaussian(lal.LuminosityDistance(self.omega, zgw), DL,dDL))\n logL += logsumexp([gaussian(zgw, zgi, zgi/10.0) for zgi in self.catalog['z']])\n #logL += np.log(gaussian(x['ra'],GW.ra.rad,GW.ra.rad/10.))\n #logL += np.log(gaussian(x['dec'],GW.dec.rad,GW.dec.rad/10.))\n\n return logL\n\nif __name__ == '__main__':\n Gal_cat = GalInABox([190,200],[-25,-15], u.deg, u.deg, catalog='GLADE')[::100]\n M = completeness(Gal_cat)\n\n job = cpnest.CPNest(M, verbose=2, nthreads=4, nlive=1000, maxmcmc=1024)\n job.run()\n# GLADE galaxy catalog\n",
"step-ids": [
2,
9,
13,
15,
16
]
}
|
[
2,
9,
13,
15,
16
] |
# -*- coding: utf-8 -*-
import json
import os
import io
import shutil
import pytest
from chi_annotator.algo_factory.common import TrainingData
from chi_annotator.task_center.config import AnnotatorConfig
from chi_annotator.task_center.data_loader import load_local_data
from chi_annotator.task_center.model import Interpreter
from chi_annotator.task_center.model import Trainer
from tests.utils.txt_to_json import create_tmp_test_jsonfile, rm_tmp_file
class TestTrainer(object):
@classmethod
def setup_class(cls):
""" setup any state specific to the execution of the given class (which
usually contains tests).
"""
pass
@classmethod
def teardown_class(cls):
""" teardown any state that was previously setup with a call to
setup_class.
"""
pass
"""
test Trainer and Interpreter
"""
def ignore_test_load_local_data(self):
"""
test load local json format data
:return:
"""
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
rm_tmp_file("tmp.json")
assert train_data is not None
assert len(train_data.training_examples) == 1000
assert "text" not in train_data.training_examples[0].data
assert "label" in train_data.training_examples[0].data
def ignore_test_load_config(self):
"""
test load config
:return:
"""
config = AnnotatorConfig(\
filename="chi_annotator/user_instance/examples/classify/spam_email_classify_config.json")
assert config["name"] == "email_spam_classification"
def ignor_test_load_default_config(self):
"""
test load default config
:return:
"""
config = AnnotatorConfig()
assert config["config"] == "config.json"
def ignore_test_trainer_init(self):
"""
test trainer
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
def ignore_test_pipeline_flow(self):
"""
test trainer's train func for pipeline
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
# create tmp train set
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
# rm tmp train set
rm_tmp_file("tmp.json")
interpreter = trainer.train(train_data)
assert interpreter is not None
out1 = interpreter.parse(("点连接拿红包啦"))
# test persist and load
persisted_path = trainer.persist(config['path'],
config['project'],
config['fixed_model_name'])
interpreter_loaded = Interpreter.load(persisted_path, config)
out2 = interpreter_loaded.parse("点连接拿红包啦")
assert out1.get("classifylabel").get("name") == out2.get("classifylabel").get("name")
# remove tmp models
shutil.rmtree(config['path'], ignore_errors=True)
def ignore_test_trainer_persist(self):
"""
test pipeline persist, metadata will be saved
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
# char_tokenizer component should been created
assert trainer.pipeline[0] is not None
# create tmp train set
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
# rm tmp train set
rm_tmp_file("tmp.json")
trainer.train(train_data)
persisted_path = trainer.persist(config['path'],
config['project'],
config['fixed_model_name'])
# load persisted metadata
metadata_path = os.path.join(persisted_path, 'metadata.json')
with io.open(metadata_path) as f:
metadata = json.load(f)
assert 'trained_at' in metadata
# rm tmp files and dirs
shutil.rmtree(config['path'], ignore_errors=False)
def ignore_test_train_model_empty_pipeline(self):
"""
train model with no component
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
config['pipeline'] = []
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
rm_tmp_file("tmp.json")
with pytest.raises(ValueError):
trainer = Trainer(config)
trainer.train(train_data)
def ignore_test_handles_pipeline_with_non_existing_component(self):
"""
handle no exist component in pipeline
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
config['pipeline'].append("unknown_component")
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
rm_tmp_file("tmp.json")
with pytest.raises(Exception) as execinfo:
trainer = Trainer(config)
trainer.train(train_data)
assert "Failed to find component" in str(execinfo.value)
def ignore_test_load_and_persist_without_train(self):
"""
test save and load model without train
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
# create tmp train set
tmp_path = create_tmp_test_jsonfile("tmp.json")
train_data = load_local_data(tmp_path)
# rm tmp train set
rm_tmp_file("tmp.json")
# interpreter = trainer.train(train_data)
# test persist and load
persisted_path = trainer.persist(config['path'],
config['project'],
config['fixed_model_name'])
interpreter_loaded = Interpreter.load(persisted_path, config)
assert interpreter_loaded.pipeline
assert interpreter_loaded.parse("hello") is not None
assert interpreter_loaded.parse("Hello today is Monday, again!") is not None
# remove tmp models
shutil.rmtree(config['path'], ignore_errors=False)
def ignore_test_train_with_empty_data(self):
"""
test train with empty train data
:return:
"""
test_config = "tests/data/test_config/test_config.json"
config = AnnotatorConfig(test_config)
trainer = Trainer(config)
assert len(trainer.pipeline) > 0
# create tmp train set
train_data = TrainingData([])
# rm tmp train set
trainer.train(train_data)
# test persist and load
persisted_path = trainer.persist(config['path'],
config['project'],
config['fixed_model_name'])
interpreter_loaded = Interpreter.load(persisted_path, config)
assert interpreter_loaded.pipeline
assert interpreter_loaded.parse("hello") is not None
assert interpreter_loaded.parse("Hello today is Monday, again!") is not None
# remove tmp models
shutil.rmtree(config['path'], ignore_errors=False)
|
normal
|
{
"blob_id": "192c44540018b9e1ab857bdbfba6fdb39bb74431",
"index": 8769,
"step-1": "<mask token>\n\n\nclass TestTrainer(object):\n <mask token>\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n <mask token>\n\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert 'text' not in train_data.training_examples[0].data\n assert 'label' in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(filename=\n 'chi_annotator/user_instance/examples/classify/spam_email_classify_config.json'\n )\n assert config['name'] == 'email_spam_classification'\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config['config'] == 'config.json'\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse('点连接拿红包啦')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse('点连接拿红包啦')\n assert out1.get('classifylabel').get('name') == out2.get(\n 'classifylabel').get('name')\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n assert trainer.pipeline[0] is not None\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n shutil.rmtree(config['path'], ignore_errors=False)\n <mask token>\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'].append('unknown_component')\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert 'Failed to find component' in str(execinfo.value)\n 
<mask token>\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n train_data = TrainingData([])\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n",
"step-2": "<mask token>\n\n\nclass TestTrainer(object):\n\n @classmethod\n def setup_class(cls):\n \"\"\" setup any state specific to the execution of the given class (which\n usually contains tests).\n \"\"\"\n pass\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n <mask token>\n\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert 'text' not in train_data.training_examples[0].data\n assert 'label' in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(filename=\n 'chi_annotator/user_instance/examples/classify/spam_email_classify_config.json'\n )\n assert config['name'] == 'email_spam_classification'\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config['config'] == 'config.json'\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse('点连接拿红包啦')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse('点连接拿红包啦')\n assert out1.get('classifylabel').get('name') == out2.get(\n 'classifylabel').get('name')\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n assert trainer.pipeline[0] is not None\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n shutil.rmtree(config['path'], ignore_errors=False)\n <mask token>\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'].append('unknown_component')\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with 
pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert 'Failed to find component' in str(execinfo.value)\n\n def ignore_test_load_and_persist_without_train(self):\n \"\"\"\n test save and load model without train\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n train_data = TrainingData([])\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n",
"step-3": "<mask token>\n\n\nclass TestTrainer(object):\n\n @classmethod\n def setup_class(cls):\n \"\"\" setup any state specific to the execution of the given class (which\n usually contains tests).\n \"\"\"\n pass\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n <mask token>\n\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert 'text' not in train_data.training_examples[0].data\n assert 'label' in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(filename=\n 'chi_annotator/user_instance/examples/classify/spam_email_classify_config.json'\n )\n assert config['name'] == 'email_spam_classification'\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config['config'] == 'config.json'\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse('点连接拿红包啦')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse('点连接拿红包啦')\n assert out1.get('classifylabel').get('name') == out2.get(\n 'classifylabel').get('name')\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n assert trainer.pipeline[0] is not None\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_model_empty_pipeline(self):\n \"\"\"\n train model with no component\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'] = []\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(ValueError):\n trainer = Trainer(config)\n 
trainer.train(train_data)\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'].append('unknown_component')\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert 'Failed to find component' in str(execinfo.value)\n\n def ignore_test_load_and_persist_without_train(self):\n \"\"\"\n test save and load model without train\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n train_data = TrainingData([])\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n",
"step-4": "<mask token>\n\n\nclass TestTrainer(object):\n\n @classmethod\n def setup_class(cls):\n \"\"\" setup any state specific to the execution of the given class (which\n usually contains tests).\n \"\"\"\n pass\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n \"\"\"\n test Trainer and Interpreter\n \"\"\"\n\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert 'text' not in train_data.training_examples[0].data\n assert 'label' in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(filename=\n 'chi_annotator/user_instance/examples/classify/spam_email_classify_config.json'\n )\n assert config['name'] == 'email_spam_classification'\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config['config'] == 'config.json'\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse('点连接拿红包啦')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse('点连接拿红包啦')\n assert out1.get('classifylabel').get('name') == out2.get(\n 'classifylabel').get('name')\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n assert trainer.pipeline[0] is not None\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_model_empty_pipeline(self):\n \"\"\"\n train model with no component\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'] = []\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(ValueError):\n trainer = 
Trainer(config)\n trainer.train(train_data)\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n config['pipeline'].append('unknown_component')\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n with pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert 'Failed to find component' in str(execinfo.value)\n\n def ignore_test_load_and_persist_without_train(self):\n \"\"\"\n test save and load model without train\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n tmp_path = create_tmp_test_jsonfile('tmp.json')\n train_data = load_local_data(tmp_path)\n rm_tmp_file('tmp.json')\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = 'tests/data/test_config/test_config.json'\n config = AnnotatorConfig(test_config)\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n train_data = TrainingData([])\n trainer.train(train_data)\n persisted_path = trainer.persist(config['path'], config['project'],\n config['fixed_model_name'])\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse('hello') is not None\n assert interpreter_loaded.parse('Hello today is Monday, again!'\n ) is not None\n shutil.rmtree(config['path'], ignore_errors=False)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport json\nimport os\nimport io\nimport shutil\n\nimport pytest\n\nfrom chi_annotator.algo_factory.common import TrainingData\nfrom chi_annotator.task_center.config import AnnotatorConfig\nfrom chi_annotator.task_center.data_loader import load_local_data\nfrom chi_annotator.task_center.model import Interpreter\nfrom chi_annotator.task_center.model import Trainer\nfrom tests.utils.txt_to_json import create_tmp_test_jsonfile, rm_tmp_file\n\n\nclass TestTrainer(object):\n\n @classmethod\n def setup_class(cls):\n \"\"\" setup any state specific to the execution of the given class (which\n usually contains tests).\n \"\"\"\n pass\n\n @classmethod\n def teardown_class(cls):\n \"\"\" teardown any state that was previously setup with a call to\n setup_class.\n \"\"\"\n pass\n\n \"\"\"\n test Trainer and Interpreter\n \"\"\"\n def ignore_test_load_local_data(self):\n \"\"\"\n test load local json format data\n :return:\n \"\"\"\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert \"text\" not in train_data.training_examples[0].data\n assert \"label\" in train_data.training_examples[0].data\n\n def ignore_test_load_config(self):\n \"\"\"\n test load config\n :return:\n \"\"\"\n config = AnnotatorConfig(\\\n filename=\"chi_annotator/user_instance/examples/classify/spam_email_classify_config.json\")\n assert config[\"name\"] == \"email_spam_classification\"\n\n def ignor_test_load_default_config(self):\n \"\"\"\n test load default config\n :return:\n \"\"\"\n config = AnnotatorConfig()\n assert config[\"config\"] == \"config.json\"\n\n def ignore_test_trainer_init(self):\n \"\"\"\n test trainer\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n\n def ignore_test_pipeline_flow(self):\n \"\"\"\n test trainer's train func for pipeline\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # create tmp train set\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n # rm tmp train set\n rm_tmp_file(\"tmp.json\")\n\n interpreter = trainer.train(train_data)\n assert interpreter is not None\n out1 = interpreter.parse((\"点连接拿红包啦\"))\n\n # test persist and load\n persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n\n interpreter_loaded = Interpreter.load(persisted_path, config)\n out2 = interpreter_loaded.parse(\"点连接拿红包啦\")\n assert out1.get(\"classifylabel\").get(\"name\") == out2.get(\"classifylabel\").get(\"name\")\n\n # remove tmp models\n shutil.rmtree(config['path'], ignore_errors=True)\n\n def ignore_test_trainer_persist(self):\n \"\"\"\n test pipeline persist, metadata will be saved\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # char_tokenizer component should been created\n assert trainer.pipeline[0] is not None\n # create tmp train set\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n # rm tmp train set\n rm_tmp_file(\"tmp.json\")\n\n trainer.train(train_data)\n 
persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n # load persisted metadata\n metadata_path = os.path.join(persisted_path, 'metadata.json')\n with io.open(metadata_path) as f:\n metadata = json.load(f)\n assert 'trained_at' in metadata\n # rm tmp files and dirs\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_model_empty_pipeline(self):\n \"\"\"\n train model with no component\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n config['pipeline'] = []\n\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n\n with pytest.raises(ValueError):\n trainer = Trainer(config)\n trainer.train(train_data)\n\n def ignore_test_handles_pipeline_with_non_existing_component(self):\n \"\"\"\n handle no exist component in pipeline\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n config['pipeline'].append(\"unknown_component\")\n\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n\n with pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert \"Failed to find component\" in str(execinfo.value)\n\n def ignore_test_load_and_persist_without_train(self):\n \"\"\"\n test save and load model without train\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # create tmp train set\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n # rm tmp train set\n rm_tmp_file(\"tmp.json\")\n\n # interpreter = trainer.train(train_data)\n # test persist and load\n persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n\n interpreter_loaded = Interpreter.load(persisted_path, config)\n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse(\"hello\") is not None\n assert interpreter_loaded.parse(\"Hello today is Monday, again!\") is not None\n # remove tmp models\n shutil.rmtree(config['path'], ignore_errors=False)\n\n def ignore_test_train_with_empty_data(self):\n \"\"\"\n test train with empty train data\n :return:\n \"\"\"\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n\n trainer = Trainer(config)\n assert len(trainer.pipeline) > 0\n # create tmp train set\n\n train_data = TrainingData([])\n # rm tmp train set\n\n trainer.train(train_data)\n # test persist and load\n persisted_path = trainer.persist(config['path'],\n config['project'],\n config['fixed_model_name'])\n\n interpreter_loaded = Interpreter.load(persisted_path, config)\n \n assert interpreter_loaded.pipeline\n assert interpreter_loaded.parse(\"hello\") is not None\n assert interpreter_loaded.parse(\"Hello today is Monday, again!\") is not None\n \n # remove tmp models\n shutil.rmtree(config['path'], ignore_errors=False)\n\n\n\n",
"step-ids": [
10,
12,
13,
14,
16
]
}
|
[
10,
12,
13,
14,
16
] |
import json
import os
import time
import urllib.request
import pandas as pd
from lib.db.dbutils import (
check_blacklisted,
check_ticker_exists,
get_db,
update_blacklisted,
)
def get_data(url, delay=20):
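    # Poll until the response no longer carries a "Note" field (Alpha Vantage adds it when the rate limit is hit).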
while True:
df = json.loads(urllib.request.urlopen(url).read())
if df.get("Note", 0) == 0:
break
        time.sleep(delay)
return df
def grab_a_ticker(symbol="MSFT", apiKey=None):
if apiKey is None:
apiKey = os.environ.get("API_KEY")
# Check if ticker already exists in the database
if not check_ticker_exists(symbol) and not check_blacklisted(symbol):
requestUrl = r"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}"
metaDataUrl = r"https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}"
data = get_data(requestUrl.format(symbol, apiKey))
metaData = get_data(metaDataUrl.format(symbol, apiKey))
df = pd.DataFrame(
pd.DataFrame(data.get("Time Series (Daily)")).transpose()[
"4. close"
]
).reset_index()
df.columns = ["Date", "Price"]
df["Symbol"] = data["Meta Data"]["2. Symbol"]
if len(metaData["bestMatches"]) > 0:
met_df = (
pd.DataFrame(metaData["bestMatches"][0], index=[0])[
["1. symbol", "2. name", "3. type", "4. region"]
]
.reset_index()
.drop(["index"], axis=1)
)
met_df.columns = ["Symbol", "Name", "Type", "Region"]
else:
print(metaData.keys())
met_df = pd.DataFrame()
try:
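            # Sanity-check that the metadata symbol matches the time-series symbol before writing either table; a mismatch or write error blacklists the ticker.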
assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol
df.to_sql(
"time_series", con=get_db(), if_exists="append", index=False
)
met_df.to_sql(
"stock_meta_data",
con=get_db(),
if_exists="append",
index=False,
)
except AssertionError as e:
print(
"'Couldn't get it right with assertion error: {}".format(
str(e)
)
)
update_blacklisted(symbol)
except Exception as e:
print(str(e))
update_blacklisted(symbol)
else:
print("Symbol {} already exists.".format(symbol))
|
normal
|
{
"blob_id": "3c8e6a93c4d5616b9199cf473d298bfa2dc191af",
"index": 9971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef grab_a_ticker(symbol='MSFT', apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get('API_KEY')\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = (\n 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'\n )\n metaDataUrl = (\n 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'\n )\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).\n transpose()['4. close']).reset_index()\n df.columns = ['Date', 'Price']\n df['Symbol'] = data['Meta Data']['2. Symbol']\n if len(metaData['bestMatches']) > 0:\n met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[\n '1. symbol', '2. name', '3. type', '4. region']].reset_index(\n ).drop(['index'], axis=1)\n met_df.columns = ['Symbol', 'Name', 'Type', 'Region']\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql('time_series', con=get_db(), if_exists='append',\n index=False)\n met_df.to_sql('stock_meta_data', con=get_db(), if_exists=\n 'append', index=False)\n except AssertionError as e:\n print(\"'Couldn't get it right with assertion error: {}\".format(\n str(e)))\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print('Symbol {} already exists.'.format(symbol))\n",
"step-3": "<mask token>\n\n\ndef get_data(url, delay=20):\n while True:\n df = json.loads(urllib.request.urlopen(url).read())\n if df.get('Note', 0) == 0:\n break\n time.sleep(20)\n return df\n\n\ndef grab_a_ticker(symbol='MSFT', apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get('API_KEY')\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = (\n 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'\n )\n metaDataUrl = (\n 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'\n )\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).\n transpose()['4. close']).reset_index()\n df.columns = ['Date', 'Price']\n df['Symbol'] = data['Meta Data']['2. Symbol']\n if len(metaData['bestMatches']) > 0:\n met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[\n '1. symbol', '2. name', '3. type', '4. region']].reset_index(\n ).drop(['index'], axis=1)\n met_df.columns = ['Symbol', 'Name', 'Type', 'Region']\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql('time_series', con=get_db(), if_exists='append',\n index=False)\n met_df.to_sql('stock_meta_data', con=get_db(), if_exists=\n 'append', index=False)\n except AssertionError as e:\n print(\"'Couldn't get it right with assertion error: {}\".format(\n str(e)))\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print('Symbol {} already exists.'.format(symbol))\n",
"step-4": "import json\nimport os\nimport time\nimport urllib.request\nimport pandas as pd\nfrom lib.db.dbutils import check_blacklisted, check_ticker_exists, get_db, update_blacklisted\n\n\ndef get_data(url, delay=20):\n while True:\n df = json.loads(urllib.request.urlopen(url).read())\n if df.get('Note', 0) == 0:\n break\n time.sleep(20)\n return df\n\n\ndef grab_a_ticker(symbol='MSFT', apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get('API_KEY')\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = (\n 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}'\n )\n metaDataUrl = (\n 'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}'\n )\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(pd.DataFrame(data.get('Time Series (Daily)')).\n transpose()['4. close']).reset_index()\n df.columns = ['Date', 'Price']\n df['Symbol'] = data['Meta Data']['2. Symbol']\n if len(metaData['bestMatches']) > 0:\n met_df = pd.DataFrame(metaData['bestMatches'][0], index=[0])[[\n '1. symbol', '2. name', '3. type', '4. region']].reset_index(\n ).drop(['index'], axis=1)\n met_df.columns = ['Symbol', 'Name', 'Type', 'Region']\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql('time_series', con=get_db(), if_exists='append',\n index=False)\n met_df.to_sql('stock_meta_data', con=get_db(), if_exists=\n 'append', index=False)\n except AssertionError as e:\n print(\"'Couldn't get it right with assertion error: {}\".format(\n str(e)))\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print('Symbol {} already exists.'.format(symbol))\n",
"step-5": "import json\nimport os\nimport time\nimport urllib.request\n\nimport pandas as pd\n\nfrom lib.db.dbutils import (\n check_blacklisted,\n check_ticker_exists,\n get_db,\n update_blacklisted,\n)\n\n\ndef get_data(url, delay=20):\n while True:\n df = json.loads(urllib.request.urlopen(url).read())\n if df.get(\"Note\", 0) == 0:\n break\n time.sleep(20)\n return df\n\n\ndef grab_a_ticker(symbol=\"MSFT\", apiKey=None):\n if apiKey is None:\n apiKey = os.environ.get(\"API_KEY\")\n # Check if ticker already exists in the database\n if not check_ticker_exists(symbol) and not check_blacklisted(symbol):\n requestUrl = r\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={}&outputsize=full&apikey={}\"\n metaDataUrl = r\"https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={}&apikey={}\"\n data = get_data(requestUrl.format(symbol, apiKey))\n metaData = get_data(metaDataUrl.format(symbol, apiKey))\n df = pd.DataFrame(\n pd.DataFrame(data.get(\"Time Series (Daily)\")).transpose()[\n \"4. close\"\n ]\n ).reset_index()\n\n df.columns = [\"Date\", \"Price\"]\n df[\"Symbol\"] = data[\"Meta Data\"][\"2. Symbol\"]\n if len(metaData[\"bestMatches\"]) > 0:\n met_df = (\n pd.DataFrame(metaData[\"bestMatches\"][0], index=[0])[\n [\"1. symbol\", \"2. name\", \"3. type\", \"4. region\"]\n ]\n .reset_index()\n .drop([\"index\"], axis=1)\n )\n met_df.columns = [\"Symbol\", \"Name\", \"Type\", \"Region\"]\n else:\n print(metaData.keys())\n met_df = pd.DataFrame()\n\n try:\n assert met_df.iloc[0, :].Symbol == df.iloc[0, :].Symbol\n df.to_sql(\n \"time_series\", con=get_db(), if_exists=\"append\", index=False\n )\n met_df.to_sql(\n \"stock_meta_data\",\n con=get_db(),\n if_exists=\"append\",\n index=False,\n )\n except AssertionError as e:\n print(\n \"'Couldn't get it right with assertion error: {}\".format(\n str(e)\n )\n )\n update_blacklisted(symbol)\n except Exception as e:\n print(str(e))\n update_blacklisted(symbol)\n else:\n print(\"Symbol {} already exists.\".format(symbol))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
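# Accepted spellings for yes/no answers, country names, and wars with their start years.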
Ylist = ['yes', 'Yes', 'Y', 'y']
Nlist = ['no', 'No', 'N', 'n']
America = ['America', 'america', 'amer', 'rica']
TRW = ['1775', 'The Revolutionary war', 'the Revolutionary war', 'the revolutionary war', 'The Revolutionary War',
'trw', 'Trw', 'TRW']
TCW = ['1861', 'The civil war', 'The civil War', 'The Civil war', 'The Civil war', 'The civil War', 'The Civil War',
'TCW', 'tcw', 'Tcw']
TGW = ['1917', 'The Great War', 'the great war', 'the great War', 'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1',
'wW1', 'World War One', 'World war 1']
WW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two', 'World War 2', 'World War Two',
'world war two', 'world war two']
# Russia
Russia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']
RJW = ['1904', 'TRJW', 'trjw']
|
normal
|
{
"blob_id": "6e07dcc3f3b8c7fbf8ce8d481b9612e7496967bd",
"index": 8316,
"step-1": "<mask token>\n",
"step-2": "Ylist = ['yes', 'Yes', 'Y', 'y']\nNlist = ['no', 'No', 'N', 'n']\nAmerica = ['America', 'america', 'amer', 'rica']\nTRW = ['1775', 'The Revolutionary war', 'the Revolutionary war',\n 'the revolutionary war', 'The Revolutionary War', 'trw', 'Trw', 'TRW']\nTCW = ['1861', 'The civil war', 'The civil War', 'The Civil war',\n 'The Civil war', 'The civil War', 'The Civil War', 'TCW', 'tcw', 'Tcw']\nTGW = ['1917', 'The Great War', 'the great war', 'the great War',\n 'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1', 'wW1',\n 'World War One', 'World war 1']\nWW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two',\n 'World War 2', 'World War Two', 'world war two', 'world war two']\nRussia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']\nRJW = ['1904', 'TRJW', 'trjw']\n",
"step-3": "Ylist = ['yes', 'Yes', 'Y', 'y']\r\nNlist = ['no', 'No', 'N', 'n']\r\nAmerica = ['America', 'america', 'amer', 'rica']\r\nTRW = ['1775', 'The Revolutionary war', 'the Revolutionary war', 'the revolutionary war', 'The Revolutionary War',\r\n 'trw', 'Trw', 'TRW']\r\nTCW = ['1861', 'The civil war', 'The civil War', 'The Civil war', 'The Civil war', 'The civil War', 'The Civil War',\r\n 'TCW', 'tcw', 'Tcw']\r\nTGW = ['1917', 'The Great War', 'the great war', 'the great War', 'the Great war', 'The great war', 'WW1', 'ww1', 'Ww1',\r\n 'wW1', 'World War One', 'World war 1']\r\nWW2 = ['1941', 'WW2', 'ww2', 'Ww2', 'W2', 'World war two', 'World war two', 'World War 2', 'World War Two',\r\n 'world war two', 'world war two']\r\n# Russia\r\nRussia = ['Russia', 'russia', 'rusia', 'ra', 'Ra', 'Rusia', 'Ru']\r\nRJW = ['1904', 'TRJW', 'trjw']\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding=utf-8
from __future__ import print_function
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
basedir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.append('trainer')
sys.path.append('downloader')
from gen.gen_captcha import gen_dataset, load_templates, candidates
from gen.img_process import grey_to_binary, clear_paper_noise
from model.nn import load_model_nn
from model.common import find_model_ckpt
import tensorflow as tf
from gen.utils import vec2str
import numpy as np
from PIL import Image
from downloader import download
def show_im(dataset):
data = np.uint8(dataset[0]).reshape((30, 96)) * 255
im = Image.fromarray(data)
im.show()
def test_model(captcha):
im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))
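    # Preprocess the captcha: greyscale, binarise, then clear residual paper noise before classification.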
im = im.convert('L')
im = grey_to_binary(im)
im = clear_paper_noise(im, 5)
# im.show()
# templates = load_templates(os.path.join('trainer', 'templates'))
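    # load_model_nn() returns the graph plus the input/keep_prob placeholders, prediction op and saver used to restore the checkpoint.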
model = load_model_nn()
x = model['x']
keep_prob = model['keep_prob']
saver = model['saver']
prediction = model['prediction']
graph = model['graph']
model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint'))
# print("Used the model:", model_ckpt_path)
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
saver.restore(session, model_ckpt_path)
# dataset, labels = gen_dataset(1, templates) # generate one image
dataset = []
dataset.append(np.asarray(im.convert("L")).reshape([30 * 96]) / 255)
label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0}, session=session)[0]
string = ''
for i in range(4):
string += chr(label[i] + ord('0'))
print(string)
if __name__ == "__main__":
if len(sys.argv) <= 1:
captcha = download(1)[0]
else:
captcha = sys.argv[1]
test_model(captcha)
|
normal
|
{
"blob_id": "8e34b5e15c5b6107d6841e7b567abf967c631f1b",
"index": 7440,
"step-1": "<mask token>\n\n\ndef show_im(dataset):\n data = np.uint8(dataset[0]).reshape((30, 96)) * 255\n im = Image.fromarray(data)\n im.show()\n\n\ndef test_model(captcha):\n im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))\n im = im.convert('L')\n im = grey_to_binary(im)\n im = clear_paper_noise(im, 5)\n model = load_model_nn()\n x = model['x']\n keep_prob = model['keep_prob']\n saver = model['saver']\n prediction = model['prediction']\n graph = model['graph']\n model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint')\n )\n with tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n saver.restore(session, model_ckpt_path)\n dataset = []\n dataset.append(np.asarray(im.convert('L')).reshape([30 * 96]) / 255)\n label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0},\n session=session)[0]\n string = ''\n for i in range(4):\n string += chr(label[i] + ord('0'))\n print(string)\n\n\n<mask token>\n",
"step-2": "<mask token>\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append('trainer')\nsys.path.append('downloader')\n<mask token>\n\n\ndef show_im(dataset):\n data = np.uint8(dataset[0]).reshape((30, 96)) * 255\n im = Image.fromarray(data)\n im.show()\n\n\ndef test_model(captcha):\n im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))\n im = im.convert('L')\n im = grey_to_binary(im)\n im = clear_paper_noise(im, 5)\n model = load_model_nn()\n x = model['x']\n keep_prob = model['keep_prob']\n saver = model['saver']\n prediction = model['prediction']\n graph = model['graph']\n model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint')\n )\n with tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n saver.restore(session, model_ckpt_path)\n dataset = []\n dataset.append(np.asarray(im.convert('L')).reshape([30 * 96]) / 255)\n label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0},\n session=session)[0]\n string = ''\n for i in range(4):\n string += chr(label[i] + ord('0'))\n print(string)\n\n\nif __name__ == '__main__':\n if len(sys.argv) <= 1:\n captcha = download(1)[0]\n else:\n captcha = sys.argv[1]\n test_model(captcha)\n",
"step-3": "<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nbasedir = os.getcwd()\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append('trainer')\nsys.path.append('downloader')\n<mask token>\n\n\ndef show_im(dataset):\n data = np.uint8(dataset[0]).reshape((30, 96)) * 255\n im = Image.fromarray(data)\n im.show()\n\n\ndef test_model(captcha):\n im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))\n im = im.convert('L')\n im = grey_to_binary(im)\n im = clear_paper_noise(im, 5)\n model = load_model_nn()\n x = model['x']\n keep_prob = model['keep_prob']\n saver = model['saver']\n prediction = model['prediction']\n graph = model['graph']\n model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint')\n )\n with tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n saver.restore(session, model_ckpt_path)\n dataset = []\n dataset.append(np.asarray(im.convert('L')).reshape([30 * 96]) / 255)\n label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0},\n session=session)[0]\n string = ''\n for i in range(4):\n string += chr(label[i] + ord('0'))\n print(string)\n\n\nif __name__ == '__main__':\n if len(sys.argv) <= 1:\n captcha = download(1)[0]\n else:\n captcha = sys.argv[1]\n test_model(captcha)\n",
"step-4": "from __future__ import print_function\nimport os\nimport sys\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nbasedir = os.getcwd()\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append('trainer')\nsys.path.append('downloader')\nfrom gen.gen_captcha import gen_dataset, load_templates, candidates\nfrom gen.img_process import grey_to_binary, clear_paper_noise\nfrom model.nn import load_model_nn\nfrom model.common import find_model_ckpt\nimport tensorflow as tf\nfrom gen.utils import vec2str\nimport numpy as np\nfrom PIL import Image\nfrom downloader import download\n\n\ndef show_im(dataset):\n data = np.uint8(dataset[0]).reshape((30, 96)) * 255\n im = Image.fromarray(data)\n im.show()\n\n\ndef test_model(captcha):\n im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))\n im = im.convert('L')\n im = grey_to_binary(im)\n im = clear_paper_noise(im, 5)\n model = load_model_nn()\n x = model['x']\n keep_prob = model['keep_prob']\n saver = model['saver']\n prediction = model['prediction']\n graph = model['graph']\n model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint')\n )\n with tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n saver.restore(session, model_ckpt_path)\n dataset = []\n dataset.append(np.asarray(im.convert('L')).reshape([30 * 96]) / 255)\n label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0},\n session=session)[0]\n string = ''\n for i in range(4):\n string += chr(label[i] + ord('0'))\n print(string)\n\n\nif __name__ == '__main__':\n if len(sys.argv) <= 1:\n captcha = download(1)[0]\n else:\n captcha = sys.argv[1]\n test_model(captcha)\n",
"step-5": "# coding=utf-8\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nbasedir = os.getcwd()\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append('trainer')\nsys.path.append('downloader')\n\nfrom gen.gen_captcha import gen_dataset, load_templates, candidates\nfrom gen.img_process import grey_to_binary, clear_paper_noise\nfrom model.nn import load_model_nn\nfrom model.common import find_model_ckpt\nimport tensorflow as tf\nfrom gen.utils import vec2str\nimport numpy as np\nfrom PIL import Image\nfrom downloader import download\n\ndef show_im(dataset):\n data = np.uint8(dataset[0]).reshape((30, 96)) * 255\n im = Image.fromarray(data)\n im.show()\n\ndef test_model(captcha):\n im = Image.open(os.path.join(basedir, 'downloader', 'captchas', captcha))\n im = im.convert('L')\n im = grey_to_binary(im)\n im = clear_paper_noise(im, 5)\n # im.show()\n # templates = load_templates(os.path.join('trainer', 'templates'))\n\n model = load_model_nn()\n x = model['x']\n keep_prob = model['keep_prob']\n saver = model['saver']\n prediction = model['prediction']\n graph = model['graph']\n model_ckpt_path, _ = find_model_ckpt(os.path.join('trainer', '.checkpoint'))\n # print(\"Used the model:\", model_ckpt_path)\n\n\n with tf.Session(graph=graph) as session:\n tf.global_variables_initializer().run()\n saver.restore(session, model_ckpt_path)\n\n # dataset, labels = gen_dataset(1, templates) # generate one image\n dataset = []\n dataset.append(np.asarray(im.convert(\"L\")).reshape([30 * 96]) / 255)\n\n label = prediction.eval(feed_dict={x: dataset, keep_prob: 1.0}, session=session)[0]\n string = ''\n for i in range(4):\n string += chr(label[i] + ord('0'))\n print(string)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) <= 1:\n captcha = download(1)[0]\n else:\n captcha = sys.argv[1]\n test_model(captcha)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from app01 import models
from rest_framework.views import APIView
# from api.utils.response import BaseResponse
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from api.serializers.course import DegreeCourseSerializer
# List all degree courses
class DegreeCourseView(APIView):
def get(self,request,*args,**kwargs):
response = {'code':100,'data':None,'error':None}
try:
            # fetch the data from the database
            degreecourse_list = models.DegreeCourse.objects.all()
            # pagination (currently disabled)
            # page = PageNumberPagination()
            # course_list = page.paginate_queryset(queryset,request,self)
            # serialize the (paginated) result
ser_obj = DegreeCourseSerializer(degreecourse_list,many=True)
response['data'] = ser_obj.data
except Exception as e:
response['error'] = '获取数据失败'
return Response(response)
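# Retrieve a single degree course by its primary key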
class DegreeCourseDetailView(APIView):
def get(self, request, pk, *args, **kwargs):
response = {'code': 100, 'data': None, 'error': None}
try:
degree_course = models.DegreeCourse.objects.filter(id=pk).first()
ser = DegreeCourseSerializer(degree_course)
response['data'] = ser.data
except Exception as e:
response['code'] = 500
response['error'] = '获取数据失败'
return Response(response)
|
normal
|
{
"blob_id": "2b3f8b1ac4735785683c00f6e6ced85d201de53f",
"index": 8567,
"step-1": "<mask token>\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-2": "<mask token>\n\n\nclass DegreeCourseView(APIView):\n <mask token>\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-3": "<mask token>\n\n\nclass DegreeCourseView(APIView):\n\n def get(self, request, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degreecourse_list = models.DegreeCourse.objects.all()\n ser_obj = DegreeCourseSerializer(degreecourse_list, many=True)\n response['data'] = ser_obj.data\n except Exception as e:\n response['error'] = '获取数据失败'\n return Response(response)\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-4": "from app01 import models\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom api.serializers.course import DegreeCourseSerializer\n\n\nclass DegreeCourseView(APIView):\n\n def get(self, request, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degreecourse_list = models.DegreeCourse.objects.all()\n ser_obj = DegreeCourseSerializer(degreecourse_list, many=True)\n response['data'] = ser_obj.data\n except Exception as e:\n response['error'] = '获取数据失败'\n return Response(response)\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n",
"step-5": "from app01 import models\nfrom rest_framework.views import APIView\n# from api.utils.response import BaseResponse\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom api.serializers.course import DegreeCourseSerializer\n\n\n# 查询所有学位课程\n\nclass DegreeCourseView(APIView):\n\n def get(self,request,*args,**kwargs):\n response = {'code':100,'data':None,'error':None}\n\n try:\n # 从数据库获取数据\n degreecourse_list = models.DegreeCourse.objects.all()\n\n # 分页\n # page = PageNumberPagination()\n # course_list = page.paginate_queryset(queryset,request,self)\n\n # 分页之后的结果执行序列化\n ser_obj = DegreeCourseSerializer(degreecourse_list,many=True)\n\n response['data'] = ser_obj.data\n except Exception as e:\n\n response['error'] = '获取数据失败'\n\n return Response(response)\n\n\n\n\nclass DegreeCourseDetailView(APIView):\n\n def get(self, request, pk, *args, **kwargs):\n response = {'code': 100, 'data': None, 'error': None}\n try:\n degree_course = models.DegreeCourse.objects.filter(id=pk).first()\n\n ser = DegreeCourseSerializer(degree_course)\n response['data'] = ser.data\n except Exception as e:\n response['code'] = 500\n response['error'] = '获取数据失败'\n return Response(response)\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.contrib.auth.models import User
from rest_framework.serializers import ModelSerializer
from app_calendar.models import Holiday, Country, Event, User
class CountrySerializer(ModelSerializer):
class Meta:
model = Country
fields = '__all__'
class UserSerializer(ModelSerializer):
class Meta:
model = User
fields = '__all__'
class EventSerializer(ModelSerializer):
class Meta:
model = Event
fields = '__all__'
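# HolidaySerializerRead nests the full Country representation for reads;
# HolidaySerializerWrite below keeps the default primary-key field for country.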
class HolidaySerializerRead(ModelSerializer):
country = CountrySerializer()
class Meta:
model = Holiday
fields = '__all__'
class HolidaySerializerWrite(ModelSerializer):
class Meta:
model = Holiday
fields = '__all__'
|
normal
|
{
"blob_id": "5b366b0f6813f686600df9da4a17f190f034a10c",
"index": 2046,
"step-1": "<mask token>\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass UserSerializer(ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-3": "<mask token>\n\n\nclass CountrySerializer(ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = '__all__'\n\n\nclass UserSerializer(ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-4": "from django.contrib.auth.models import User\nfrom rest_framework.serializers import ModelSerializer\nfrom app_calendar.models import Holiday, Country, Event, User\n\n\nclass CountrySerializer(ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = '__all__'\n\n\nclass UserSerializer(ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n\nclass EventSerializer(ModelSerializer):\n\n\n class Meta:\n model = Event\n fields = '__all__'\n\n\nclass HolidaySerializerRead(ModelSerializer):\n country = CountrySerializer()\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n\n\nclass HolidaySerializerWrite(ModelSerializer):\n\n\n class Meta:\n model = Holiday\n fields = '__all__'\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
"""
时间最优
思路:
将和为目标值的那 两个 整数定义为 num1 和 num2
创建一个新字典,内容存在数组中的数字及索引
将数组nums转换为字典,
遍历字典, num1为字典中的元素(其实与数组总的元素一样),
num2 为 target减去num1, 判定num2是否在字典中,如果存在,返回字典中num2的值(也就是在数组nums中的下标)和 i(也就是num1在数组中的下标)
如果不存在,设置字典num1的值为i
"""
def two_sum(nums, target):
dct = {}
for i, num1 in enumerate(nums):
num2 = target - num1
if num2 in dct:
return [dct[num2], i]
dct[num1] = i
print(two_sum([14, 2, 31, 4], 6))
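# expected output: [1, 3]  (nums[1] + nums[3] == 2 + 4 == 6)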
|
normal
|
{
"blob_id": "dac8dbb0eba78d4f8dfbe3284325735324a87dc2",
"index": 8674,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef two_sum(nums, target):\n dct = {}\n for i, num1 in enumerate(nums):\n num2 = target - num1\n if num2 in dct:\n return [dct[num2], i]\n dct[num1] = i\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef two_sum(nums, target):\n dct = {}\n for i, num1 in enumerate(nums):\n num2 = target - num1\n if num2 in dct:\n return [dct[num2], i]\n dct[num1] = i\n\n\nprint(two_sum([14, 2, 31, 4], 6))\n",
"step-4": "\"\"\"\r\n时间最优\r\n\r\n思路:\r\n将和为目标值的那 两个 整数定义为 num1 和 num2\r\n创建一个新字典,内容存在数组中的数字及索引\r\n将数组nums转换为字典,\r\n遍历字典, num1为字典中的元素(其实与数组总的元素一样),\r\nnum2 为 target减去num1, 判定num2是否在字典中,如果存在,返回字典中num2的值(也就是在数组nums中的下标)和 i(也就是num1在数组中的下标)\r\n如果不存在,设置字典num1的值为i\r\n\"\"\"\r\n\r\ndef two_sum(nums, target):\r\n dct = {}\r\n for i, num1 in enumerate(nums):\r\n num2 = target - num1\r\n if num2 in dct:\r\n return [dct[num2], i]\r\n dct[num1] = i\r\n\r\n\r\nprint(two_sum([14, 2, 31, 4], 6))\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
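# Blog URL patterns: post index, post detail by slug, and posts filtered by tag.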
urlpatterns = [path('', views.PostList.as_view(), name='blog_index'), path(
'<slug:slug>/', views.post_detail, name='post_detail'), path(
'tag/<slug:slug>/', views.TagIndexView.as_view(), name='tag')]
|
normal
|
{
"blob_id": "09ea684cfb6f0a521d3bdadf977d9385636bdc83",
"index": 7150,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.PostList.as_view(), name='blog_index'), path(\n '<slug:slug>/', views.post_detail, name='post_detail'), path(\n 'tag/<slug:slug>/', views.TagIndexView.as_view(), name='tag')]\n",
"step-3": "from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . import views\nurlpatterns = [path('', views.PostList.as_view(), name='blog_index'), path(\n '<slug:slug>/', views.post_detail, name='post_detail'), path(\n 'tag/<slug:slug>/', views.TagIndexView.as_view(), name='tag')]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import socket
import time
import sys
def main():
if len(sys.argv) != 2:
print("usage : %s port")
sys.exit()
port = int(sys.argv[1])
count = 0
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
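    # SO_BROADCAST is required to send to the 255.255.255.255 broadcast address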
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.settimeout(2)
sock.bind(('', port))
sock.sendto(bytes("IBORN", "utf-8"), ('255.255.255.255', port))
lifetime = time.time() + 10
while time.time() < lifetime:
try:
message, address = sock.recvfrom(1024)
message = message.decode("utf-8")
print("Message : %s from : %s" % (message, str(address)))
if message == "IBORN":
sock.sendto(bytes("ILIVE", "utf-8"), address)
print(address)
me = (socket.gethostbyname(socket.gethostname()), sock.getsockname()[1])
if address != me:
count += 1
print("Current count of copies : %s" % count)
elif message == "ILIVE":
if address != me:
count += 1
print("Current count of copies : %s" % count)
elif message == "IEXIT":
if address != me:
count -= 1
print("Current count of copies : %s" % count)
except socket.timeout:
print("No new messages in 2 seconds.")
time.sleep(1)
sock.sendto(bytes("IEXIT", "utf-8"), ('255.255.255.255', port))
print("Count at exit : %s" % count)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "68b9f7317f7c6dcda791338ee642dffb653ac694",
"index": 4804,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n if len(sys.argv) != 2:\n print('usage : %s port')\n sys.exit()\n port = int(sys.argv[1])\n count = 0\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.settimeout(2)\n sock.bind(('', port))\n sock.sendto(bytes('IBORN', 'utf-8'), ('255.255.255.255', port))\n lifetime = time.time() + 10\n while time.time() < lifetime:\n try:\n message, address = sock.recvfrom(1024)\n message = message.decode('utf-8')\n print('Message : %s from : %s' % (message, str(address)))\n if message == 'IBORN':\n sock.sendto(bytes('ILIVE', 'utf-8'), address)\n print(address)\n me = socket.gethostbyname(socket.gethostname()\n ), sock.getsockname()[1]\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'ILIVE':\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'IEXIT':\n if address != me:\n count -= 1\n print('Current count of copies : %s' % count)\n except socket.timeout:\n print('No new messages in 2 seconds.')\n time.sleep(1)\n sock.sendto(bytes('IEXIT', 'utf-8'), ('255.255.255.255', port))\n print('Count at exit : %s' % count)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n if len(sys.argv) != 2:\n print('usage : %s port')\n sys.exit()\n port = int(sys.argv[1])\n count = 0\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.settimeout(2)\n sock.bind(('', port))\n sock.sendto(bytes('IBORN', 'utf-8'), ('255.255.255.255', port))\n lifetime = time.time() + 10\n while time.time() < lifetime:\n try:\n message, address = sock.recvfrom(1024)\n message = message.decode('utf-8')\n print('Message : %s from : %s' % (message, str(address)))\n if message == 'IBORN':\n sock.sendto(bytes('ILIVE', 'utf-8'), address)\n print(address)\n me = socket.gethostbyname(socket.gethostname()\n ), sock.getsockname()[1]\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'ILIVE':\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'IEXIT':\n if address != me:\n count -= 1\n print('Current count of copies : %s' % count)\n except socket.timeout:\n print('No new messages in 2 seconds.')\n time.sleep(1)\n sock.sendto(bytes('IEXIT', 'utf-8'), ('255.255.255.255', port))\n print('Count at exit : %s' % count)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import socket\nimport time\nimport sys\n\n\ndef main():\n if len(sys.argv) != 2:\n print('usage : %s port')\n sys.exit()\n port = int(sys.argv[1])\n count = 0\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.settimeout(2)\n sock.bind(('', port))\n sock.sendto(bytes('IBORN', 'utf-8'), ('255.255.255.255', port))\n lifetime = time.time() + 10\n while time.time() < lifetime:\n try:\n message, address = sock.recvfrom(1024)\n message = message.decode('utf-8')\n print('Message : %s from : %s' % (message, str(address)))\n if message == 'IBORN':\n sock.sendto(bytes('ILIVE', 'utf-8'), address)\n print(address)\n me = socket.gethostbyname(socket.gethostname()\n ), sock.getsockname()[1]\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'ILIVE':\n if address != me:\n count += 1\n print('Current count of copies : %s' % count)\n elif message == 'IEXIT':\n if address != me:\n count -= 1\n print('Current count of copies : %s' % count)\n except socket.timeout:\n print('No new messages in 2 seconds.')\n time.sleep(1)\n sock.sendto(bytes('IEXIT', 'utf-8'), ('255.255.255.255', port))\n print('Count at exit : %s' % count)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import socket\nimport time\nimport sys\n\n\ndef main():\n if len(sys.argv) != 2:\n print(\"usage : %s port\")\n sys.exit()\n port = int(sys.argv[1])\n count = 0\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.settimeout(2)\n sock.bind(('', port))\n\n sock.sendto(bytes(\"IBORN\", \"utf-8\"), ('255.255.255.255', port))\n lifetime = time.time() + 10\n while time.time() < lifetime:\n try:\n message, address = sock.recvfrom(1024)\n message = message.decode(\"utf-8\")\n print(\"Message : %s from : %s\" % (message, str(address)))\n if message == \"IBORN\":\n sock.sendto(bytes(\"ILIVE\", \"utf-8\"), address)\n print(address)\n me = (socket.gethostbyname(socket.gethostname()), sock.getsockname()[1])\n if address != me:\n count += 1\n print(\"Current count of copies : %s\" % count)\n elif message == \"ILIVE\":\n if address != me:\n count += 1\n print(\"Current count of copies : %s\" % count)\n elif message == \"IEXIT\":\n if address != me:\n count -= 1\n print(\"Current count of copies : %s\" % count)\n except socket.timeout:\n print(\"No new messages in 2 seconds.\")\n time.sleep(1)\n sock.sendto(bytes(\"IEXIT\", \"utf-8\"), ('255.255.255.255', port))\n print(\"Count at exit : %s\" % count)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# encoding: utf-8
"""
plot: regularization on x axis, number of k_best features on y
Created by on 2012-01-27.
Copyright (c) 2012. All rights reserved.
"""
import sys
import os
import json
import numpy as np
import pylab as plt
import itertools as it
from master.libs import plot_lib as plib
from master.libs import read_data_lib as rdl
from master.libs import utils
import matplotlib.gridspec as gridspec
reload(plib)
reload(rdl)
params = {'axes.labelsize': 6,
'font.size': 6,
'legend.fontsize': 7,
'xtick.labelsize':6,
'ytick.labelsize': 6}
plt.rcParams.update(params)
config = json.load(open(sys.argv[1]))
outpath = os.path.join(config['inpath'], 'plots')
if not os.path.exists(outpath):
os.mkdir(outpath)
# variables for results
plt.close('all')
search_res, max_overview, sc, _ = rdl.read_paramsearch_results(config['inpath'],
p_selection=config.get('selection', {}))
if config['plot_param_space']:
for desc in search_res:
fig = plt.figure()
plib.plot_search_matrix(fig, search_res[desc], config['fselection'],
config['method'], config.get('glomeruli', []))
fig.savefig(os.path.join(outpath, config['method'] + '_' + desc + '.' + config['format']))
# descriptor method performance plots
fig = plt.figure(figsize=(3.35, 2))
ptype = config['descriptor_plot_type']
plib.new_descriptor_performance_plot(fig, max_overview, config['fselection'],
config['method'],
config.get('glomeruli', []),
ptype)
fig.subplots_adjust(bottom=0.25)
fig.savefig(os.path.join(outpath, ptype + '_desc_comparison.' + config['format']), dpi=600)
# ML method comparison plot
markers = ['1', '0']
desc2comp = ['EVA_100', 'all']
fig = plt.figure(figsize=(3.35, 1.8))
ax = fig.add_subplot(111)
desc1_collect, desc2_collect = [], []
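# one point per column of p_selection: linear-SVR q2 on the x axis vs. random-forest q2 on the y axis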
for i, desc in enumerate(desc2comp):
desc_idx1 = max_overview['svr']['linear']['desc_names'].index(desc)
desc_idx2 = max_overview['forest']['forest']['desc_names'].index(desc)
desc1_collect.extend(max_overview['svr']['linear']['p_selection'][desc_idx1, :])
desc2_collect.extend(max_overview['forest']['forest']['p_selection'][desc_idx2, :])
ax.plot(max_overview['svr']['linear']['p_selection'][desc_idx1, :],
max_overview['forest']['forest']['p_selection'][desc_idx2, :],
'o', mfc=markers[i],
label=desc,
markersize=5)
ax.plot([0, 0.8], [0, 0.8], color='0.5')
plt.axis('scaled')
ax.set_xlim([0, .9])
ax.set_ylim([0, .9])
ax.set_xlabel('SVR (q2)')
ax.set_ylabel('RFR (q2)')
utils.simple_axis(ax)
ax.legend(loc='upper left', numpoints=1, frameon=False, prop={'size': 'small'}, bbox_to_anchor=(0.01, 1))
ticks = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
ticklabels = ['0', '', '.2', '', '.4', '', '.6', '', '.8', '']
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
fig.subplots_adjust(bottom=0.2)
fig.tight_layout()
fig.savefig(os.path.join(outpath, 'best_method_comparison.' + config['format']), dpi=600)
assert len(desc1_collect) == len(desc2_collect)
svr_better = np.sum([1 for d1, d2 in zip(desc1_collect, desc2_collect) if d1 > d2])
rfr_better = np.sum([1 for d1, d2 in zip(desc1_collect, desc2_collect) if d1 < d2])
ratio = float(svr_better) / (np.sum(rfr_better) + np.sum(svr_better))
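# fraction of the compared cases in which SVR beats RFR (ties are excluded)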
print('svr better than rfr in {:.2%} of the cases'.format(ratio))
if utils.run_from_ipython():
plt.show()
|
normal
|
{
"blob_id": "c5bbfa1a86dbbd431566205ff7d7b941bdceff58",
"index": 1233,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nreload(plib)\nreload(rdl)\n<mask token>\nplt.rcParams.update(params)\n<mask token>\nif not os.path.exists(outpath):\n os.mkdir(outpath)\nplt.close('all')\n<mask token>\nif config['plot_param_space']:\n for desc in search_res:\n fig = plt.figure()\n plib.plot_search_matrix(fig, search_res[desc], config['fselection'],\n config['method'], config.get('glomeruli', []))\n fig.savefig(os.path.join(outpath, config['method'] + '_' + desc +\n '.' + config['format']))\n<mask token>\nplib.new_descriptor_performance_plot(fig, max_overview, config['fselection'\n ], config['method'], config.get('glomeruli', []), ptype)\nfig.subplots_adjust(bottom=0.25)\nfig.savefig(os.path.join(outpath, ptype + '_desc_comparison.' + config[\n 'format']), dpi=600)\n<mask token>\nfor i, desc in enumerate(desc2comp):\n desc_idx1 = max_overview['svr']['linear']['desc_names'].index(desc)\n desc_idx2 = max_overview['forest']['forest']['desc_names'].index(desc)\n desc1_collect.extend(max_overview['svr']['linear']['p_selection'][\n desc_idx1, :])\n desc2_collect.extend(max_overview['forest']['forest']['p_selection'][\n desc_idx2, :])\n ax.plot(max_overview['svr']['linear']['p_selection'][desc_idx1, :],\n max_overview['forest']['forest']['p_selection'][desc_idx2, :], 'o',\n mfc=markers[i], label=desc, markersize=5)\nax.plot([0, 0.8], [0, 0.8], color='0.5')\nplt.axis('scaled')\nax.set_xlim([0, 0.9])\nax.set_ylim([0, 0.9])\nax.set_xlabel('SVR (q2)')\nax.set_ylabel('RFR (q2)')\nutils.simple_axis(ax)\nax.legend(loc='upper left', numpoints=1, frameon=False, prop={'size':\n 'small'}, bbox_to_anchor=(0.01, 1))\n<mask token>\nax.set_yticks(ticks)\nax.set_yticklabels(ticklabels)\nax.set_xticks(ticks)\nax.set_xticklabels(ticklabels)\nfig.subplots_adjust(bottom=0.2)\nfig.tight_layout()\nfig.savefig(os.path.join(outpath, 'best_method_comparison.' + config[\n 'format']), dpi=600)\nassert len(desc1_collect) == len(desc2_collect)\n<mask token>\nprint('svr better than rfr in {:.2f} \\\\% of the cases'.format(ratio))\nif utils.run_from_ipython():\n plt.show()\n",
"step-3": "<mask token>\nreload(plib)\nreload(rdl)\nparams = {'axes.labelsize': 6, 'font.size': 6, 'legend.fontsize': 7,\n 'xtick.labelsize': 6, 'ytick.labelsize': 6}\nplt.rcParams.update(params)\nconfig = json.load(open(sys.argv[1]))\noutpath = os.path.join(config['inpath'], 'plots')\nif not os.path.exists(outpath):\n os.mkdir(outpath)\nplt.close('all')\nsearch_res, max_overview, sc, _ = rdl.read_paramsearch_results(config[\n 'inpath'], p_selection=config.get('selection', {}))\nif config['plot_param_space']:\n for desc in search_res:\n fig = plt.figure()\n plib.plot_search_matrix(fig, search_res[desc], config['fselection'],\n config['method'], config.get('glomeruli', []))\n fig.savefig(os.path.join(outpath, config['method'] + '_' + desc +\n '.' + config['format']))\nfig = plt.figure(figsize=(3.35, 2))\nptype = config['descriptor_plot_type']\nplib.new_descriptor_performance_plot(fig, max_overview, config['fselection'\n ], config['method'], config.get('glomeruli', []), ptype)\nfig.subplots_adjust(bottom=0.25)\nfig.savefig(os.path.join(outpath, ptype + '_desc_comparison.' + config[\n 'format']), dpi=600)\nmarkers = ['1', '0']\ndesc2comp = ['EVA_100', 'all']\nfig = plt.figure(figsize=(3.35, 1.8))\nax = fig.add_subplot(111)\ndesc1_collect, desc2_collect = [], []\nfor i, desc in enumerate(desc2comp):\n desc_idx1 = max_overview['svr']['linear']['desc_names'].index(desc)\n desc_idx2 = max_overview['forest']['forest']['desc_names'].index(desc)\n desc1_collect.extend(max_overview['svr']['linear']['p_selection'][\n desc_idx1, :])\n desc2_collect.extend(max_overview['forest']['forest']['p_selection'][\n desc_idx2, :])\n ax.plot(max_overview['svr']['linear']['p_selection'][desc_idx1, :],\n max_overview['forest']['forest']['p_selection'][desc_idx2, :], 'o',\n mfc=markers[i], label=desc, markersize=5)\nax.plot([0, 0.8], [0, 0.8], color='0.5')\nplt.axis('scaled')\nax.set_xlim([0, 0.9])\nax.set_ylim([0, 0.9])\nax.set_xlabel('SVR (q2)')\nax.set_ylabel('RFR (q2)')\nutils.simple_axis(ax)\nax.legend(loc='upper left', numpoints=1, frameon=False, prop={'size':\n 'small'}, bbox_to_anchor=(0.01, 1))\nticks = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\nticklabels = ['0', '', '.2', '', '.4', '', '.6', '', '.8', '']\nax.set_yticks(ticks)\nax.set_yticklabels(ticklabels)\nax.set_xticks(ticks)\nax.set_xticklabels(ticklabels)\nfig.subplots_adjust(bottom=0.2)\nfig.tight_layout()\nfig.savefig(os.path.join(outpath, 'best_method_comparison.' + config[\n 'format']), dpi=600)\nassert len(desc1_collect) == len(desc2_collect)\nsvr_better = np.sum([(1) for d1, d2 in zip(desc1_collect, desc2_collect) if\n d1 > d2])\nrfr_better = np.sum([(1) for d1, d2 in zip(desc1_collect, desc2_collect) if\n d1 < d2])\nratio = float(svr_better) / (np.sum(rfr_better) + np.sum(svr_better))\nprint('svr better than rfr in {:.2f} \\\\% of the cases'.format(ratio))\nif utils.run_from_ipython():\n plt.show()\n",
"step-4": "<mask token>\nimport sys\nimport os\nimport json\nimport numpy as np\nimport pylab as plt\nimport itertools as it\nfrom master.libs import plot_lib as plib\nfrom master.libs import read_data_lib as rdl\nfrom master.libs import utils\nimport matplotlib.gridspec as gridspec\nreload(plib)\nreload(rdl)\nparams = {'axes.labelsize': 6, 'font.size': 6, 'legend.fontsize': 7,\n 'xtick.labelsize': 6, 'ytick.labelsize': 6}\nplt.rcParams.update(params)\nconfig = json.load(open(sys.argv[1]))\noutpath = os.path.join(config['inpath'], 'plots')\nif not os.path.exists(outpath):\n os.mkdir(outpath)\nplt.close('all')\nsearch_res, max_overview, sc, _ = rdl.read_paramsearch_results(config[\n 'inpath'], p_selection=config.get('selection', {}))\nif config['plot_param_space']:\n for desc in search_res:\n fig = plt.figure()\n plib.plot_search_matrix(fig, search_res[desc], config['fselection'],\n config['method'], config.get('glomeruli', []))\n fig.savefig(os.path.join(outpath, config['method'] + '_' + desc +\n '.' + config['format']))\nfig = plt.figure(figsize=(3.35, 2))\nptype = config['descriptor_plot_type']\nplib.new_descriptor_performance_plot(fig, max_overview, config['fselection'\n ], config['method'], config.get('glomeruli', []), ptype)\nfig.subplots_adjust(bottom=0.25)\nfig.savefig(os.path.join(outpath, ptype + '_desc_comparison.' + config[\n 'format']), dpi=600)\nmarkers = ['1', '0']\ndesc2comp = ['EVA_100', 'all']\nfig = plt.figure(figsize=(3.35, 1.8))\nax = fig.add_subplot(111)\ndesc1_collect, desc2_collect = [], []\nfor i, desc in enumerate(desc2comp):\n desc_idx1 = max_overview['svr']['linear']['desc_names'].index(desc)\n desc_idx2 = max_overview['forest']['forest']['desc_names'].index(desc)\n desc1_collect.extend(max_overview['svr']['linear']['p_selection'][\n desc_idx1, :])\n desc2_collect.extend(max_overview['forest']['forest']['p_selection'][\n desc_idx2, :])\n ax.plot(max_overview['svr']['linear']['p_selection'][desc_idx1, :],\n max_overview['forest']['forest']['p_selection'][desc_idx2, :], 'o',\n mfc=markers[i], label=desc, markersize=5)\nax.plot([0, 0.8], [0, 0.8], color='0.5')\nplt.axis('scaled')\nax.set_xlim([0, 0.9])\nax.set_ylim([0, 0.9])\nax.set_xlabel('SVR (q2)')\nax.set_ylabel('RFR (q2)')\nutils.simple_axis(ax)\nax.legend(loc='upper left', numpoints=1, frameon=False, prop={'size':\n 'small'}, bbox_to_anchor=(0.01, 1))\nticks = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\nticklabels = ['0', '', '.2', '', '.4', '', '.6', '', '.8', '']\nax.set_yticks(ticks)\nax.set_yticklabels(ticklabels)\nax.set_xticks(ticks)\nax.set_xticklabels(ticklabels)\nfig.subplots_adjust(bottom=0.2)\nfig.tight_layout()\nfig.savefig(os.path.join(outpath, 'best_method_comparison.' + config[\n 'format']), dpi=600)\nassert len(desc1_collect) == len(desc2_collect)\nsvr_better = np.sum([(1) for d1, d2 in zip(desc1_collect, desc2_collect) if\n d1 > d2])\nrfr_better = np.sum([(1) for d1, d2 in zip(desc1_collect, desc2_collect) if\n d1 < d2])\nratio = float(svr_better) / (np.sum(rfr_better) + np.sum(svr_better))\nprint('svr better than rfr in {:.2f} \\\\% of the cases'.format(ratio))\nif utils.run_from_ipython():\n plt.show()\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n plot: regularization on x axis, number of k_best features on y\n\nCreated by on 2012-01-27.\nCopyright (c) 2012. All rights reserved.\n\"\"\"\nimport sys\nimport os\nimport json\nimport numpy as np\nimport pylab as plt\nimport itertools as it\nfrom master.libs import plot_lib as plib\nfrom master.libs import read_data_lib as rdl\nfrom master.libs import utils\nimport matplotlib.gridspec as gridspec\nreload(plib)\nreload(rdl)\n\n\nparams = {'axes.labelsize': 6,\n 'font.size': 6,\n 'legend.fontsize': 7,\n 'xtick.labelsize':6,\n 'ytick.labelsize': 6}\nplt.rcParams.update(params)\n\nconfig = json.load(open(sys.argv[1]))\noutpath = os.path.join(config['inpath'], 'plots')\nif not os.path.exists(outpath):\n os.mkdir(outpath)\n\n# variables for results\nplt.close('all')\nsearch_res, max_overview, sc, _ = rdl.read_paramsearch_results(config['inpath'],\n p_selection=config.get('selection', {}))\n\nif config['plot_param_space']:\n for desc in search_res:\n fig = plt.figure()\n plib.plot_search_matrix(fig, search_res[desc], config['fselection'],\n config['method'], config.get('glomeruli', []))\n fig.savefig(os.path.join(outpath, config['method'] + '_' + desc + '.' + config['format']))\n\n# descriptor method performance plots\nfig = plt.figure(figsize=(3.35, 2))\nptype = config['descriptor_plot_type']\nplib.new_descriptor_performance_plot(fig, max_overview, config['fselection'],\n config['method'],\n config.get('glomeruli', []),\n ptype)\nfig.subplots_adjust(bottom=0.25)\nfig.savefig(os.path.join(outpath, ptype + '_desc_comparison.' + config['format']), dpi=600)\n\n\n# ML method comparison plot\nmarkers = ['1', '0']\ndesc2comp = ['EVA_100', 'all']\nfig = plt.figure(figsize=(3.35, 1.8))\nax = fig.add_subplot(111)\ndesc1_collect, desc2_collect = [], []\nfor i, desc in enumerate(desc2comp):\n desc_idx1 = max_overview['svr']['linear']['desc_names'].index(desc)\n desc_idx2 = max_overview['forest']['forest']['desc_names'].index(desc)\n desc1_collect.extend(max_overview['svr']['linear']['p_selection'][desc_idx1, :])\n desc2_collect.extend(max_overview['forest']['forest']['p_selection'][desc_idx2, :])\n ax.plot(max_overview['svr']['linear']['p_selection'][desc_idx1, :],\n max_overview['forest']['forest']['p_selection'][desc_idx2, :],\n 'o', mfc=markers[i],\n label=desc,\n markersize=5)\nax.plot([0, 0.8], [0, 0.8], color='0.5')\nplt.axis('scaled')\nax.set_xlim([0, .9])\nax.set_ylim([0, .9])\nax.set_xlabel('SVR (q2)')\nax.set_ylabel('RFR (q2)')\nutils.simple_axis(ax)\nax.legend(loc='upper left', numpoints=1, frameon=False, prop={'size': 'small'}, bbox_to_anchor=(0.01, 1))\nticks = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\nticklabels = ['0', '', '.2', '', '.4', '', '.6', '', '.8', '']\nax.set_yticks(ticks)\nax.set_yticklabels(ticklabels)\nax.set_xticks(ticks)\nax.set_xticklabels(ticklabels)\nfig.subplots_adjust(bottom=0.2)\nfig.tight_layout()\nfig.savefig(os.path.join(outpath, 'best_method_comparison.' + config['format']), dpi=600)\n\nassert len(desc1_collect) == len(desc2_collect)\nsvr_better = np.sum([1 for d1, d2 in zip(desc1_collect, desc2_collect) if d1 > d2])\nrfr_better = np.sum([1 for d1, d2 in zip(desc1_collect, desc2_collect) if d1 < d2])\nratio = float(svr_better) / (np.sum(rfr_better) + np.sum(svr_better))\nprint('svr better than rfr in {:.2f} \\% of the cases'.format(ratio))\n\nif utils.run_from_ipython():\n plt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, concatenate
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.utils import np_utils
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, f1_score
import seaborn as sns
from keras.layers import Input, Dense, Add, Multiply
# Suppress macOS-specific TensorFlow warnings (shown when running on CPU without a GPU)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Initialize parameters
classes = [
"normal cells",
"blasts",
"blasts_highSSC_granulocytes",
"blasts_highSSC_middle_ugly",
"blasts_highSSC_upper_dead",
]
num_classes = len(classes)
image_size = 66
# Load the data
imagefiles = np.load("imagefiles_supplementary.npz")
X_train = imagefiles['X_train']
X_test = imagefiles['X_test']
y_train = imagefiles['y_train']
y_test = imagefiles['y_test']
# Reshape the grayscale images so they can be fed into the CNN
X_train = X_train.reshape((-1, image_size, image_size, 1))
X_test = X_test.reshape((-1, image_size, image_size, 1))
# Normalize the data
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
# One-hot encode the labels (a 1 at the index of the correct class)
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
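# Recursively assemble a Keras graph from a nested description: a callable is
# applied to the current tensor, a list creates parallel branches, and a tuple
# chains its elements sequentially.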
def _build(_input, *nodes):
x = _input
for node in nodes:
if callable(node):
x = node(x)
elif isinstance(node, list):
x = [_build(x, branch) for branch in node]
elif isinstance(node, tuple):
x = _build(x, *node)
else:
x = node
return x
_input = Input(X_train.shape[1:])
output = _build(
_input,
    # Definition of the 3 reduction dual-path modules
    # ---------------------------
    # Convolution layer (96 filters)
    # Batch normalization
    # Activation: ReLU
    # ---------------------------
    # MaxPooling
    # ---------------------------
    # Reduction dual-path module 1
[(Conv2D(96, (3, 3), strides=(2, 2)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu')),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],
# Reduction dual-path module2
Add(),
[(Conv2D(96, (3, 3), strides=(2, 2)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu')),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],
# Reduction dual-path module3
Add(),
[(Conv2D(96, (3, 3), strides=(2, 2)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu')),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],
    # Definition of the 10 dual-path modules
    # ---------------------------
    # First path: convolution layer (112 filters)
    # Batch normalization
    # Activation: ReLU
    # ---------------------------
    # Second path of each dual-path module:
    # convolution layer (48 filters)
    # Batch normalization
    # Activation: ReLU
    # ---------------------------
    # Dual-path module 1
Add(),
[(Conv2D(112, (1, 1), strides=(1, 1)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu'),
),
(Conv2D(48, (3, 3), strides=(1, 1)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu'),
)],
# # Dual-path modules2
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules3
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules4
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules5
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules6
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules7
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules8
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules9
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules10
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
    # Fully connected classification head
Add(),
[MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None),
Flatten(),
Dense(256, activation='relu'),
Dropout(0.5),
Dense(num_classes, activation='softmax')
]
)
model = Model(_input, output)
model.summary()
# # Configure the loss function and optimizer
# opt = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
#
# # Run training
# # Fit the model
# print("start training")
# hist = model.fit(X_train, y_train, batch_size=32, epochs=30, validation_data=(X_test, y_test))
# # Evaluate
# print("start eval")
# score = model.evaluate(X_test, y_test, batch_size=32, verbose=1)  # verbose: show progress
# print('Test Loss: ', score[0])
# print('Test Accuracy: ', score[1])
#
# model.save('leukemia_cnn_supplementary.h5')
#
# # Plot the training curves
# # Plot the accuracy curve
# fig = plt.figure()
# plt.plot(hist.history['accuracy'])
# plt.plot(hist.history['val_accuracy'])
# plt.title('Accuracy')
# plt.legend(['train', 'test'], loc='upper left')
# fig.savefig('result/cnn_supplementary/cnn_accuracy_supplementary.png')
# plt.close()
# # Plot the loss curve
# fig = plt.figure()
# plt.plot(hist.history['loss'])
# plt.plot(hist.history['val_loss'])
# plt.title('Loss')
# plt.legend(['train', 'test'], loc='upper left')
# fig.savefig('result/cnn_supplementary/cnn_loss_supplementary.png')
# plt.close()
# # Build the confusion matrix
# plt.figure()
# y_pred = model.predict(X_test)
# y_test = imagefiles['y_test']  # reload labels, since y_test was one-hot encoded above
# cm = confusion_matrix(y_test, np.argmax(y_pred, axis=1))
# ticklabels = ["blasts_highSSC_granulocytes",
# "blasts_highSSC_middle_ugly",
# "blasts",
# "normal cells",
# "blasts_highSSC_upper_dead"]
# sns.heatmap(cm, annot=True, cmap='Blues', yticklabels=ticklabels, xticklabels=ticklabels)
# plt.ylabel("Correct")
# plt.xlabel("Prediction")
# plt.tight_layout()
# plt.savefig('result/cnn_supplementary/confusion_matrix_cnn_supplementary.png')
# plt.close()
#
# # F1 micro/macro
# f1_macro = f1_score(y_test, np.argmax(y_pred, axis=1), average="macro")
# f1_micro = f1_score(y_test, np.argmax(y_pred, axis=1), average="micro")
# print(f"f1_macro:{f1_macro}")
# print(f"f1_miro:{f1_micro}")
|
normal
|
{
"blob_id": "ebc050544da69837cc2b8977f347380b94474bab",
"index": 576,
"step-1": "<mask token>\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n<mask token>\nmodel.summary()\n",
"step-3": "<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nclasses = ['normal cells', 'blasts', 'blasts_highSSC_granulocytes',\n 'blasts_highSSC_middle_ugly', 'blasts_highSSC_upper_dead']\nnum_classes = len(classes)\nimage_size = 66\nimagefiles = np.load('imagefiles_supplementary.npz')\nX_train = imagefiles['X_train']\nX_test = imagefiles['X_test']\ny_train = imagefiles['y_train']\ny_test = imagefiles['y_test']\nX_train = X_train.reshape((-1, image_size, image_size, 1))\nX_test = X_test.reshape((-1, image_size, image_size, 1))\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n_input = Input(X_train.shape[1:])\noutput = _build(_input, [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(112, (1, 1), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), (Conv2D(48, (3, 3),\n strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=\n 0.001, center=True, scale=True, beta_initializer='zeros',\n gamma_initializer='ones', moving_mean_initializer='zeros',\n moving_variance_initializer='ones', beta_regularizer=None,\n gamma_regularizer=None, beta_constraint=None, gamma_constraint=None),\n Activation('relu'))], Add(), [MaxPooling2D(pool_size=(2, 2), strides=\n None, padding='valid', data_format=None), Flatten(), Dense(256,\n activation='relu'), Dropout(0.5), Dense(num_classes, activation='softmax')]\n )\nmodel = Model(_input, output)\nmodel.summary()\n",
"step-4": "import os\nimport numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, concatenate\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, f1_score\nimport seaborn as sns\nfrom keras.layers import Input, Dense, Add, Multiply\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nclasses = ['normal cells', 'blasts', 'blasts_highSSC_granulocytes',\n 'blasts_highSSC_middle_ugly', 'blasts_highSSC_upper_dead']\nnum_classes = len(classes)\nimage_size = 66\nimagefiles = np.load('imagefiles_supplementary.npz')\nX_train = imagefiles['X_train']\nX_test = imagefiles['X_test']\ny_train = imagefiles['y_train']\ny_test = imagefiles['y_test']\nX_train = X_train.reshape((-1, image_size, image_size, 1))\nX_test = X_test.reshape((-1, image_size, image_size, 1))\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n_input = Input(X_train.shape[1:])\noutput = _build(_input, [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(112, (1, 1), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), (Conv2D(48, (3, 3),\n strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=\n 0.001, center=True, scale=True, beta_initializer='zeros',\n gamma_initializer='ones', moving_mean_initializer='zeros',\n moving_variance_initializer='ones', beta_regularizer=None,\n gamma_regularizer=None, beta_constraint=None, gamma_constraint=None),\n 
Activation('relu'))], Add(), [MaxPooling2D(pool_size=(2, 2), strides=\n None, padding='valid', data_format=None), Flatten(), Dense(256,\n activation='relu'), Dropout(0.5), Dense(num_classes, activation='softmax')]\n )\nmodel = Model(_input, output)\nmodel.summary()\n",
"step-5": "import os\n\n\nimport numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, concatenate\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, f1_score\nimport seaborn as sns\nfrom keras.layers import Input, Dense, Add, Multiply\n\n# macOS特有の警告文を非表示(GPUがないからCPUでやるときに出る)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# パラメータの初期化\nclasses = [\n \"normal cells\",\n \"blasts\",\n \"blasts_highSSC_granulocytes\",\n \"blasts_highSSC_middle_ugly\",\n \"blasts_highSSC_upper_dead\",\n]\nnum_classes = len(classes)\nimage_size = 66\n\n# データの読み込み\nimagefiles = np.load(\"imagefiles_supplementary.npz\")\nX_train = imagefiles['X_train']\nX_test = imagefiles['X_test']\ny_train = imagefiles['y_train']\ny_test = imagefiles['y_test']\n# グレースケール画像をCNNに入力するための次元操作\nX_train = X_train.reshape((-1, image_size, image_size, 1))\nX_test = X_test.reshape((-1, image_size, image_size, 1))\n# データの正規化\nX_train = X_train.astype(\"float32\")\nX_test = X_test.astype(\"float32\")\n# OneHotVector化する(正解ラベルの位置に1がつく)\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n_input = Input(X_train.shape[1:])\noutput = _build(\n _input,\n # Reduction dual-path module×3の定義\n # ---------------------------\n # 畳み込み層の追加(96:フィルタ数)\n # バッチ正規化\n # 活性化関数:ReLu\n # ---------------------------\n # MaxPooling\n # ---------------------------\n # Reduction dual-path module1\n [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu')),\n MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],\n # Reduction dual-path module2\n Add(),\n [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu')),\n MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],\n # Reduction dual-path module3\n Add(),\n [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu')),\n MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],\n\n # Dual-path modules×10の定義\n # ---------------------------\n # 畳み込み層の追加(112:フィルタ数)\n # バッチ正規化\n # 活性化関数:ReLu\n # ---------------------------\n # Dual-path modules2の定義\n # 畳み込み層の追加(48:フィルタ数)\n # バッチ正規化\n # 活性化関数:ReLu\n # 
---------------------------\n # Dual-path modules1\n Add(),\n [(Conv2D(112, (1, 1), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu'),\n ),\n (Conv2D(48, (3, 3), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu'),\n )],\n # # Dual-path modules2\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules3\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules4\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules5\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, 
gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules6\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules7\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules8\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules9\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', 
moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules10\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # 全結合\n Add(),\n [MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None),\n Flatten(),\n Dense(256, activation='relu'),\n Dropout(0.5),\n Dense(num_classes, activation='softmax')\n ]\n)\nmodel = Model(_input, output)\nmodel.summary()\n\n# # 損失関数の設定\n# opt = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)\n# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n#\n# # トレーニングの実施\n# # 学習\n# print(\"start training\")\n# hist = model.fit(X_train, y_train, batch_size=32, epochs=30, validation_data=(X_test, y_test))\n# # 評価\n# print(\"start eval\")\n# score = model.evaluate(X_test, y_test, batch_size=32, verbose=1) # verbose:途中結果表示\n# print('Test Loss: ', score[0])\n# print('Test Accuracy: ', score[1])\n#\n# model.save('leukemia_cnn_supplementary.h5')\n#\n# # 学習の様子をグラフへ描画\n# # 正解率の推移をプロット\n# fig = plt.figure()\n# plt.plot(hist.history['accuracy'])\n# plt.plot(hist.history['val_accuracy'])\n# plt.title('Accuracy')\n# plt.legend(['train', 'test'], loc='upper left')\n# fig.savefig('result/cnn_supplementary/cnn_accuracy_supplementary.png')\n# plt.close()\n# # ロスの推移をプロット\n# fig = plt.figure()\n# plt.plot(hist.history['loss'])\n# plt.plot(hist.history['val_loss'])\n# plt.title('Loss')\n# plt.legend(['train', 'test'], loc='upper left')\n# fig.savefig('result/cnn_supplementary/cnn_loss_supplementary.png')\n# plt.close()\n# # Confusion matrix作成\n# plt.figure()\n# y_pred = model.predict(X_test)\n# y_test = imagefiles['y_test'] # one hot vector化されているのでロードし直す\n# cm = confusion_matrix(y_test, np.argmax(y_pred, axis=1))\n# ticklabels = [\"blasts_highSSC_granulocytes\",\n# \"blasts_highSSC_middle_ugly\",\n# \"blasts\",\n# \"normal cells\",\n# \"blasts_highSSC_upper_dead\"]\n# sns.heatmap(cm, annot=True, cmap='Blues', yticklabels=ticklabels, xticklabels=ticklabels)\n# plt.ylabel(\"Correct\")\n# plt.xlabel(\"Prediction\")\n# plt.tight_layout()\n# plt.savefig('result/cnn_supplementary/confusion_matrix_cnn_supplementary.png')\n# plt.close()\n#\n# # F1 micro/macro\n# f1_macro = f1_score(y_test, np.argmax(y_pred, axis=1), average=\"macro\")\n# f1_micro = f1_score(y_test, np.argmax(y_pred, axis=1), average=\"micro\")\n# print(f\"f1_macro:{f1_macro}\")\n# print(f\"f1_miro:{f1_micro}\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#n = int(input())
#s = input()
n, m = map(int, input().split())
#s, t = input().split()
#n, m, l = map(int, input().split())
#s, t, r = input().split()
#a = map(int, input().split())
#a = input().split()
a = [int(input()) for _ in range(n)]
#a = [input() for _ in range(n)]
#t = input()
#m = int(input())
#p, q = map(int, input().split())
#p, q = input().split()
#p, q, r = map(int, input().split())
#p, q, r = input().split()
#b = map(int, input().split())
#b = input().split()
#b = [int(input()) for _ in range(m)]
#b = [input() for _ in range(m)]
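# total: start from m, then add min(a[i+1] - a[i], m) for every consecutive pair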
cnt, ans, mx, mn = 0, m, 0, 100000000
for i in range(n - 1):
if a[i + 1] - a[i] < m:
ans += a[i + 1] - a[i]
else:
ans += m
print(ans)
|
normal
|
{
"blob_id": "a09bc84a14718422894127a519d67dc0c6b13bc9",
"index": 746,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n - 1):\n if a[i + 1] - a[i] < m:\n ans += a[i + 1] - a[i]\n else:\n ans += m\nprint(ans)\n",
"step-3": "n, m = map(int, input().split())\na = [int(input()) for _ in range(n)]\ncnt, ans, mx, mn = 0, m, 0, 100000000\nfor i in range(n - 1):\n if a[i + 1] - a[i] < m:\n ans += a[i + 1] - a[i]\n else:\n ans += m\nprint(ans)\n",
"step-4": "#n = int(input())\n#s = input()\nn, m = map(int, input().split())\n#s, t = input().split()\n#n, m, l = map(int, input().split())\n#s, t, r = input().split()\n#a = map(int, input().split())\n#a = input().split()\na = [int(input()) for _ in range(n)]\n#a = [input() for _ in range(n)]\n\n#t = input()\n#m = int(input())\n#p, q = map(int, input().split())\n#p, q = input().split()\n#p, q, r = map(int, input().split())\n#p, q, r = input().split()\n#b = map(int, input().split())\n#b = input().split()\n#b = [int(input()) for _ in range(m)]\n#b = [input() for _ in range(m)]\ncnt, ans, mx, mn = 0, m, 0, 100000000\nfor i in range(n - 1):\n if a[i + 1] - a[i] < m:\n ans += a[i + 1] - a[i]\n else:\n ans += m\nprint(ans)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import argparse  # used by str2bool, which raises argparse.ArgumentTypeError
from os.path import join
import json
import pandas as pd
import time
import numpy as np
import torch
def str2bool(v):
# convert string to boolean type for argparser input
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str_or_none(v):
    # pass None or the string 'none' through as None (for argparser input)
if v is None:
return None
if v.lower() == 'none':
return None
else:
return v
# helper functions for LDA arguments
def dic2name(dic):
return '_'.join(["{}-{}".format(k, dic[k]) for k in sorted(dic)])
def name2dic(s):
return {x.split('-')[0]:x.split('-')[1] for x in s.split('_')}
def get_valid_types(TYPENAME):
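    # load the list of valid types for TYPENAME from configs/types.json under BASEPATH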
with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r') as typefile:
valid_types = json.load(typefile)[TYPENAME]
return valid_types
def df_index_gen(f, table=False):
# merge locator and dataset_id to genearte index table_id
f.loc[:,'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x['dataset_id']]), axis = 1)
if not table:
f.loc[:,'field_id'] = f.apply(lambda x: x['field_id'].split(":")[-1], axis = 1)
f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')
return f
# load dataframe from pickle or create pickle file
def load_tmp_df(load_path, tmp_path, name, table=False):
start = time.time()
pkl_file = join(tmp_path, "{}.pkl".format(name))
if os.path.exists(pkl_file):
print("{} pickle file found, loading...".format(pkl_file))
df = pd.read_pickle(pkl_file)
else:
#process and save pkl
print("{} pickle file not found, creating...".format(pkl_file))
df = pd.read_csv(join(load_path, "{}.csv".format(name)))
df = df_index_gen(df, table)
df.to_pickle(pkl_file)
print("{} Load complete. Time {}".format(name, time.time()-start))
return df
def logSumExpTensor(vec):
# vec -> 16, tag_size
batch_size = vec.size()[0]
vec = vec.view(batch_size, -1)
max_score = torch.max(vec, 1)[0]
max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])
return max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast), 1))
def logNormalizeTensor(a):
denom = logSumExpTensor(a)
if len(a.size())==2:
denom = denom.view(-1, 1).expand(-1, a.size()[1])
elif len(a.size())==3:
denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])
return (a-denom)
def logNormalize(a):
denom = np.logaddexp.reduce(a, 1)
return (a.transpose()- denom).transpose()
def logDot(a, b):
    # numerically stable way of computing log(dot(e^a, e^b))
max_a = np.amax(a)
max_b = np.amax(b)
C = np.dot(np.exp(a - max_a), np.exp(b - max_b))
np.log(C, out=C)
# else:
# np.log(C + 1e-300, out=C)
C += max_a + max_b
return C
|
normal
|
{
"blob_id": "a9302dbf724f9548411fbf2959f36b4cc5742ff8",
"index": 4999,
"step-1": "<mask token>\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\n<mask token>\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\n<mask token>\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\n<mask token>\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\ndef dic2name(dic):\n return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\n<mask token>\n\n\ndef df_index_gen(f, table=False):\n f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[\n 'dataset_id']]), axis=1)\n if not table:\n f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-\n 1], axis=1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\n<mask token>\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\ndef logDot(a, b):\n max_a = np.amax(a)\n max_b = np.amax(b)\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n C += max_a + max_b\n return C\n",
"step-3": "<mask token>\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\ndef dic2name(dic):\n return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\n<mask token>\n\n\ndef df_index_gen(f, table=False):\n f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[\n 'dataset_id']]), axis=1)\n if not table:\n f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-\n 1], axis=1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\ndef logNormalizeTensor(a):\n denom = logSumExpTensor(a)\n if len(a.size()) == 2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size()) == 3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.\n size()[2])\n return a - denom\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\ndef logDot(a, b):\n max_a = np.amax(a)\n max_b = np.amax(b)\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n C += max_a + max_b\n return C\n",
"step-4": "import os\nfrom os.path import join\nimport json\nimport pandas as pd\nimport time\nimport numpy as np\nimport torch\n\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef str_or_none(v):\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n\ndef dic2name(dic):\n return '_'.join(['{}-{}'.format(k, dic[k]) for k in sorted(dic)])\n\n\ndef name2dic(s):\n return {x.split('-')[0]: x.split('-')[1] for x in s.split('_')}\n\n\ndef get_valid_types(TYPENAME):\n with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r'\n ) as typefile:\n valid_types = json.load(typefile)[TYPENAME]\n return valid_types\n\n\ndef df_index_gen(f, table=False):\n f.loc[:, 'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x[\n 'dataset_id']]), axis=1)\n if not table:\n f.loc[:, 'field_id'] = f.apply(lambda x: x['field_id'].split(':')[-\n 1], axis=1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, '{}.pkl'.format(name))\n if os.path.exists(pkl_file):\n print('{} pickle file found, loading...'.format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n print('{} pickle file not found, creating...'.format(pkl_file))\n df = pd.read_csv(join(load_path, '{}.csv'.format(name)))\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print('{} Load complete. Time {}'.format(name, time.time() - start))\n return df\n\n\ndef logSumExpTensor(vec):\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec -\n max_score_broadcast), 1))\n\n\ndef logNormalizeTensor(a):\n denom = logSumExpTensor(a)\n if len(a.size()) == 2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size()) == 3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.\n size()[2])\n return a - denom\n\n\ndef logNormalize(a):\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose() - denom).transpose()\n\n\ndef logDot(a, b):\n max_a = np.amax(a)\n max_b = np.amax(b)\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n C += max_a + max_b\n return C\n",
"step-5": "import os\nfrom os.path import join\nimport json\nimport pandas as pd\nimport time\nimport numpy as np\nimport torch \n\ndef str2bool(v):\n # convert string to boolean type for argparser input\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef str_or_none(v):\n # convert string to boolean type for argparser input\n if v is None:\n return None\n if v.lower() == 'none':\n return None\n else:\n return v\n\n# helper functions for LDA arguments\ndef dic2name(dic):\n return '_'.join([\"{}-{}\".format(k, dic[k]) for k in sorted(dic)])\n\ndef name2dic(s):\n return {x.split('-')[0]:x.split('-')[1] for x in s.split('_')}\n\n\ndef get_valid_types(TYPENAME):\n\n with open(join(os.environ['BASEPATH'], 'configs', 'types.json'), 'r') as typefile: \n valid_types = json.load(typefile)[TYPENAME]\n return valid_types\n\n\ndef df_index_gen(f, table=False):\n # merge locator and dataset_id to genearte index table_id\n f.loc[:,'table_id'] = f.apply(lambda x: '+'.join([x['locator'], x['dataset_id']]), axis = 1)\n if not table:\n f.loc[:,'field_id'] = f.apply(lambda x: x['field_id'].split(\":\")[-1], axis = 1)\n f = f.drop(columns=['locator', 'dataset_id']).set_index('table_id')\n return f\n\n\n# load dataframe from pickle or create pickle file\ndef load_tmp_df(load_path, tmp_path, name, table=False):\n start = time.time()\n pkl_file = join(tmp_path, \"{}.pkl\".format(name))\n if os.path.exists(pkl_file):\n print(\"{} pickle file found, loading...\".format(pkl_file))\n df = pd.read_pickle(pkl_file)\n else:\n #process and save pkl\n print(\"{} pickle file not found, creating...\".format(pkl_file))\n df = pd.read_csv(join(load_path, \"{}.csv\".format(name)))\n\n df = df_index_gen(df, table)\n df.to_pickle(pkl_file)\n print(\"{} Load complete. Time {}\".format(name, time.time()-start))\n return df\n\ndef logSumExpTensor(vec):\n # vec -> 16, tag_size\n batch_size = vec.size()[0]\n vec = vec.view(batch_size, -1)\n max_score = torch.max(vec, 1)[0]\n max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast), 1))\n\ndef logNormalizeTensor(a):\n\n denom = logSumExpTensor(a)\n if len(a.size())==2:\n denom = denom.view(-1, 1).expand(-1, a.size()[1])\n elif len(a.size())==3:\n denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])\n return (a-denom)\n\ndef logNormalize(a):\n\n denom = np.logaddexp.reduce(a, 1)\n return (a.transpose()- denom).transpose()\n\ndef logDot(a, b):\n\n # numeric stable way of calculating log (e^a, e^b)\n max_a = np.amax(a)\n max_b = np.amax(b)\n\n C = np.dot(np.exp(a - max_a), np.exp(b - max_b))\n np.log(C, out=C)\n # else:\n # np.log(C + 1e-300, out=C)\n\n C += max_a + max_b\n\n return C\n",
"step-ids": [
5,
8,
9,
12,
13
]
}
|
[
5,
8,
9,
12,
13
] |
'''
Problem statement
Given two numbers a and b, print the hypotenuse of the right triangle with these legs.
'''
import math
a = int(input())
b = int(input())
print(math.sqrt(a * a + b * b))
|
normal
|
{
"blob_id": "c0348fc5f51e6f7a191fea6d0e3cb84c60b03e22",
"index": 597,
"step-1": "'''\nУсловие\nДано два числа a и b. Выведите гипотенузу треугольника с заданными катетами.\n'''\nimport math\na = int(input())\nb = int(input())\nprint(math.sqrt(a * a + b * b))",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python3
import cgitb
import sys
from auth import is_admin
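# Minimal CGI endpoint: responds 'yes' or 'no' depending on whether the caller is an admin.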
cgitb.enable()
sys.stdout.write('Content-Type: application/octet-stream\n\n')
sys.stdout.write('yes' if is_admin() else 'no')
sys.stdout.flush()
|
normal
|
{
"blob_id": "be9972d899a167a8ca2728960e55cda538793cc5",
"index": 1576,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncgitb.enable()\nsys.stdout.write('Content-Type: application/octet-stream\\n\\n')\nsys.stdout.write('yes' if is_admin() else 'no')\nsys.stdout.flush()\n",
"step-3": "import cgitb\nimport sys\nfrom auth import is_admin\ncgitb.enable()\nsys.stdout.write('Content-Type: application/octet-stream\\n\\n')\nsys.stdout.write('yes' if is_admin() else 'no')\nsys.stdout.flush()\n",
"step-4": "#!/usr/bin/env python3\nimport cgitb\nimport sys\n\nfrom auth import is_admin\n\ncgitb.enable()\nsys.stdout.write('Content-Type: application/octet-stream\\n\\n')\nsys.stdout.write('yes' if is_admin() else 'no')\nsys.stdout.flush()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.2.7 on 2021-10-01 06:43
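# Converts the 'map' image fields on Event and EventAgenda to resized 1920x1080 JPEG uploads.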
from django.db import migrations
import django_resized.forms
import event.models.event
import event.models.event_agenda
class Migration(migrations.Migration):
dependencies = [
('event', '0009_auto_20211001_0406'),
]
operations = [
migrations.AlterField(
model_name='event',
name='map',
field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa del evento', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event.event_pictures, verbose_name='Mapa'),
),
migrations.AlterField(
model_name='eventagenda',
name='map',
field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa de la exposicion', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.event_pictures, verbose_name='Mapa'),
),
]
|
normal
|
{
"blob_id": "d0a053faccecddc84a9556aec3dff691b171df96",
"index": 9977,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('event', '0009_auto_20211001_0406')]\n operations = [migrations.AlterField(model_name='event', name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None,\n force_format='JPEG', help_text='Mapa del evento', keep_meta=True,\n null=True, quality=90, size=[1920, 1080], upload_to=event.models.\n event.event_pictures, verbose_name='Mapa')), migrations.AlterField(\n model_name='eventagenda', name='map', field=django_resized.forms.\n ResizedImageField(blank=True, crop=None, force_format='JPEG',\n help_text='Mapa de la exposicion', keep_meta=True, null=True,\n quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.\n event_pictures, verbose_name='Mapa'))]\n",
"step-4": "from django.db import migrations\nimport django_resized.forms\nimport event.models.event\nimport event.models.event_agenda\n\n\nclass Migration(migrations.Migration):\n dependencies = [('event', '0009_auto_20211001_0406')]\n operations = [migrations.AlterField(model_name='event', name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None,\n force_format='JPEG', help_text='Mapa del evento', keep_meta=True,\n null=True, quality=90, size=[1920, 1080], upload_to=event.models.\n event.event_pictures, verbose_name='Mapa')), migrations.AlterField(\n model_name='eventagenda', name='map', field=django_resized.forms.\n ResizedImageField(blank=True, crop=None, force_format='JPEG',\n help_text='Mapa de la exposicion', keep_meta=True, null=True,\n quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.\n event_pictures, verbose_name='Mapa'))]\n",
"step-5": "# Generated by Django 3.2.7 on 2021-10-01 06:43\n\nfrom django.db import migrations\nimport django_resized.forms\nimport event.models.event\nimport event.models.event_agenda\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('event', '0009_auto_20211001_0406'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='event',\n name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa del evento', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event.event_pictures, verbose_name='Mapa'),\n ),\n migrations.AlterField(\n model_name='eventagenda',\n name='map',\n field=django_resized.forms.ResizedImageField(blank=True, crop=None, force_format='JPEG', help_text='Mapa de la exposicion', keep_meta=True, null=True, quality=90, size=[1920, 1080], upload_to=event.models.event_agenda.event_pictures, verbose_name='Mapa'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
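# Maximum of x*y over x in [a, b] and y in [c, d], by case analysis on the signs of the endpoints.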
a,b,c,d=map(int,input().split())
ans=0
if a>=0:
if c>=0:
ans=b*d
elif d>=0:
ans=b*d
else:
ans=a*d
elif b>=0:
if c>=0:
ans=b*d
elif d>=0:
ans=max(b*d,a*c)
else:
ans=a*c
else:
if c>=0:
ans=b*c
elif d>=0:
ans=a*c
else:
ans=a*c
print(ans)
|
normal
|
{
"blob_id": "be37a7596850050af58f735e60bdf13594715caf",
"index": 4928,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif a >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = b * d\n else:\n ans = a * d\nelif b >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = max(b * d, a * c)\n else:\n ans = a * c\nelif c >= 0:\n ans = b * c\nelif d >= 0:\n ans = a * c\nelse:\n ans = a * c\nprint(ans)\n",
"step-3": "a, b, c, d = map(int, input().split())\nans = 0\nif a >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = b * d\n else:\n ans = a * d\nelif b >= 0:\n if c >= 0:\n ans = b * d\n elif d >= 0:\n ans = max(b * d, a * c)\n else:\n ans = a * c\nelif c >= 0:\n ans = b * c\nelif d >= 0:\n ans = a * c\nelse:\n ans = a * c\nprint(ans)\n",
"step-4": "a,b,c,d=map(int,input().split())\nans=0\nif a>=0:\n if c>=0:\n ans=b*d\n elif d>=0:\n ans=b*d\n else:\n ans=a*d\nelif b>=0:\n if c>=0:\n ans=b*d\n elif d>=0:\n ans=max(b*d,a*c)\n else:\n ans=a*c\nelse:\n if c>=0:\n ans=b*c\n elif d>=0:\n ans=a*c\n else:\n ans=a*c\nprint(ans)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#This script reads through a Voyager import log and outputs duplicate bib IDs as well as the IDs of bibs, mfhds, and items created.
#import regular expressions and openpyxl
import re
import openpyxl
# prompt for file names
fname = input("Enter input file, including extension: ")
fout = input("Enter output file, without extension: ")
fh = open(fname, "r")
# set up lists
duplicates = [["Duplicate Bib ID"]]
bibs = [["Bib ID"]]
mfhds = [["MFHD ID"]]
items = [["Item ID"]]
# create and open workbook with two sheets
wb1=openpyxl.Workbook()
ws1=wb1.active
ws1.title = "Duplicate Bibs"
ws2 = wb1.create_sheet(index=1, title="IDs Added")
# read through file, extract the line after the line starting with BibID & rank and write to lists
with fh as f:
lines = f.readlines()
n_lines = len(lines)
for i, line in enumerate (lines) :
line = line.rstrip()
if line.startswith(" BibID & rank") and \
n_lines > i + 2 and lines[i + 2].startswith("") :
bibline = re.findall(r'\d+\s-\s', lines[i+1])
dupeid = re.findall(r'\d+', str(bibline))
duplicates.append(dupeid)
elif line.startswith(" Adding Bib") :
line = re.findall(r'\d+',str(line))
bibs.append(line)
elif line.startswith("MFHD_ID ") :
line = re.findall(r'\d+',str(line))
mfhds.append(line)
elif line.startswith("ITEM_ID ") :
line = re.findall(r'\d+',str(line))
items.append(line)
else :
continue
# write the lists to columns in the spreadsheet and save
for row in duplicates:
ws1.append(row)
for r in range(0,len(bibs)):
ws2.cell(row=r+1,column=1).value=bibs[r][0]
for r in range(0,len(mfhds)):
ws2.cell(row=r+1,column=2).value=mfhds[r][0]
for r in range(0,len(items)):
ws2.cell(row=r+1,column=3).value=items[r][0]
wb1.save(fout + ".xlsx")
|
normal
|
{
"blob_id": "fc06d8a26a99c16a4b38ad0b4bbb28a1dc522991",
"index": 6902,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith fh as f:\n lines = f.readlines()\n n_lines = len(lines)\n for i, line in enumerate(lines):\n line = line.rstrip()\n if line.startswith('\\tBibID & rank') and n_lines > i + 2 and lines[\n i + 2].startswith(''):\n bibline = re.findall('\\\\d+\\\\s-\\\\s', lines[i + 1])\n dupeid = re.findall('\\\\d+', str(bibline))\n duplicates.append(dupeid)\n elif line.startswith('\\tAdding Bib'):\n line = re.findall('\\\\d+', str(line))\n bibs.append(line)\n elif line.startswith('MFHD_ID '):\n line = re.findall('\\\\d+', str(line))\n mfhds.append(line)\n elif line.startswith('ITEM_ID '):\n line = re.findall('\\\\d+', str(line))\n items.append(line)\n else:\n continue\nfor row in duplicates:\n ws1.append(row)\nfor r in range(0, len(bibs)):\n ws2.cell(row=r + 1, column=1).value = bibs[r][0]\nfor r in range(0, len(mfhds)):\n ws2.cell(row=r + 1, column=2).value = mfhds[r][0]\nfor r in range(0, len(items)):\n ws2.cell(row=r + 1, column=3).value = items[r][0]\nwb1.save(fout + '.xlsx')\n",
"step-3": "<mask token>\nfname = input('Enter input file, including extension: ')\nfout = input('Enter output file, without extension: ')\nfh = open(fname, 'r')\nduplicates = [['Duplicate Bib ID']]\nbibs = [['Bib ID']]\nmfhds = [['MFHD ID']]\nitems = [['Item ID']]\nwb1 = openpyxl.Workbook()\nws1 = wb1.active\nws1.title = 'Duplicate Bibs'\nws2 = wb1.create_sheet(index=1, title='IDs Added')\nwith fh as f:\n lines = f.readlines()\n n_lines = len(lines)\n for i, line in enumerate(lines):\n line = line.rstrip()\n if line.startswith('\\tBibID & rank') and n_lines > i + 2 and lines[\n i + 2].startswith(''):\n bibline = re.findall('\\\\d+\\\\s-\\\\s', lines[i + 1])\n dupeid = re.findall('\\\\d+', str(bibline))\n duplicates.append(dupeid)\n elif line.startswith('\\tAdding Bib'):\n line = re.findall('\\\\d+', str(line))\n bibs.append(line)\n elif line.startswith('MFHD_ID '):\n line = re.findall('\\\\d+', str(line))\n mfhds.append(line)\n elif line.startswith('ITEM_ID '):\n line = re.findall('\\\\d+', str(line))\n items.append(line)\n else:\n continue\nfor row in duplicates:\n ws1.append(row)\nfor r in range(0, len(bibs)):\n ws2.cell(row=r + 1, column=1).value = bibs[r][0]\nfor r in range(0, len(mfhds)):\n ws2.cell(row=r + 1, column=2).value = mfhds[r][0]\nfor r in range(0, len(items)):\n ws2.cell(row=r + 1, column=3).value = items[r][0]\nwb1.save(fout + '.xlsx')\n",
"step-4": "import re\nimport openpyxl\nfname = input('Enter input file, including extension: ')\nfout = input('Enter output file, without extension: ')\nfh = open(fname, 'r')\nduplicates = [['Duplicate Bib ID']]\nbibs = [['Bib ID']]\nmfhds = [['MFHD ID']]\nitems = [['Item ID']]\nwb1 = openpyxl.Workbook()\nws1 = wb1.active\nws1.title = 'Duplicate Bibs'\nws2 = wb1.create_sheet(index=1, title='IDs Added')\nwith fh as f:\n lines = f.readlines()\n n_lines = len(lines)\n for i, line in enumerate(lines):\n line = line.rstrip()\n if line.startswith('\\tBibID & rank') and n_lines > i + 2 and lines[\n i + 2].startswith(''):\n bibline = re.findall('\\\\d+\\\\s-\\\\s', lines[i + 1])\n dupeid = re.findall('\\\\d+', str(bibline))\n duplicates.append(dupeid)\n elif line.startswith('\\tAdding Bib'):\n line = re.findall('\\\\d+', str(line))\n bibs.append(line)\n elif line.startswith('MFHD_ID '):\n line = re.findall('\\\\d+', str(line))\n mfhds.append(line)\n elif line.startswith('ITEM_ID '):\n line = re.findall('\\\\d+', str(line))\n items.append(line)\n else:\n continue\nfor row in duplicates:\n ws1.append(row)\nfor r in range(0, len(bibs)):\n ws2.cell(row=r + 1, column=1).value = bibs[r][0]\nfor r in range(0, len(mfhds)):\n ws2.cell(row=r + 1, column=2).value = mfhds[r][0]\nfor r in range(0, len(items)):\n ws2.cell(row=r + 1, column=3).value = items[r][0]\nwb1.save(fout + '.xlsx')\n",
"step-5": "#This script reads through a Voyager import log and outputs duplicate bib IDs as well as the IDs of bibs, mfhds, and items created.\n\n#import regular expressions and openpyxl\nimport re\nimport openpyxl\n\n# prompt for file names\nfname = input(\"Enter input file, including extension: \")\nfout = input(\"Enter output file, without extension: \")\nfh = open(fname, \"r\")\n\n# set up lists\nduplicates = [[\"Duplicate Bib ID\"]]\nbibs = [[\"Bib ID\"]]\nmfhds = [[\"MFHD ID\"]]\nitems = [[\"Item ID\"]]\n\n# create and open workbook with two sheets\nwb1=openpyxl.Workbook()\nws1=wb1.active\nws1.title = \"Duplicate Bibs\"\nws2 = wb1.create_sheet(index=1, title=\"IDs Added\")\n\n# read through file, extract the line after the line starting with BibID & rank and write to lists\nwith fh as f:\n lines = f.readlines()\n n_lines = len(lines)\n for i, line in enumerate (lines) :\n line = line.rstrip()\n if line.startswith(\"\tBibID & rank\") and \\\n n_lines > i + 2 and lines[i + 2].startswith(\"\") :\n bibline = re.findall(r'\\d+\\s-\\s', lines[i+1])\n dupeid = re.findall(r'\\d+', str(bibline))\n duplicates.append(dupeid)\n elif line.startswith(\"\tAdding Bib\") :\n line = re.findall(r'\\d+',str(line))\n bibs.append(line)\n elif line.startswith(\"MFHD_ID \") :\n line = re.findall(r'\\d+',str(line))\n mfhds.append(line)\n elif line.startswith(\"ITEM_ID \") :\n line = re.findall(r'\\d+',str(line))\n items.append(line)\n else :\n continue\n\n# write the lists to columns in the spreadsheet and save\nfor row in duplicates:\n ws1.append(row)\nfor r in range(0,len(bibs)):\n ws2.cell(row=r+1,column=1).value=bibs[r][0]\nfor r in range(0,len(mfhds)):\n ws2.cell(row=r+1,column=2).value=mfhds[r][0]\nfor r in range(0,len(items)):\n ws2.cell(row=r+1,column=3).value=items[r][0]\nwb1.save(fout + \".xlsx\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from .settings import *
# Heroku Configurations
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# loading local_settings.py
try:
from .local_settings import *
except Exception as e:
pass
# ALLAUTH configuration
# Specify the login method to use
ACCOUNT_USERNAME_REQUIRED = False
# ACCOUNT_AUTHENTICATION_METHOD = "username", "email", "username_email"
# Determines the e-mail verification method during signup – choose one of “mandatory”, “optional”, or “none”.
# When set to “mandatory” the user is blocked from logging in until the email address is verified.
# Choose “optional” or “none” to allow logins with an unverified e-mail address.
# In case of “optional”, the e-mail verification mail is still sent,
# whereas in case of “none” no e-mail verification mails are sent.
ACCOUNT_EMAIL_VERIFICATION = "none"
# Determines whether or not the user is automatically logged out by a mere GET request.
# See documentation for the LogoutView for details.
ACCOUNT_LOGOUT_ON_GET = False
# Request e-mail address from 3rd party account provider?
# E.g. using OpenID AX, or the Facebook “email” permission.
SOCIALACCOUNT_QUERY_EMAIL = True
# Dictionary containing provider specific settings.
SOCIALACCOUNT_PROVIDERS = {
'facebook': {
        # we use facebook js_sdk instead of oauth2
'METHOD': 'js_sdk',
'SCOPE': ['email', 'public_profile', 'user_friends'],
        # using AUTH_PARAMS to pass along other parameters
# to the FB.login JS SDK call
'AUTH_PARAMS': {'auth_type': 'reauthenticate'},
        # fields are fetched from the Graph API
'FIELDS': ['first_name', 'last_name', 'email', 'birthday'],
        # the JS SDK returns a short-lived token suitable for client-side use.
'EXCHANGE_TOKEN': True,
        # Choose the currently active language of the request
'LOCALE_FUNC': 'path.to.callable',
'VERIFIED_EMAIL': False,
# Facebook Graph API version
'VERSION': 'v2.7'
},
'linkedin': {
'SCOPE': ['r_emailaddress'],
'PROFILE_FIELDS': [
'id',
'first-name',
'last-name',
'email-address',
'public-profile-url'
]
}
}
# login redirect url
LOGIN_REDIRECT_URL = "/blog/jobs"
# Default settings
BOOTSTRAP3 = {
# The URL to the jQuery JavaScript file
'jquery_url': '//code.jquery.com/jquery.min.js',
# The Bootstrap base URL
'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/',
# The complete URL to the Bootstrap CSS file (None means derive it from base_url)
'css_url': None,
# The complete URL to the Bootstrap CSS file (None means no theme)
'theme_url': None,
# The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
'javascript_url': None,
# Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
'javascript_in_head': False,
# Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
'include_jquery': False,
# Label class to use in horizontal forms
'horizontal_label_class': 'col-md-3',
# Field class to use in horizontal forms
'horizontal_field_class': 'col-md-9',
# Set HTML required attribute on required fields
'set_required': True,
# Set HTML disabled attribute on disabled fields
'set_disabled': False,
# Set placeholder attributes to label if no placeholder is provided
'set_placeholder': True,
# Class to indicate required (better to set this in your Django form)
'required_css_class': '',
# Class to indicate error (better to set this in your Django form)
'error_css_class': 'has-error',
# Class to indicate success, meaning the field has valid input (better to set this in your Django form)
'success_css_class': 'has-success',
# Renderers (only set these if you have studied the source and understand the inner workings)
'formset_renderers':{
'default': 'bootstrap3.renderers.FormsetRenderer',
},
'form_renderers': {
'default': 'bootstrap3.renderers.FormRenderer',
},
'field_renderers': {
'default': 'bootstrap3.renderers.FieldRenderer',
'inline': 'bootstrap3.renderers.InlineFieldRenderer',
},
}
# Axes Configurations
# Number of login attempts allowed before a record is created for the failed logins.
AXES_LOGIN_FAILURE_LIMIT = 3
# After the number of allowed login attempts is exceeded, should we lock out this IP (and optional user agent)?
AXES_LOCK_OUT_AT_FAILURE = True
# If True, lock out / log based on an IP address AND a user agent. This means requests from different user
# agents but from the same IP are treated differently.
AXES_USE_USER_AGENT = True
# Defines a period of inactivity after which old failed login attempts will be forgotten. You can set it to a
# Python timedelta object or an integer; an integer is interpreted as a number of hours
AXES_COOLOFF_TIME = 50
# Specifies a logging mechanism for axes to use
AXES_LOGGER = 'axes.watch_login'
# Specifies a template to render when a user is locked out. Template receives cooloff_time and failure_limit as
# context variables
AXES_LOCKOUT_TEMPLATE = None
# Specifies a URL to redirect to on lockout. If both AXES_LOCKOUT_TEMPLATE and AXES_LOCKOUT_URL are set, the template
# will be used
AXES_LOCKOUT_URL = None
# If True, you'll see slightly more logging for Axes
AXES_VERBOSE = True
# The name of the form field that contains your usernames
# AXES_USERNAME_FORM_FIELD = username
# If True, prevents login from an IP for a particular user once the attempt limit is exceeded; otherwise lock out based on
# IP. Default: False
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False
# Crispy forms will use BOOTSTRAP3 TEMPLATE PACK
CRISPY_TEMPLATE_PACK = "bootstrap3"
# Signal Admins Configurations
ADMINS = (
("Petar Pilipovic", "[email protected]"),
)
# RESTframework Permission classes configuration
REST_FRAMEWORK = {
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly"
]
}
|
normal
|
{
"blob_id": "8bb86cae3387a0d4ce5987f3e3c458c8298174e0",
"index": 7342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\n<mask token>\n",
"step-3": "<mask token>\nDATABASES = {'default': dj_database_url.config()}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_EMAIL_VERIFICATION = 'none'\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_QUERY_EMAIL = True\nSOCIALACCOUNT_PROVIDERS = {'facebook': {'METHOD': 'js_sdk', 'SCOPE': [\n 'email', 'public_profile', 'user_friends'], 'AUTH_PARAMS': {'auth_type':\n 'reauthenticate'}, 'FIELDS': ['first_name', 'last_name', 'email',\n 'birthday'], 'EXCHANGE_TOKEN': True, 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False, 'VERSION': 'v2.7'}, 'linkedin': {'SCOPE': [\n 'r_emailaddress'], 'PROFILE_FIELDS': ['id', 'first-name', 'last-name',\n 'email-address', 'public-profile-url']}}\nLOGIN_REDIRECT_URL = '/blog/jobs'\nBOOTSTRAP3 = {'jquery_url': '//code.jquery.com/jquery.min.js', 'base_url':\n '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/', 'css_url': None,\n 'theme_url': None, 'javascript_url': None, 'javascript_in_head': False,\n 'include_jquery': False, 'horizontal_label_class': 'col-md-3',\n 'horizontal_field_class': 'col-md-9', 'set_required': True,\n 'set_disabled': False, 'set_placeholder': True, 'required_css_class':\n '', 'error_css_class': 'has-error', 'success_css_class': 'has-success',\n 'formset_renderers': {'default': 'bootstrap3.renderers.FormsetRenderer'\n }, 'form_renderers': {'default': 'bootstrap3.renderers.FormRenderer'},\n 'field_renderers': {'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer'}}\nAXES_LOGIN_FAILURE_LIMIT = 3\nAXES_LOCK_OUT_AT_FAILURE = True\nAXES_USE_USER_AGENT = True\nAXES_COOLOFF_TIME = 50\nAXES_LOCKOUT_TEMPLATE = 'axes.watch_login'\nAXES_LOCKOUT_TEMPLATE = None\nAXES_LOCKOUT_URL = None\nAXES_VERBOSE = True\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\nADMINS = ('Petar Pilipovic', '[email protected]'),\nREST_FRAMEWORK = {'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly']}\n",
"step-4": "from .settings import *\nimport dj_database_url\nDATABASES = {'default': dj_database_url.config()}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_EMAIL_VERIFICATION = 'none'\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_QUERY_EMAIL = True\nSOCIALACCOUNT_PROVIDERS = {'facebook': {'METHOD': 'js_sdk', 'SCOPE': [\n 'email', 'public_profile', 'user_friends'], 'AUTH_PARAMS': {'auth_type':\n 'reauthenticate'}, 'FIELDS': ['first_name', 'last_name', 'email',\n 'birthday'], 'EXCHANGE_TOKEN': True, 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False, 'VERSION': 'v2.7'}, 'linkedin': {'SCOPE': [\n 'r_emailaddress'], 'PROFILE_FIELDS': ['id', 'first-name', 'last-name',\n 'email-address', 'public-profile-url']}}\nLOGIN_REDIRECT_URL = '/blog/jobs'\nBOOTSTRAP3 = {'jquery_url': '//code.jquery.com/jquery.min.js', 'base_url':\n '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/', 'css_url': None,\n 'theme_url': None, 'javascript_url': None, 'javascript_in_head': False,\n 'include_jquery': False, 'horizontal_label_class': 'col-md-3',\n 'horizontal_field_class': 'col-md-9', 'set_required': True,\n 'set_disabled': False, 'set_placeholder': True, 'required_css_class':\n '', 'error_css_class': 'has-error', 'success_css_class': 'has-success',\n 'formset_renderers': {'default': 'bootstrap3.renderers.FormsetRenderer'\n }, 'form_renderers': {'default': 'bootstrap3.renderers.FormRenderer'},\n 'field_renderers': {'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer'}}\nAXES_LOGIN_FAILURE_LIMIT = 3\nAXES_LOCK_OUT_AT_FAILURE = True\nAXES_USE_USER_AGENT = True\nAXES_COOLOFF_TIME = 50\nAXES_LOCKOUT_TEMPLATE = 'axes.watch_login'\nAXES_LOCKOUT_TEMPLATE = None\nAXES_LOCKOUT_URL = None\nAXES_VERBOSE = True\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\nADMINS = ('Petar Pilipovic', '[email protected]'),\nREST_FRAMEWORK = {'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly']}\n",
"step-5": "from .settings import *\n\n\n\n# Heroku Configurations\n# Parse database configuration from $DATABASE_URL\nimport dj_database_url\n\nDATABASES = {'default': dj_database_url.config()}\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n# loading local_settings.py\ntry:\n from .local_settings import *\nexcept Exception as e:\n pass\n\n# ALLAUTH configuration\n\n\n# Specific the login method to use\nACCOUNT_USERNAME_REQUIRED = False\n# ACCOUNT_AUTHENTICATION_METHOD = \"username\", \"email\", \"username_email\"\n\n# Determines the e-mail verification method during signup – choose one of “mandatory”, “optional”, or “none”.\n# When set to “mandatory” the user is blocked from logging in until the email address is verified.\n# Choose “optional” or “none” to allow logins with an unverified e-mail address.\n# In case of “optional”, the e-mail verification mail is still sent,\n# whereas in case of “none” no e-mail verification mails are sent.\nACCOUNT_EMAIL_VERIFICATION = \"none\"\n\n# Determines whether or not the user is automatically logged out by a mere GET request.\n# See documentation for the LogoutView for details.\nACCOUNT_LOGOUT_ON_GET = False\n\n# Request e-mail address from 3rd import party account provider?\n# E.g. using OpenID AX, or the Facebook “email” permission.\nSOCIALACCOUNT_QUERY_EMAIL = True\n\n# Dictionary containing provider specific settings.\nSOCIALACCOUNT_PROVIDERS = {\n 'facebook': {\n # we use facebook js_sdk instead od oauth2\n 'METHOD': 'js_sdk',\n 'SCOPE': ['email', 'public_profile', 'user_friends'],\n # using AUTH_PARAMS to pass along other parametees\n # to the FB.login JS SDK call\n 'AUTH_PARAMS': {'auth_type': 'reauthenticate'},\n # field are fetch from the import Graph API\n 'FIELDS': ['first_name', 'last_name', 'email', 'birthday'],\n # JS SDK return a short-lived token suitable for client-side use.\n 'EXCHANGE_TOKEN': True,\n # Chose the current active language of the request\n 'LOCALE_FUNC': 'path.to.callable',\n 'VERIFIED_EMAIL': False,\n # Facebook Graph API version\n 'VERSION': 'v2.7'\n },\n 'linkedin': {\n 'SCOPE': ['r_emailaddress'],\n 'PROFILE_FIELDS': [\n 'id',\n 'first-name',\n 'last-name',\n 'email-address',\n 'public-profile-url'\n ]\n }\n}\n\n# login redirect url\nLOGIN_REDIRECT_URL = \"/blog/jobs\"\n\n# Default settings\nBOOTSTRAP3 = {\n\n # The URL to the jQuery JavaScript file\n 'jquery_url': '//code.jquery.com/jquery.min.js',\n\n # The Bootstrap base URL\n 'base_url': '//maxcdn.bootstrapcdn.com/bootstrap/3.3.7/',\n\n # The complete URL to the Bootstrap CSS file (None means derive it from base_url)\n 'css_url': None,\n\n # The complete URL to the Bootstrap CSS file (None means no theme)\n 'theme_url': None,\n\n # The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)\n 'javascript_url': None,\n\n # Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)\n 'javascript_in_head': False,\n\n # Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)\n 'include_jquery': False,\n\n # Label class to use in horizontal forms\n 'horizontal_label_class': 'col-md-3',\n\n # Field class to use in horizontal forms\n 'horizontal_field_class': 'col-md-9',\n\n # Set HTML required attribute on required fields\n 'set_required': True,\n\n # Set HTML disabled attribute on disabled fields\n 'set_disabled': False,\n\n # Set placeholder attributes to label if no placeholder is 
provided\n 'set_placeholder': True,\n\n # Class to indicate required (better to set this in your Django form)\n 'required_css_class': '',\n\n # Class to indicate error (better to set this in your Django form)\n 'error_css_class': 'has-error',\n\n # Class to indicate success, meaning the field has valid input (better to set this in your Django form)\n 'success_css_class': 'has-success',\n\n # Renderers (only set these if you have studied the source and understand the inner workings)\n 'formset_renderers':{\n 'default': 'bootstrap3.renderers.FormsetRenderer',\n },\n 'form_renderers': {\n 'default': 'bootstrap3.renderers.FormRenderer',\n },\n 'field_renderers': {\n 'default': 'bootstrap3.renderers.FieldRenderer',\n 'inline': 'bootstrap3.renderers.InlineFieldRenderer',\n },\n}\n\n# Axes Configurations\n# Number of login attempts allowed before a record is created for the failed logins.\nAXES_LOGIN_FAILURE_LIMIT = 3\n\n# After the number os allowed login attempts are exceeded, should we lock this IP (and optinal user agend)?\nAXES_LOCK_OUT_AT_FAILURE = True\n\n# If True, lock out / log based on an IP address AND a user agent. This means requests from different import user\n# agents but from the import same IP are treated differently.\nAXES_USE_USER_AGENT = True\n\n# Defines a period of inactivity after which old failed login attempts will be forgotten. You can set to a\n# python timedelta object or an integer, if you set it to be integer it will represent a number of hours\nAXES_COOLOFF_TIME = 50\n\n# Specifies a logging mechanism for axes to use\nAXES_LOCKOUT_TEMPLATE = 'axes.watch_login'\n\n# Specifies a template to render when a user is locked out. Template receives cooloff_time and failure_limit as\n# context variables\nAXES_LOCKOUT_TEMPLATE = None\n\n# Specifies a URL to redirect to on lockout. If both AXES_LOCKOUT_TEMPLATE and AXES_LOCKOUT_URL are set, the template\n# will be used\nAXES_LOCKOUT_URL = None\n\n# If Truem you'll see slightly more logging for Axes\nAXES_VERBOSE = True\n\n# The name of the for field that contains your usernames\n# AXES_USERNAME_FORM_FIELD = username\n\n# If True prevents to login from IP import under particular user if attempts limit exceed, otherwise lock out based on\n# IP. Default: False\nAXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False\n\n# Crispy forms will use BOOTSTRAP3 TEMPLATE PACK\nCRISPY_TEMPLATE_PACK = \"bootstrap3\"\n\n# Signal Admins Configurations\nADMINS = (\n (\"Petar Pilipovic\", \"[email protected]\"),\n)\n\n# RESTframework Permission classes configuration\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": [\n \"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly\"\n ]\n}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
K = input()
mat = "".join(raw_input() for i in xrange(4))
print ("YES", "NO")[max(mat.count(str(i)) for i in xrange(1, 10)) > K*2]
|
normal
|
{
"blob_id": "879f7503f7f427f92109024b4646d1dc7f15d63d",
"index": 2153,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('YES', 'NO')[max(mat.count(str(i)) for i in xrange(1, 10)) > K * 2]\n",
"step-3": "K = input()\nmat = ''.join(raw_input() for i in xrange(4))\nprint('YES', 'NO')[max(mat.count(str(i)) for i in xrange(1, 10)) > K * 2]\n",
"step-4": "K = input()\nmat = \"\".join(raw_input() for i in xrange(4))\nprint (\"YES\", \"NO\")[max(mat.count(str(i)) for i in xrange(1, 10)) > K*2]\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from typing import Any, Optional
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from scene_manager.loader.loader import Loader
from scene_manager.utils import content_type_checker
class ScenesMiddleware(BaseMiddleware):
def __init__(self, *, loader: Optional[Loader] = None, default_scene_name: Optional[str] = None):
self._default_scene_name = default_scene_name or "start"
self._loader = loader or Loader.get_current()
if self._loader is None:
self._loader = Loader()
if not self._loader.is_scenes_loaded:
self._loader.load_scenes()
self._storage = self._loader.data_storage
super().__init__()
async def on_post_process_message(self, message: types.Message, results: tuple, data: dict):
if data:
return
user_scene_name = await self._get_scene_name(message)
for scene_model in self._loader.handlers_storage.get_message_scene(user_scene_name):
if content_type_checker(message, scene_model.config.get("content_types")):
await scene_model.handler(message)
else:
otherwise_handler = scene_model.config.get("otherwise_handler")
if otherwise_handler is not None:
await otherwise_handler(message)
async def on_post_process_callback_query(
self, callback_query: types.CallbackQuery, results: tuple, data: dict
):
if data:
return
user_scene_name = await self._get_scene_name(callback_query)
for scene_model in self._loader.handlers_storage.get_callback_query_scene(user_scene_name):
await scene_model.handler(callback_query)
async def _get_scene_name(self, ctx) -> Any:
user_id = ctx.from_user.id
user_scene = await self._storage.get(user_id)
if user_scene is None:
await self._storage.put(user_id, self._default_scene_name)
user_scene = self._default_scene_name
return user_scene
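# --- Illustrative wiring sketch (editor's addition; the bot token and polling setup are
# assumptions, not part of this module). With aiogram 2.x the middleware would typically
# be registered on the Dispatcher like this:
#
#     from aiogram import Bot, Dispatcher, executor
#
#     bot = Bot(token="<BOT_TOKEN>")
#     dp = Dispatcher(bot)
#     dp.middleware.setup(ScenesMiddleware(default_scene_name="start"))
#     executor.start_polling(dp, skip_updates=True)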
|
normal
|
{
"blob_id": "11db76cba3dd76cad0d660a0e189d3e4c465071b",
"index": 8836,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ScenesMiddleware(BaseMiddleware):\n <mask token>\n\n async def on_post_process_message(self, message: types.Message, results:\n tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(message)\n for scene_model in self._loader.handlers_storage.get_message_scene(\n user_scene_name):\n if content_type_checker(message, scene_model.config.get(\n 'content_types')):\n await scene_model.handler(message)\n else:\n otherwise_handler = scene_model.config.get('otherwise_handler')\n if otherwise_handler is not None:\n await otherwise_handler(message)\n\n async def on_post_process_callback_query(self, callback_query: types.\n CallbackQuery, results: tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(callback_query)\n for scene_model in self._loader.handlers_storage.get_callback_query_scene(\n user_scene_name):\n await scene_model.handler(callback_query)\n\n async def _get_scene_name(self, ctx) ->Any:\n user_id = ctx.from_user.id\n user_scene = await self._storage.get(user_id)\n if user_scene is None:\n await self._storage.put(user_id, self._default_scene_name)\n user_scene = self._default_scene_name\n return user_scene\n",
"step-3": "<mask token>\n\n\nclass ScenesMiddleware(BaseMiddleware):\n\n def __init__(self, *, loader: Optional[Loader]=None, default_scene_name:\n Optional[str]=None):\n self._default_scene_name = default_scene_name or 'start'\n self._loader = loader or Loader.get_current()\n if self._loader is None:\n self._loader = Loader()\n if not self._loader.is_scenes_loaded:\n self._loader.load_scenes()\n self._storage = self._loader.data_storage\n super().__init__()\n\n async def on_post_process_message(self, message: types.Message, results:\n tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(message)\n for scene_model in self._loader.handlers_storage.get_message_scene(\n user_scene_name):\n if content_type_checker(message, scene_model.config.get(\n 'content_types')):\n await scene_model.handler(message)\n else:\n otherwise_handler = scene_model.config.get('otherwise_handler')\n if otherwise_handler is not None:\n await otherwise_handler(message)\n\n async def on_post_process_callback_query(self, callback_query: types.\n CallbackQuery, results: tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(callback_query)\n for scene_model in self._loader.handlers_storage.get_callback_query_scene(\n user_scene_name):\n await scene_model.handler(callback_query)\n\n async def _get_scene_name(self, ctx) ->Any:\n user_id = ctx.from_user.id\n user_scene = await self._storage.get(user_id)\n if user_scene is None:\n await self._storage.put(user_id, self._default_scene_name)\n user_scene = self._default_scene_name\n return user_scene\n",
"step-4": "from typing import Any, Optional\nfrom aiogram import types\nfrom aiogram.dispatcher.middlewares import BaseMiddleware\nfrom scene_manager.loader.loader import Loader\nfrom scene_manager.utils import content_type_checker\n\n\nclass ScenesMiddleware(BaseMiddleware):\n\n def __init__(self, *, loader: Optional[Loader]=None, default_scene_name:\n Optional[str]=None):\n self._default_scene_name = default_scene_name or 'start'\n self._loader = loader or Loader.get_current()\n if self._loader is None:\n self._loader = Loader()\n if not self._loader.is_scenes_loaded:\n self._loader.load_scenes()\n self._storage = self._loader.data_storage\n super().__init__()\n\n async def on_post_process_message(self, message: types.Message, results:\n tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(message)\n for scene_model in self._loader.handlers_storage.get_message_scene(\n user_scene_name):\n if content_type_checker(message, scene_model.config.get(\n 'content_types')):\n await scene_model.handler(message)\n else:\n otherwise_handler = scene_model.config.get('otherwise_handler')\n if otherwise_handler is not None:\n await otherwise_handler(message)\n\n async def on_post_process_callback_query(self, callback_query: types.\n CallbackQuery, results: tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(callback_query)\n for scene_model in self._loader.handlers_storage.get_callback_query_scene(\n user_scene_name):\n await scene_model.handler(callback_query)\n\n async def _get_scene_name(self, ctx) ->Any:\n user_id = ctx.from_user.id\n user_scene = await self._storage.get(user_id)\n if user_scene is None:\n await self._storage.put(user_id, self._default_scene_name)\n user_scene = self._default_scene_name\n return user_scene\n",
"step-5": "from typing import Any, Optional\n\nfrom aiogram import types\nfrom aiogram.dispatcher.middlewares import BaseMiddleware\n\nfrom scene_manager.loader.loader import Loader\nfrom scene_manager.utils import content_type_checker\n\n\nclass ScenesMiddleware(BaseMiddleware):\n def __init__(self, *, loader: Optional[Loader] = None, default_scene_name: Optional[str] = None):\n self._default_scene_name = default_scene_name or \"start\"\n self._loader = loader or Loader.get_current()\n if self._loader is None:\n self._loader = Loader()\n if not self._loader.is_scenes_loaded:\n self._loader.load_scenes()\n self._storage = self._loader.data_storage\n super().__init__()\n\n async def on_post_process_message(self, message: types.Message, results: tuple, data: dict):\n if data:\n return\n user_scene_name = await self._get_scene_name(message)\n for scene_model in self._loader.handlers_storage.get_message_scene(user_scene_name):\n if content_type_checker(message, scene_model.config.get(\"content_types\")):\n await scene_model.handler(message)\n else:\n otherwise_handler = scene_model.config.get(\"otherwise_handler\")\n if otherwise_handler is not None:\n await otherwise_handler(message)\n\n async def on_post_process_callback_query(\n self, callback_query: types.CallbackQuery, results: tuple, data: dict\n ):\n if data:\n return\n user_scene_name = await self._get_scene_name(callback_query)\n for scene_model in self._loader.handlers_storage.get_callback_query_scene(user_scene_name):\n await scene_model.handler(callback_query)\n\n async def _get_scene_name(self, ctx) -> Any:\n user_id = ctx.from_user.id\n user_scene = await self._storage.get(user_id)\n if user_scene is None:\n await self._storage.put(user_id, self._default_scene_name)\n user_scene = self._default_scene_name\n return user_scene\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
    Find the highest value, the fastest one, the slowest one:
    that is what optimization algorithms are for.
    For that we need to think of a function we want to maximize or minimize.
    They are applied above all by companies such as Despegar, where they can build good businesses.
    Companies built on optimization.
    #############################################
    Traveling Salesman
    Which is the most efficient route to visit every city?
    Solving the traveling salesman problem.
    Turing Prize
'''
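# --- Editor's sketch (not part of the original note): a tiny brute-force version of the
# traveling-salesman idea mentioned above. The city coordinates are invented, and this
# approach only works for a handful of cities because it tries every possible route.
from itertools import permutations


def route_length(route, coords):
    # Total round-trip distance when visiting the cities in the given order.
    total = 0.0
    for a, b in zip(route, route[1:] + route[:1]):
        (x1, y1), (x2, y2) = coords[a], coords[b]
        total += ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
    return total


def brute_force_tsp(coords):
    # Try every ordering of the cities and keep the shortest round trip.
    best = min(permutations(coords), key=lambda r: route_length(list(r), coords))
    return list(best), route_length(list(best), coords)


if __name__ == "__main__":
    cities = {"A": (0, 0), "B": (1, 5), "C": (4, 1), "D": (6, 3)}
    print(brute_force_tsp(cities))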
|
normal
|
{
"blob_id": "7163be250ae3a22931de037cb6896c2e6d5f00a8",
"index": 584,
"step-1": "<mask token>\n",
"step-2": "'''\n Encontrar el valor mas alto el mas rapido, el mas lento\n para eso son los algoritmos de optimizacion\n Para eso debemos pensar en una funcion que queramos maximizar o minimizar\n Se aplican mas que todo para empresas como despegar, en donde se pueden generar buenas empresas\n Empresas a la optimizacion \n #############################################33\n Traveling Sales Man\n Cual es la ruta mas eficiente para recorrer todas las ciudades\n Resolver el algoritmo de sales man \n Turing Prize\n'''\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .authority import *
from .ca_pool import *
from .ca_pool_iam_binding import *
from .ca_pool_iam_member import *
from .ca_pool_iam_policy import *
from .certificate import *
from .certificate_template import *
from .certificate_template_iam_binding import *
from .certificate_template_iam_member import *
from .certificate_template_iam_policy import *
from .get_authority import *
from .get_ca_pool_iam_policy import *
from .get_certificate_template_iam_policy import *
from ._inputs import *
from . import outputs
|
normal
|
{
"blob_id": "4ca4d4bd684802b056417be4ee3d7d10e8f5dc85",
"index": 8842,
"step-1": "<mask token>\n",
"step-2": "from .. import _utilities\nimport typing\nfrom .authority import *\nfrom .ca_pool import *\nfrom .ca_pool_iam_binding import *\nfrom .ca_pool_iam_member import *\nfrom .ca_pool_iam_policy import *\nfrom .certificate import *\nfrom .certificate_template import *\nfrom .certificate_template_iam_binding import *\nfrom .certificate_template_iam_member import *\nfrom .certificate_template_iam_policy import *\nfrom .get_authority import *\nfrom .get_ca_pool_iam_policy import *\nfrom .get_certificate_template_iam_policy import *\nfrom ._inputs import *\nfrom . import outputs\n",
"step-3": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nfrom .. import _utilities\nimport typing\n# Export this package's modules as members:\nfrom .authority import *\nfrom .ca_pool import *\nfrom .ca_pool_iam_binding import *\nfrom .ca_pool_iam_member import *\nfrom .ca_pool_iam_policy import *\nfrom .certificate import *\nfrom .certificate_template import *\nfrom .certificate_template_iam_binding import *\nfrom .certificate_template_iam_member import *\nfrom .certificate_template_iam_policy import *\nfrom .get_authority import *\nfrom .get_ca_pool_iam_policy import *\nfrom .get_certificate_template_iam_policy import *\nfrom ._inputs import *\nfrom . import outputs\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import flask
import flask_sqlalchemy
app = flask.Flask(__name__)
app.config.from_pyfile('settings.py')
db = flask_sqlalchemy.SQLAlchemy(app)
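# --- Illustrative sketch (editor's addition): a minimal model built on the `db` object
# above. It assumes settings.py defines SQLALCHEMY_DATABASE_URI for the connection.
class Note(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.String(255), nullable=False)

# The tables would then be created once, inside an application context:
#     with app.app_context():
#         db.create_all()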
|
normal
|
{
"blob_id": "2ed0ae48e8fec2c92effcbb3e495a1a9f4636c27",
"index": 6777,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp.config.from_pyfile('settings.py')\n<mask token>\n",
"step-3": "<mask token>\napp = flask.Flask(__name__)\napp.config.from_pyfile('settings.py')\ndb = flask_sqlalchemy.SQLAlchemy(app)\n",
"step-4": "import flask\nimport flask_sqlalchemy\napp = flask.Flask(__name__)\napp.config.from_pyfile('settings.py')\ndb = flask_sqlalchemy.SQLAlchemy(app)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import re
import datetime
from django import forms
from django.utils.translation import ugettext as _
from vcg.util.forms import mobile_number_validation
from vcg.company_management.models import ConfigurationContact, ConfigurationLogo, ConfigurationHomepage, ConfigurationLocation
class ConfigurationContactForm(forms.ModelForm):
class Meta:
model = ConfigurationContact
def __init__(self, *args, **kwargs):
super(ConfigurationContactForm, self).__init__(*args, **kwargs)
self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'
self.fields['name_of_institution'].widget.attrs['class'] = 'form-text'
self.fields['email_external'].widget.attrs['class'] = 'form-text'
self.fields['country_code_external'].widget.attrs['class'] = 'form-text-small'
self.fields['phone_number_external'].widget.attrs['class'] = 'form-text-phone'
self.fields['email_internal'].widget.attrs['class'] = 'form-text'
self.fields['country_code_internal'].widget.attrs['class'] = 'form-text-small'
self.fields['phone_number_internal'].widget.attrs['class'] = 'form-text-phone'
if 'instance' in kwargs:
self.id = kwargs['instance'].id
else:
self.id = ""
def clean(self):
phone_number_external = self.cleaned_data.get("phone_number_external")
country_code_external = self.cleaned_data.get("country_code_external")
phone_number_internal = self.cleaned_data.get("phone_number_internal")
country_code_internal = self.cleaned_data.get("country_code_internal")
if phone_number_external and not country_code_external:
raise forms.ValidationError(_('External Country code Field is required .'))
if country_code_external and not phone_number_external:
raise forms.ValidationError(_('External Phone Number Field is required .'))
if phone_number_internal and not country_code_internal:
raise forms.ValidationError(_('Internal Country code Field is required .'))
if country_code_internal and not phone_number_internal:
raise forms.ValidationError(_('Internal Phone Number Field is required .'))
return self.cleaned_data
def clean_name_of_institution(self):
name_of_institution = self.cleaned_data['name_of_institution']
if name_of_institution:
if len(name_of_institution) < 3:
raise forms.ValidationError(_('Enter minimum 3 characters.'))
elif re.match(r'^[\s]*$', name_of_institution):
raise forms.ValidationError(_("Enter a valid name."))
return name_of_institution
def clean_country_code_external(self):
country_code_external = self.cleaned_data['country_code_external']
if country_code_external:
if len(str(country_code_external)) > 5:
raise forms.ValidationError(_('maximum 5 characters.'))
return country_code_external
def clean_phone_number_external(self):
phone_number_external = self.cleaned_data['phone_number_external']
if phone_number_external:
phone_number_external = mobile_number_validation(phone_number_external)
if not phone_number_external:
raise forms.ValidationError(_("Enter a valid contact number"))
return phone_number_external
def clean_country_code_internal(self):
country_code_internal = self.cleaned_data['country_code_internal']
if country_code_internal:
if len(str(country_code_internal)) > 5:
raise forms.ValidationError(_('maximum 5 characters.'))
return country_code_internal
def clean_phone_number_internal(self):
phone_number_internal = self.cleaned_data['phone_number_internal']
if phone_number_internal:
phone_number_internal = mobile_number_validation(phone_number_internal)
if not phone_number_internal:
raise forms.ValidationError(_("Enter a valid contact number"))
return phone_number_internal
class ConfigurationLogoForm(forms.ModelForm):
class Meta:
model = ConfigurationLogo
def __init__(self, *args, **kwargs):
super(ConfigurationLogoForm, self).__init__(*args, **kwargs)
if 'instance' in kwargs:
self.id = kwargs['instance'].id
else:
self.id = ""
class ConfigurationHomepageForm(forms.ModelForm):
class Meta:
model = ConfigurationHomepage
def __init__(self, *args, **kwargs):
super(ConfigurationHomepageForm, self).__init__(*args, **kwargs)
self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'
self.fields['header'].widget.attrs['class'] = 'form-text'
self.fields['introduction'].widget.attrs['class'] = 'form-textarea'
if 'instance' in kwargs:
self.id = kwargs['instance'].id
else:
self.id = ""
def clean_header(self):
header = self.cleaned_data['header']
if header:
if len(header) < 3:
raise forms.ValidationError(_('Enter minimum 3 characters.'))
elif re.match(r'^[\s]*$', header):
raise forms.ValidationError(_("Enter a valid name."))
return header
def clean_introduction(self):
introduction = self.cleaned_data['introduction']
if introduction:
if len(introduction) < 10:
raise forms.ValidationError(_('Enter minimum 10 characters.'))
elif re.match(r'^[\s]*$', introduction):
raise forms.ValidationError(_("Enter a valid address."))
return introduction
class ConfigurationLocationForm(forms.ModelForm):
class Meta:
model = ConfigurationLocation
def __init__(self, *args, **kwargs):
super(ConfigurationLocationForm, self).__init__(*args, **kwargs)
self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'
self.fields['country'].widget.attrs['class'] = 'form-dropdownfield'
self.fields['continent'].widget.attrs['class'] = 'form-dropdownfield'
if 'instance' in kwargs:
self.id = kwargs['instance'].id
else:
self.id = ""
|
normal
|
{
"blob_id": "f6f1cd95e4aaa5e434c3cf3cff0d46b45fc7b830",
"index": 6190,
"step-1": "<mask token>\n\n\nclass ConfigurationContactForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationContact\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def clean_phone_number_external(self):\n phone_number_external = self.cleaned_data['phone_number_external']\n if phone_number_external:\n phone_number_external = mobile_number_validation(\n phone_number_external)\n if not phone_number_external:\n raise forms.ValidationError(_('Enter a valid contact number'))\n return phone_number_external\n <mask token>\n\n def clean_phone_number_internal(self):\n phone_number_internal = self.cleaned_data['phone_number_internal']\n if phone_number_internal:\n phone_number_internal = mobile_number_validation(\n phone_number_internal)\n if not phone_number_internal:\n raise forms.ValidationError(_('Enter a valid contact number'))\n return phone_number_internal\n\n\nclass ConfigurationLogoForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationLogo\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationLogoForm, self).__init__(*args, **kwargs)\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n\n\nclass ConfigurationHomepageForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationHomepage\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationHomepageForm, self).__init__(*args, **kwargs)\n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['header'].widget.attrs['class'] = 'form-text'\n self.fields['introduction'].widget.attrs['class'] = 'form-textarea'\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n\n def clean_header(self):\n header = self.cleaned_data['header']\n if header:\n if len(header) < 3:\n raise forms.ValidationError(_('Enter minimum 3 characters.'))\n elif re.match('^[\\\\s]*$', header):\n raise forms.ValidationError(_('Enter a valid name.'))\n return header\n\n def clean_introduction(self):\n introduction = self.cleaned_data['introduction']\n if introduction:\n if len(introduction) < 10:\n raise forms.ValidationError(_('Enter minimum 10 characters.'))\n elif re.match('^[\\\\s]*$', introduction):\n raise forms.ValidationError(_('Enter a valid address.'))\n return introduction\n\n\nclass ConfigurationLocationForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationLocation\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationLocationForm, self).__init__(*args, **kwargs)\n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['country'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['continent'].widget.attrs['class'] = 'form-dropdownfield'\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n",
"step-2": "<mask token>\n\n\nclass ConfigurationContactForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationContact\n <mask token>\n <mask token>\n\n def clean_name_of_institution(self):\n name_of_institution = self.cleaned_data['name_of_institution']\n if name_of_institution:\n if len(name_of_institution) < 3:\n raise forms.ValidationError(_('Enter minimum 3 characters.'))\n elif re.match('^[\\\\s]*$', name_of_institution):\n raise forms.ValidationError(_('Enter a valid name.'))\n return name_of_institution\n\n def clean_country_code_external(self):\n country_code_external = self.cleaned_data['country_code_external']\n if country_code_external:\n if len(str(country_code_external)) > 5:\n raise forms.ValidationError(_('maximum 5 characters.'))\n return country_code_external\n\n def clean_phone_number_external(self):\n phone_number_external = self.cleaned_data['phone_number_external']\n if phone_number_external:\n phone_number_external = mobile_number_validation(\n phone_number_external)\n if not phone_number_external:\n raise forms.ValidationError(_('Enter a valid contact number'))\n return phone_number_external\n <mask token>\n\n def clean_phone_number_internal(self):\n phone_number_internal = self.cleaned_data['phone_number_internal']\n if phone_number_internal:\n phone_number_internal = mobile_number_validation(\n phone_number_internal)\n if not phone_number_internal:\n raise forms.ValidationError(_('Enter a valid contact number'))\n return phone_number_internal\n\n\nclass ConfigurationLogoForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationLogo\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationLogoForm, self).__init__(*args, **kwargs)\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n\n\nclass ConfigurationHomepageForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationHomepage\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationHomepageForm, self).__init__(*args, **kwargs)\n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['header'].widget.attrs['class'] = 'form-text'\n self.fields['introduction'].widget.attrs['class'] = 'form-textarea'\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n\n def clean_header(self):\n header = self.cleaned_data['header']\n if header:\n if len(header) < 3:\n raise forms.ValidationError(_('Enter minimum 3 characters.'))\n elif re.match('^[\\\\s]*$', header):\n raise forms.ValidationError(_('Enter a valid name.'))\n return header\n\n def clean_introduction(self):\n introduction = self.cleaned_data['introduction']\n if introduction:\n if len(introduction) < 10:\n raise forms.ValidationError(_('Enter minimum 10 characters.'))\n elif re.match('^[\\\\s]*$', introduction):\n raise forms.ValidationError(_('Enter a valid address.'))\n return introduction\n\n\nclass ConfigurationLocationForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationLocation\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationLocationForm, self).__init__(*args, **kwargs)\n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['country'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['continent'].widget.attrs['class'] = 'form-dropdownfield'\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n",
"step-3": "<mask token>\n\n\nclass ConfigurationContactForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationContact\n <mask token>\n\n def clean(self):\n phone_number_external = self.cleaned_data.get('phone_number_external')\n country_code_external = self.cleaned_data.get('country_code_external')\n phone_number_internal = self.cleaned_data.get('phone_number_internal')\n country_code_internal = self.cleaned_data.get('country_code_internal')\n if phone_number_external and not country_code_external:\n raise forms.ValidationError(_(\n 'External Country code Field is required .'))\n if country_code_external and not phone_number_external:\n raise forms.ValidationError(_(\n 'External Phone Number Field is required .'))\n if phone_number_internal and not country_code_internal:\n raise forms.ValidationError(_(\n 'Internal Country code Field is required .'))\n if country_code_internal and not phone_number_internal:\n raise forms.ValidationError(_(\n 'Internal Phone Number Field is required .'))\n return self.cleaned_data\n\n def clean_name_of_institution(self):\n name_of_institution = self.cleaned_data['name_of_institution']\n if name_of_institution:\n if len(name_of_institution) < 3:\n raise forms.ValidationError(_('Enter minimum 3 characters.'))\n elif re.match('^[\\\\s]*$', name_of_institution):\n raise forms.ValidationError(_('Enter a valid name.'))\n return name_of_institution\n\n def clean_country_code_external(self):\n country_code_external = self.cleaned_data['country_code_external']\n if country_code_external:\n if len(str(country_code_external)) > 5:\n raise forms.ValidationError(_('maximum 5 characters.'))\n return country_code_external\n\n def clean_phone_number_external(self):\n phone_number_external = self.cleaned_data['phone_number_external']\n if phone_number_external:\n phone_number_external = mobile_number_validation(\n phone_number_external)\n if not phone_number_external:\n raise forms.ValidationError(_('Enter a valid contact number'))\n return phone_number_external\n\n def clean_country_code_internal(self):\n country_code_internal = self.cleaned_data['country_code_internal']\n if country_code_internal:\n if len(str(country_code_internal)) > 5:\n raise forms.ValidationError(_('maximum 5 characters.'))\n return country_code_internal\n\n def clean_phone_number_internal(self):\n phone_number_internal = self.cleaned_data['phone_number_internal']\n if phone_number_internal:\n phone_number_internal = mobile_number_validation(\n phone_number_internal)\n if not phone_number_internal:\n raise forms.ValidationError(_('Enter a valid contact number'))\n return phone_number_internal\n\n\nclass ConfigurationLogoForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationLogo\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationLogoForm, self).__init__(*args, **kwargs)\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n\n\nclass ConfigurationHomepageForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationHomepage\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationHomepageForm, self).__init__(*args, **kwargs)\n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['header'].widget.attrs['class'] = 'form-text'\n self.fields['introduction'].widget.attrs['class'] = 'form-textarea'\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n\n def clean_header(self):\n header = self.cleaned_data['header']\n if header:\n if len(header) < 3:\n raise 
forms.ValidationError(_('Enter minimum 3 characters.'))\n elif re.match('^[\\\\s]*$', header):\n raise forms.ValidationError(_('Enter a valid name.'))\n return header\n\n def clean_introduction(self):\n introduction = self.cleaned_data['introduction']\n if introduction:\n if len(introduction) < 10:\n raise forms.ValidationError(_('Enter minimum 10 characters.'))\n elif re.match('^[\\\\s]*$', introduction):\n raise forms.ValidationError(_('Enter a valid address.'))\n return introduction\n\n\nclass ConfigurationLocationForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationLocation\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationLocationForm, self).__init__(*args, **kwargs)\n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['country'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['continent'].widget.attrs['class'] = 'form-dropdownfield'\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n",
"step-4": "<mask token>\n\n\nclass ConfigurationContactForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationContact\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationContactForm, self).__init__(*args, **kwargs)\n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['name_of_institution'].widget.attrs['class'] = 'form-text'\n self.fields['email_external'].widget.attrs['class'] = 'form-text'\n self.fields['country_code_external'].widget.attrs['class'\n ] = 'form-text-small'\n self.fields['phone_number_external'].widget.attrs['class'\n ] = 'form-text-phone'\n self.fields['email_internal'].widget.attrs['class'] = 'form-text'\n self.fields['country_code_internal'].widget.attrs['class'\n ] = 'form-text-small'\n self.fields['phone_number_internal'].widget.attrs['class'\n ] = 'form-text-phone'\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n\n def clean(self):\n phone_number_external = self.cleaned_data.get('phone_number_external')\n country_code_external = self.cleaned_data.get('country_code_external')\n phone_number_internal = self.cleaned_data.get('phone_number_internal')\n country_code_internal = self.cleaned_data.get('country_code_internal')\n if phone_number_external and not country_code_external:\n raise forms.ValidationError(_(\n 'External Country code Field is required .'))\n if country_code_external and not phone_number_external:\n raise forms.ValidationError(_(\n 'External Phone Number Field is required .'))\n if phone_number_internal and not country_code_internal:\n raise forms.ValidationError(_(\n 'Internal Country code Field is required .'))\n if country_code_internal and not phone_number_internal:\n raise forms.ValidationError(_(\n 'Internal Phone Number Field is required .'))\n return self.cleaned_data\n\n def clean_name_of_institution(self):\n name_of_institution = self.cleaned_data['name_of_institution']\n if name_of_institution:\n if len(name_of_institution) < 3:\n raise forms.ValidationError(_('Enter minimum 3 characters.'))\n elif re.match('^[\\\\s]*$', name_of_institution):\n raise forms.ValidationError(_('Enter a valid name.'))\n return name_of_institution\n\n def clean_country_code_external(self):\n country_code_external = self.cleaned_data['country_code_external']\n if country_code_external:\n if len(str(country_code_external)) > 5:\n raise forms.ValidationError(_('maximum 5 characters.'))\n return country_code_external\n\n def clean_phone_number_external(self):\n phone_number_external = self.cleaned_data['phone_number_external']\n if phone_number_external:\n phone_number_external = mobile_number_validation(\n phone_number_external)\n if not phone_number_external:\n raise forms.ValidationError(_('Enter a valid contact number'))\n return phone_number_external\n\n def clean_country_code_internal(self):\n country_code_internal = self.cleaned_data['country_code_internal']\n if country_code_internal:\n if len(str(country_code_internal)) > 5:\n raise forms.ValidationError(_('maximum 5 characters.'))\n return country_code_internal\n\n def clean_phone_number_internal(self):\n phone_number_internal = self.cleaned_data['phone_number_internal']\n if phone_number_internal:\n phone_number_internal = mobile_number_validation(\n phone_number_internal)\n if not phone_number_internal:\n raise forms.ValidationError(_('Enter a valid contact number'))\n return phone_number_internal\n\n\nclass ConfigurationLogoForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationLogo\n\n def __init__(self, 
*args, **kwargs):\n super(ConfigurationLogoForm, self).__init__(*args, **kwargs)\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n\n\nclass ConfigurationHomepageForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationHomepage\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationHomepageForm, self).__init__(*args, **kwargs)\n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['header'].widget.attrs['class'] = 'form-text'\n self.fields['introduction'].widget.attrs['class'] = 'form-textarea'\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n\n def clean_header(self):\n header = self.cleaned_data['header']\n if header:\n if len(header) < 3:\n raise forms.ValidationError(_('Enter minimum 3 characters.'))\n elif re.match('^[\\\\s]*$', header):\n raise forms.ValidationError(_('Enter a valid name.'))\n return header\n\n def clean_introduction(self):\n introduction = self.cleaned_data['introduction']\n if introduction:\n if len(introduction) < 10:\n raise forms.ValidationError(_('Enter minimum 10 characters.'))\n elif re.match('^[\\\\s]*$', introduction):\n raise forms.ValidationError(_('Enter a valid address.'))\n return introduction\n\n\nclass ConfigurationLocationForm(forms.ModelForm):\n\n\n class Meta:\n model = ConfigurationLocation\n\n def __init__(self, *args, **kwargs):\n super(ConfigurationLocationForm, self).__init__(*args, **kwargs)\n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['country'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['continent'].widget.attrs['class'] = 'form-dropdownfield'\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = ''\n",
"step-5": "import re \nimport datetime\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom vcg.util.forms import mobile_number_validation\nfrom vcg.company_management.models import ConfigurationContact, ConfigurationLogo, ConfigurationHomepage, ConfigurationLocation\n\nclass ConfigurationContactForm(forms.ModelForm):\n class Meta:\n model = ConfigurationContact\n \n def __init__(self, *args, **kwargs):\n super(ConfigurationContactForm, self).__init__(*args, **kwargs)\n \n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['name_of_institution'].widget.attrs['class'] = 'form-text'\n self.fields['email_external'].widget.attrs['class'] = 'form-text'\n self.fields['country_code_external'].widget.attrs['class'] = 'form-text-small'\n self.fields['phone_number_external'].widget.attrs['class'] = 'form-text-phone'\n self.fields['email_internal'].widget.attrs['class'] = 'form-text'\n self.fields['country_code_internal'].widget.attrs['class'] = 'form-text-small'\n self.fields['phone_number_internal'].widget.attrs['class'] = 'form-text-phone'\n\n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = \"\" \n def clean(self):\n phone_number_external = self.cleaned_data.get(\"phone_number_external\")\n country_code_external = self.cleaned_data.get(\"country_code_external\")\n \n phone_number_internal = self.cleaned_data.get(\"phone_number_internal\")\n country_code_internal = self.cleaned_data.get(\"country_code_internal\")\n \n if phone_number_external and not country_code_external:\n raise forms.ValidationError(_('External Country code Field is required .')) \n if country_code_external and not phone_number_external:\n raise forms.ValidationError(_('External Phone Number Field is required .')) \n\n if phone_number_internal and not country_code_internal:\n raise forms.ValidationError(_('Internal Country code Field is required .')) \n if country_code_internal and not phone_number_internal:\n raise forms.ValidationError(_('Internal Phone Number Field is required .')) \n \n return self.cleaned_data \n \n def clean_name_of_institution(self):\n name_of_institution = self.cleaned_data['name_of_institution']\n if name_of_institution:\n if len(name_of_institution) < 3:\n raise forms.ValidationError(_('Enter minimum 3 characters.'))\n elif re.match(r'^[\\s]*$', name_of_institution):\n raise forms.ValidationError(_(\"Enter a valid name.\"))\n return name_of_institution \n\n def clean_country_code_external(self):\n country_code_external = self.cleaned_data['country_code_external']\n if country_code_external:\n if len(str(country_code_external)) > 5:\n raise forms.ValidationError(_('maximum 5 characters.'))\n return country_code_external \n \n def clean_phone_number_external(self):\n phone_number_external = self.cleaned_data['phone_number_external']\n if phone_number_external:\n phone_number_external = mobile_number_validation(phone_number_external)\n if not phone_number_external:\n raise forms.ValidationError(_(\"Enter a valid contact number\"))\n return phone_number_external \n\n def clean_country_code_internal(self):\n country_code_internal = self.cleaned_data['country_code_internal']\n if country_code_internal:\n if len(str(country_code_internal)) > 5:\n raise forms.ValidationError(_('maximum 5 characters.'))\n return country_code_internal \n \n def clean_phone_number_internal(self):\n phone_number_internal = self.cleaned_data['phone_number_internal']\n if phone_number_internal:\n phone_number_internal = 
mobile_number_validation(phone_number_internal)\n if not phone_number_internal:\n raise forms.ValidationError(_(\"Enter a valid contact number\"))\n return phone_number_internal \n\nclass ConfigurationLogoForm(forms.ModelForm):\n class Meta:\n model = ConfigurationLogo\n \n def __init__(self, *args, **kwargs):\n super(ConfigurationLogoForm, self).__init__(*args, **kwargs)\n \n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = \"\" \n \nclass ConfigurationHomepageForm(forms.ModelForm):\n class Meta:\n model = ConfigurationHomepage\n \n def __init__(self, *args, **kwargs):\n super(ConfigurationHomepageForm, self).__init__(*args, **kwargs)\n \n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['header'].widget.attrs['class'] = 'form-text'\n self.fields['introduction'].widget.attrs['class'] = 'form-textarea'\n \n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = \"\"\n\n def clean_header(self):\n header = self.cleaned_data['header']\n if header:\n if len(header) < 3:\n raise forms.ValidationError(_('Enter minimum 3 characters.'))\n elif re.match(r'^[\\s]*$', header):\n raise forms.ValidationError(_(\"Enter a valid name.\"))\n return header\n\n def clean_introduction(self):\n introduction = self.cleaned_data['introduction']\n if introduction:\n if len(introduction) < 10:\n raise forms.ValidationError(_('Enter minimum 10 characters.'))\n elif re.match(r'^[\\s]*$', introduction):\n raise forms.ValidationError(_(\"Enter a valid address.\"))\n return introduction \n \nclass ConfigurationLocationForm(forms.ModelForm):\n class Meta:\n model = ConfigurationLocation\n \n def __init__(self, *args, **kwargs):\n super(ConfigurationLocationForm, self).__init__(*args, **kwargs)\n \n self.fields['company'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['country'].widget.attrs['class'] = 'form-dropdownfield'\n self.fields['continent'].widget.attrs['class'] = 'form-dropdownfield'\n \n if 'instance' in kwargs:\n self.id = kwargs['instance'].id\n else:\n self.id = \"\" ",
"step-ids": [
11,
13,
15,
16,
18
]
}
|
[
11,
13,
15,
16,
18
] |
# -*- coding: utf-8 -*-
from django.shortcuts import render_to_response
from django.views.generic import TemplateView
from django.core.context_processors import csrf
from django.template import RequestContext
from django.views.generic import DetailView, ListView , CreateView , UpdateView , DeleteView , FormView , View
from .models import Contact
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.shortcuts import render_to_response
# Create your views here.
#def home(request):
# posts = Post.objects.all()
# contexto = {'posts' : ''}
# return render_to_response("home.html" , contexto)
class Home(TemplateView):
def get(self, request , *args , **kwargs):
return render_to_response('home.html')
class AddContact(CreateView):
model = Contact
success_url = reverse_lazy('home')
# return render_to_response("home.html" , contexto)
class ListContact(ListView):
model = Contact
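# --- Editor's note (illustrative, not part of the original module) ---
# These class-based views would be wired up in urls.py roughly like this
# (Django 1.x style, to match the imports above; the URL names are assumptions):
#
#     from django.conf.urls import url
#     from .views import Home, AddContact, ListContact
#
#     urlpatterns = [
#         url(r'^$', Home.as_view(), name='home'),
#         url(r'^contacts/add/$', AddContact.as_view(), name='add-contact'),
#         url(r'^contacts/$', ListContact.as_view(), name='list-contact'),
#     ]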
|
normal
|
{
"blob_id": "8a3694f96203ae8d1e306e1c9a5a47bfe26abeb1",
"index": 5178,
"step-1": "<mask token>\n\n\nclass ListContact(ListView):\n model = Contact\n",
"step-2": "<mask token>\n\n\nclass AddContact(CreateView):\n model = Contact\n success_url = reverse_lazy('home')\n\n\nclass ListContact(ListView):\n model = Contact\n",
"step-3": "<mask token>\n\n\nclass Home(TemplateView):\n <mask token>\n\n\nclass AddContact(CreateView):\n model = Contact\n success_url = reverse_lazy('home')\n\n\nclass ListContact(ListView):\n model = Contact\n",
"step-4": "<mask token>\n\n\nclass Home(TemplateView):\n\n def get(self, request, *args, **kwargs):\n return render_to_response('home.html')\n\n\nclass AddContact(CreateView):\n model = Contact\n success_url = reverse_lazy('home')\n\n\nclass ListContact(ListView):\n model = Contact\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django.shortcuts import render_to_response\nfrom django.views.generic import TemplateView\nfrom django.core.context_processors import csrf\nfrom django.template import RequestContext\nfrom django.views.generic import DetailView, ListView , CreateView , UpdateView , DeleteView , FormView , View\nfrom .models import Contact\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\n\n# Create your views here.\n\n#def home(request):\n # posts = Post.objects.all()\n# contexto = {'posts' : ''}\n# return render_to_response(\"home.html\" , contexto)\n\n\n\nclass Home(TemplateView):\n def get(self, request , *args , **kwargs):\n return render_to_response('home.html')\n\n\nclass AddContact(CreateView):\n model = Contact\n success_url = reverse_lazy('home')\n # return render_to_response(\"home.html\" , contexto)\n\nclass ListContact(ListView):\n model = Contact\n\n",
"step-ids": [
2,
4,
5,
6,
8
]
}
|
[
2,
4,
5,
6,
8
] |
from django.db import models
import eav
from django.utils import timezone
class RiskType(models.Model):
"""A model class used for storing data
about risk types
"""
name = models.CharField(max_length=255)
created = models.DateTimeField(default=timezone.now)
modified = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
eav.register(RiskType)
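# --- Illustrative sketch (editor's addition): attaching a dynamic attribute to a RiskType
# through the EAV registration above. This assumes the django-eav style API, where
# attributes are declared as Attribute rows and read/written via `instance.eav.<slug>`.
def example_dynamic_risk_type():
    from eav.models import Attribute
    Attribute.objects.get_or_create(
        name='Has basement', slug='has_basement',
        defaults={'datatype': Attribute.TYPE_BOOLEAN})
    house = RiskType.objects.create(name='House')
    house.eav.has_basement = True
    house.save()
    return house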
|
normal
|
{
"blob_id": "635b75bc12718bccdfb9d04a54476c93fa4685ce",
"index": 4661,
"step-1": "<mask token>\n\n\nclass RiskType(models.Model):\n <mask token>\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n\n class Meta:\n ordering = 'name',\n\n def __str__(self):\n return self.name\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RiskType(models.Model):\n \"\"\"A model class used for storing data\n about risk types\n \"\"\"\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n\n class Meta:\n ordering = 'name',\n\n def __str__(self):\n return self.name\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RiskType(models.Model):\n \"\"\"A model class used for storing data\n about risk types\n \"\"\"\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n\n class Meta:\n ordering = 'name',\n\n def __str__(self):\n return self.name\n\n\neav.register(RiskType)\n",
"step-4": "from django.db import models\nimport eav\nfrom django.utils import timezone\n\n\nclass RiskType(models.Model):\n \"\"\"A model class used for storing data\n about risk types\n \"\"\"\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n\n class Meta:\n ordering = 'name',\n\n def __str__(self):\n return self.name\n\n\neav.register(RiskType)\n",
"step-5": "from django.db import models\nimport eav\nfrom django.utils import timezone\n\n\nclass RiskType(models.Model):\n \"\"\"A model class used for storing data\n about risk types\n \"\"\"\n name = models.CharField(max_length=255)\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(auto_now=True)\n\n class Meta:\n ordering = ('name',)\n\n def __str__(self):\n return self.name\n\n\neav.register(RiskType)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
NUM_CLASSES = 31
AUDIO_SR = 16000
AUDIO_LENGTH = 16000
LIBROSA_AUDIO_LENGTH = 22050
EPOCHS = 25
categories = {
'stop': 0,
'nine': 1,
'off': 2,
'four': 3,
'right': 4,
'eight': 5,
'one': 6,
'bird': 7,
'dog': 8,
'no': 9,
'on': 10,
'seven': 11,
'cat': 12,
'left': 13,
'three': 14,
'tree': 15,
'bed': 16,
'zero': 17,
'happy': 18,
'sheila': 19,
'five': 20,
'down': 21,
'marvin': 22,
'six': 23,
'up': 24,
'wow': 25,
'house': 26,
'go': 27,
'yes': 28,
'two': 29,
'_background_noise_': 30,
}
inv_categories = {
0: 'stop',
1: 'nine',
2: 'off',
3: 'four',
4: 'right',
5: 'eight',
6: 'one',
7: 'bird',
8: 'dog',
9: 'no',
10: 'on',
11: 'seven',
12: 'cat',
13: 'left',
14: 'three',
15: 'tree',
16: 'bed',
17: 'zero',
18: 'happy',
19: 'sheila',
20: 'five',
21: 'down',
22: 'marvin',
23: 'six',
24: 'up',
25: 'wow',
26: 'house',
27: 'go',
28: 'yes',
29: 'two',
30: '_background_noise_'
}
# Marvin model
INPUT_SHAPE = (99, 40)
TARGET_SHAPE = (99, 40, 1)
PARSE_PARAMS = (0.025, 0.01, 40)
filters = [16, 32, 64, 128, 256]
DROPOUT = 0.25
KERNEL_SIZE = (3, 3)
POOL_SIZE = (2, 2)
DENSE_1 = 512
DENSE_2 = 256
BATCH_SIZE = 128
PATIENCE = 5
LEARNING_RATE = 0.001
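# --- Illustrative helper (editor's addition): mapping a vector of class scores back to a
# keyword label with the tables above. The example score vector is made up.
def decode_prediction(scores):
    # scores: any sequence of NUM_CLASSES values; the highest-scoring class wins.
    best_index = max(range(NUM_CLASSES), key=lambda i: scores[i])
    return inv_categories[best_index]

# e.g. decode_prediction([0.0] * 27 + [1.0] + [0.0] * 3) == 'go'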
|
normal
|
{
"blob_id": "6a9e18cde94258b01a37f459eceaac58118b4976",
"index": 5813,
"step-1": "<mask token>\n",
"step-2": "NUM_CLASSES = 31\nAUDIO_SR = 16000\nAUDIO_LENGTH = 16000\nLIBROSA_AUDIO_LENGTH = 22050\nEPOCHS = 25\ncategories = {'stop': 0, 'nine': 1, 'off': 2, 'four': 3, 'right': 4,\n 'eight': 5, 'one': 6, 'bird': 7, 'dog': 8, 'no': 9, 'on': 10, 'seven': \n 11, 'cat': 12, 'left': 13, 'three': 14, 'tree': 15, 'bed': 16, 'zero': \n 17, 'happy': 18, 'sheila': 19, 'five': 20, 'down': 21, 'marvin': 22,\n 'six': 23, 'up': 24, 'wow': 25, 'house': 26, 'go': 27, 'yes': 28, 'two':\n 29, '_background_noise_': 30}\ninv_categories = {(0): 'stop', (1): 'nine', (2): 'off', (3): 'four', (4):\n 'right', (5): 'eight', (6): 'one', (7): 'bird', (8): 'dog', (9): 'no',\n (10): 'on', (11): 'seven', (12): 'cat', (13): 'left', (14): 'three', (\n 15): 'tree', (16): 'bed', (17): 'zero', (18): 'happy', (19): 'sheila',\n (20): 'five', (21): 'down', (22): 'marvin', (23): 'six', (24): 'up', (\n 25): 'wow', (26): 'house', (27): 'go', (28): 'yes', (29): 'two', (30):\n '_background_noise_'}\nINPUT_SHAPE = 99, 40\nTARGET_SHAPE = 99, 40, 1\nPARSE_PARAMS = 0.025, 0.01, 40\nfilters = [16, 32, 64, 128, 256]\nDROPOUT = 0.25\nKERNEL_SIZE = 3, 3\nPOOL_SIZE = 2, 2\nDENSE_1 = 512\nDENSE_2 = 256\nBATCH_SIZE = 128\nPATIENCE = 5\nLEARNING_RATE = 0.001\n",
"step-3": "NUM_CLASSES = 31\n\nAUDIO_SR = 16000\nAUDIO_LENGTH = 16000\nLIBROSA_AUDIO_LENGTH = 22050\n\nEPOCHS = 25\n\ncategories = {\n 'stop': 0,\n 'nine': 1,\n 'off': 2,\n 'four': 3,\n 'right': 4,\n 'eight': 5,\n 'one': 6,\n 'bird': 7,\n 'dog': 8,\n 'no': 9,\n 'on': 10,\n 'seven': 11,\n 'cat': 12,\n 'left': 13,\n 'three': 14,\n 'tree': 15,\n 'bed': 16,\n 'zero': 17,\n 'happy': 18,\n 'sheila': 19,\n 'five': 20,\n 'down': 21,\n 'marvin': 22,\n 'six': 23,\n 'up': 24,\n 'wow': 25,\n 'house': 26,\n 'go': 27,\n 'yes': 28,\n 'two': 29,\n '_background_noise_': 30,\n}\n\n\ninv_categories = {\n 0: 'stop',\n 1: 'nine',\n 2: 'off',\n 3: 'four',\n 4: 'right',\n 5: 'eight',\n 6: 'one',\n 7: 'bird',\n 8: 'dog',\n 9: 'no',\n 10: 'on',\n 11: 'seven',\n 12: 'cat',\n 13: 'left',\n 14: 'three',\n 15: 'tree',\n 16: 'bed',\n 17: 'zero',\n 18: 'happy',\n 19: 'sheila',\n 20: 'five',\n 21: 'down',\n 22: 'marvin',\n 23: 'six',\n 24: 'up',\n 25: 'wow',\n 26: 'house',\n 27: 'go',\n 28: 'yes',\n 29: 'two',\n 30: '_background_noise_'\n }\n\n# Marvin model\nINPUT_SHAPE = (99, 40)\nTARGET_SHAPE = (99, 40, 1)\nPARSE_PARAMS = (0.025, 0.01, 40)\nfilters = [16, 32, 64, 128, 256]\n\nDROPOUT = 0.25\nKERNEL_SIZE = (3, 3)\nPOOL_SIZE = (2, 2)\nDENSE_1 = 512\nDENSE_2 = 256\n\nBATCH_SIZE = 128\nPATIENCE = 5\nLEARNING_RATE = 0.001\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
Utilities used by other modules.
"""
import csv
import datetime
import hashlib
import json
import re
import string
import subprocess
import uuid
import xml.etree.ElementTree as ET
from alta import ConfigurationFromYamlFile
from pkg_resources import resource_filename
from ..__details__ import __appname__
from appdirs import *
from comoda import ensure_dir
from shutil import copyfile
SAMPLES_WITHOUT_BARCODES = [2, 8]
DEFAULT_INDEX_CYCLES = dict(index='8', index1='8')
PROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')
class IEMRunInfoReader:
"""
Illumina Experimental Manager RunInfo xml reader.
"""
def __init__(self, f):
self.xml_file = f
self.tree = ET.parse(self.xml_file)
self.root = self.tree.getroot()
def get_reads(self):
reads = [r.attrib for r in self.root.iter('Read')]
return reads
def get_indexed_reads(self):
reads = self.get_reads()
return filter(lambda item: item["IsIndexedRead"] == "Y", reads)
def get_index_cycles(self):
indexed_reads = self.get_indexed_reads()
return dict(
index=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] == "2"), None),
index1=next((item['NumCycles'] for item in indexed_reads
if item["IsIndexedRead"] == "Y" and item['Number'] != "2"), None))
@staticmethod
def get_default_index_cycles():
return DEFAULT_INDEX_CYCLES
def set_index_cycles(self, index_cycles, write=True):
for read in self.root.iter('Read'):
if read.attrib["IsIndexedRead"] == "Y":
if read.attrib['Number'] == '2':
read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))
else:
read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))
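                    # NOTE (editor's annotation): this branch most likely intends
                    # index_cycles.get('index1', DEFAULT_INDEX_CYCLES['index1']);
                    # as written, both branches read the 'index' key.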
if write:
self.tree.write(self.xml_file)
def is_paired_end_sequencing(self):
reads = self.get_reads()
reads = filter(lambda item: item["IsIndexedRead"] == "N", reads)
if len(reads) == 1:
return False
return True
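# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Given a standard Illumina RunInfo.xml, the reader above would be used roughly as:
#
#     reader = IEMRunInfoReader('/path/to/RunInfo.xml')
#     reader.get_index_cycles()          # e.g. {'index': '8', 'index1': '8'}
#     reader.is_paired_end_sequencing()  # True for a paired-end run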
class LogBook:
"""
Logbook manager
"""
def __init__(self, filename):
self.filename = filename
self.logfile = None
self.logbook = dict()
def dump(self):
a = []
if not os.path.isfile(self.filename):
a.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(a, indent=4, sort_keys=True, default=str))
else:
with open(self.filename) as feedsjson:
feeds = json.load(feedsjson)
feeds.append(self.logbook)
with open(self.filename, mode='w') as f:
f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))
def start(self, task_name, args=None):
self.logbook.update(task_name=task_name)
self.logbook.update(args=args)
self.logbook.update(start_time=datetime.datetime.now())
def end(self):
self.logbook.update(end_time=datetime.datetime.now())
execution_time = self.logbook.get('end_time') - self.logbook.get('start_time')
self.logbook.update(execution_time=execution_time)
self.dump()
class IEMSampleSheetReader(csv.DictReader):
"""
Illumina Experimental Manager SampleSheet reader.
"""
def __init__(self, f):
csv.DictReader.__init__(self, f, delimiter=',')
self.header = ''
self.data = ''
first_line = f.readline()
if not first_line.startswith('[Header]'):
raise ValueError('%s is not an IEM samplesheet'.format(f.name))
header = [first_line.strip()]
l = f.readline()
while not l.startswith('[Data]'):
header.append(l.strip()) # ms-dos
l = f.readline()
else:
header.append(l.strip())
self.header = header
self.data = csv.DictReader(f.readlines(), delimiter=',')
def barcodes_have_the_same_size(self):
return False if self.get_barcode_mask() is None else True
def get_body(self, label='Sample_Name', new_value='', replace=True):
def sanitize(mystr):
"""
Sanitize string in accordance with Illumina's documentation
bcl2fastq2 Conversion Software v2.17 Guide
"""
retainlist = "_-"
return re.sub(r'[^\w' + retainlist + ']', '_', mystr)
body = []
for i in self.header:
body.append(i)
body.append('\n')
body.append(string.join(self.data.fieldnames, ','))
body.append('\n')
to_be_sanitized = ['Sample_Project', 'Sample_Name']
for row in self.data:
for f in self.data.fieldnames:
if replace and f == label:
body.append(new_value)
else:
if f in to_be_sanitized and row[f]:
body.append(sanitize(row[f]))
else:
body.append(row[f])
body.append(',')
body.append('\n')
return body
def get_barcode_mask(self):
barcodes_mask = dict()
for row in self.data:
index = len(row['index']) if 'index' in row else None
index1 = None
if 'index1' in row or 'index2' in row:
index1 = len(row['index2']) if 'index2' in row else len(row['index1'])
if row['Lane'] not in barcodes_mask:
barcodes_mask[row['Lane']] = dict(
index=index,
index1=index1,
)
else:
if index != barcodes_mask[row['Lane']]['index'] or index1 != barcodes_mask[row['Lane']]['index1']:
return None
return barcodes_mask
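# Hedged usage sketch (the samplesheet path is illustrative): parse an IEM
# samplesheet and inspect the per-lane barcode mask. Note that self.data is an
# iterator, so get_barcode_mask() and get_body() should each be called on a
# fresh reader instance.
#
#     with open('/path/to/SampleSheet.csv') as f:
#         ss = IEMSampleSheetReader(f)
#     mask = ss.get_barcode_mask()   # e.g. {'1': {'index': 8, 'index1': 8}}, or None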
class WeightedPath(object):
def __init__(self, path, weight):
self.path = path
self.weight = weight
def __repr__(self):
return '{}: {} {}'.format(self.__class__.__name__,
self.path,
self.weight)
def __cmp__(self, other):
if hasattr(other, 'weight'):
return self.weight.__cmp__(other.weight)
def get_conf(logger, config_file_from_cli=None, profile=None):
profiles = {'presta': 'presta_config.yml',
'celery': 'celery_config.yml'}
default_config_file_label = profiles.get(profile, profiles['presta'])
config_file_path = config_file_setup(logger, default_config_file_label,
cf_from_cli=config_file_from_cli)
# Load YAML configuration file
return ConfigurationFromYamlFile(config_file_path)
def path_exists(path, logger, force=True):
def file_missing(path, logger, force):
msg = "path - {} - doesn't exists".format(path)
if force:
logger.error(msg)
sys.exit()
logger.warning(msg)
return False
return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,
logger,
force)
def sanitize_filename(filename):
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
return ''.join(c for c in filename if c in valid_chars)
def format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):
filename = sanitize_filename(sample_label)
if read:
filename = '_'.join(
[filename, lane, read]) if lane else '_'.join(
[filename, read])
if uid:
filename = '.'.join([filename, str(uuid.uuid4())])
if ext:
filename = '.'.join([filename, ext])
return sanitize_filename(filename)
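# Hedged example of the naming scheme above (label, lane and read are made-up
# values): characters outside [A-Za-z0-9._-] are simply dropped by
# sanitize_filename, so the space in the label disappears.
#
#     format_dataset_filename('Sample 1', lane='L001', read='R1', ext='fastq.gz')
#     # -> 'Sample1_L001_R1.fastq.gz'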
def config_file_setup(logger, cf_label, cf_from_cli=None):
"""
    Create a config file if it does not exist, copying it from the package
    default into the user_config_dir.
    Return the configuration file path from the cli arg if present; otherwise
    return the path from the user_config_dir.
:param logger: logger
:param cf_label: label of the configuration file (required)
:param cf_from_cli: path to configuration file from cli arg
:return: Path
"""
presta_config_dir = os.path.join(user_config_dir(__appname__))
config_file_from_home = os.path.join(presta_config_dir, cf_label)
if not path_exists(config_file_from_home, logger, force=False):
logger.info('Creating config path {}'.format(presta_config_dir))
ensure_dir(presta_config_dir)
config_file_path = '/'.join(['config', cf_label])
config_file_from_package = resource_filename(__appname__,
config_file_path)
copyfile(config_file_from_package, config_file_from_home)
config_file_paths = []
if cf_from_cli and path_exists(cf_from_cli, logger, force=False):
config_file_paths.append(WeightedPath(cf_from_cli, 0))
if path_exists(config_file_from_home, logger, force=False):
config_file_paths.append(WeightedPath(config_file_from_home, 1))
logger.debug("config file paths: {}".format(config_file_paths))
config_file_path = sorted(config_file_paths)[0].path
logger.info('Reading configuration from {}'.format(config_file_path))
return config_file_path
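# Hedged example (illustrative path): when both a CLI-supplied file and the copy
# under user_config_dir exist, the CLI path wins because WeightedPath(cf_from_cli, 0)
# sorts before WeightedPath(config_file_from_home, 1).
#
#     conf = get_conf(logger, config_file_from_cli='/tmp/presta_config.yml')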
def touch(path, logger):
try:
with open(path, 'a'):
os.utime(path, None)
except IOError as e:
logger.error("While touching {} file: {}".format(path, e.strerror))
def read_chunks(file_handle, chunk_size=8192):
while True:
data = file_handle.read(chunk_size)
if not data:
break
yield data
def get_md5(file_handle):
hasher = hashlib.md5()
for chunk in read_chunks(file_handle):
hasher.update(chunk)
return hasher.hexdigest()
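# Hedged usage sketch (hypothetical file): the file is hashed in 8 KiB chunks via
# read_chunks(), so arbitrarily large datasets never have to fit in memory.
#
#     with open('/path/to/dataset.fastq.gz', 'rb') as fh:
#         checksum = get_md5(fh)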
def check_progress_status(root_path, started_file, completed_file):
localroot, dirnames, filenames = os.walk(root_path).next()
if started_file not in filenames:
return PROGRESS_STATUS.get('TODO')
elif completed_file not in filenames:
return PROGRESS_STATUS.get('STARTED')
else:
started_file = os.path.join(root_path, started_file)
completed_file = os.path.join(root_path, completed_file)
if os.path.getmtime(started_file) > os.path.getmtime(completed_file):
return PROGRESS_STATUS.get('STARTED')
return PROGRESS_STATUS.get('COMPLETED')
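# Hedged example (the marker file names are assumptions, not the project's real ones):
#
#     status = check_progress_status('/path/to/run_folder',
#                                    started_file='started.txt',
#                                    completed_file='completed.txt')
#     # -> one of PROGRESS_STATUS's values: 'todo', 'started' or 'completed'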
def runJob(cmd, logger):
try:
# subprocess.check_output(cmd)
process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
        output = process.communicate()[0]
        ret = process.wait()
        # Popen does not raise CalledProcessError on a non-zero exit status, so check
        # the return code explicitly instead of always reporting success.
        return ret == 0
except subprocess.CalledProcessError as e:
logger.info(e)
if e.output:
logger.info("command output: %s", e.output)
else:
logger.info("no command output available")
return False
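# Hedged usage sketch: cmd is an argv-style list of strings; the logger is used
# only when the command fails.
#
#     ok = runJob(['rsync', '-avz', '/path/to/src/', '/path/to/dst/'], logger)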
|
normal
|
{
"blob_id": "b16c847912944e0563492d35768b5b5bf3a506c7",
"index": 1569,
"step-1": "<mask token>\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n 
body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n 
body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\n<mask token>\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None,\n uid=False):\n filename = sanitize_filename(sample_label)\n if read:\n filename = '_'.join([filename, lane, read]) if lane else '_'.join([\n filename, read])\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n if ext:\n filename = '.'.join([filename, ext])\n return sanitize_filename(filename)\n\n\n<mask token>\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error('While touching {} file: {}'.format(path, e.strerror))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n 
body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\n<mask token>\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None,\n uid=False):\n filename = sanitize_filename(sample_label)\n if read:\n filename = '_'.join([filename, lane, read]) if lane else '_'.join([\n filename, read])\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n if ext:\n filename = '.'.join([filename, ext])\n return sanitize_filename(filename)\n\n\ndef config_file_setup(logger, cf_label, cf_from_cli=None):\n \"\"\"\n Create a config file if does not exists, copying it from the package\n default into the user_config_dir.\n Return a configuration file path from cli args if present, otherwise return\n a path from the user_config_dir\n :param logger: logger\n :param cf_label: label of the configuration file (required)\n :param cf_from_cli: path to configuration file from cli arg\n :return: Path\n \"\"\"\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = '/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(config_file_from_home, 1))\n logger.debug('config file paths: {}'.format(config_file_paths))\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error('While touching {} file: {}'.format(path, e.strerror))\n\n\n<mask token>\n",
"step-4": "<mask token>\nSAMPLES_WITHOUT_BARCODES = [2, 8]\nDEFAULT_INDEX_CYCLES = dict(index='8', index1='8')\nPROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item['IsIndexedRead'] == 'Y', reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(index=next((item['NumCycles'] for item in indexed_reads if\n item['IsIndexedRead'] == 'Y' and item['Number'] == '2'), None),\n index1=next((item['NumCycles'] for item in indexed_reads if \n item['IsIndexedRead'] == 'Y' and item['Number'] != '2'), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n for read in self.root.iter('Read'):\n if read.attrib['IsIndexedRead'] == 'Y':\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index',\n DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item['IsIndexedRead'] == 'N', reads)\n if len(reads) == 1:\n return False\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(feeds, indent=4, sort_keys=True, default\n =str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get(\n 'start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip())\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion 
Software v2.17 Guide\n \"\"\"\n retainlist = '_-'\n return re.sub('[^\\\\w' + retainlist + ']', '_', mystr)\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n elif f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row\n ['index1'])\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(index=index, index1=index1)\n elif index != barcodes_mask[row['Lane']]['index'\n ] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n return barcodes_mask\n\n\nclass WeightedPath(object):\n\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__, self.path, self.\n weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\ndef get_conf(logger, config_file_from_cli=None, profile=None):\n profiles = {'presta': 'presta_config.yml', 'celery': 'celery_config.yml'}\n default_config_file_label = profiles.get(profile, profiles['presta'])\n config_file_path = config_file_setup(logger, default_config_file_label,\n cf_from_cli=config_file_from_cli)\n return ConfigurationFromYamlFile(config_file_path)\n\n\ndef path_exists(path, logger, force=True):\n\n def file_missing(path, logger, force):\n msg = \"path - {} - doesn't exists\".format(path)\n if force:\n logger.error(msg)\n sys.exit()\n logger.warning(msg)\n return False\n return True if os.path.exists(os.path.expanduser(path)) else file_missing(\n path, logger, force)\n\n\ndef sanitize_filename(filename):\n valid_chars = '-_.%s%s' % (string.ascii_letters, string.digits)\n return ''.join(c for c in filename if c in valid_chars)\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None,\n uid=False):\n filename = sanitize_filename(sample_label)\n if read:\n filename = '_'.join([filename, lane, read]) if lane else '_'.join([\n filename, read])\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n if ext:\n filename = '.'.join([filename, ext])\n return sanitize_filename(filename)\n\n\ndef config_file_setup(logger, cf_label, cf_from_cli=None):\n \"\"\"\n Create a config file if does not exists, copying it from the package\n default into the user_config_dir.\n Return a configuration file path from cli args if present, otherwise return\n a path from the user_config_dir\n :param logger: logger\n :param cf_label: label of the configuration file (required)\n :param cf_from_cli: path to configuration file from cli arg\n :return: Path\n \"\"\"\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = '/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n 
config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(config_file_from_home, 1))\n logger.debug('config file paths: {}'.format(config_file_paths))\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error('While touching {} file: {}'.format(path, e.strerror))\n\n\ndef read_chunks(file_handle, chunk_size=8192):\n while True:\n data = file_handle.read(chunk_size)\n if not data:\n break\n yield data\n\n\ndef get_md5(file_handle):\n hasher = hashlib.md5()\n for chunk in read_chunks(file_handle):\n hasher.update(chunk)\n return hasher.hexdigest()\n\n\ndef check_progress_status(root_path, started_file, completed_file):\n localroot, dirnames, filenames = os.walk(root_path).next()\n if started_file not in filenames:\n return PROGRESS_STATUS.get('TODO')\n elif completed_file not in filenames:\n return PROGRESS_STATUS.get('STARTED')\n else:\n started_file = os.path.join(root_path, started_file)\n completed_file = os.path.join(root_path, completed_file)\n if os.path.getmtime(started_file) > os.path.getmtime(completed_file):\n return PROGRESS_STATUS.get('STARTED')\n return PROGRESS_STATUS.get('COMPLETED')\n\n\ndef runJob(cmd, logger):\n try:\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n output = process.communicate()[0]\n ret = process.wait()\n return True\n except subprocess.CalledProcessError as e:\n logger.info(e)\n if e.output:\n logger.info('command output: %s', e.output)\n else:\n logger.info('no command output available')\n return False\n",
"step-5": "\"\"\"\nUtilities used by other modules.\n\"\"\"\n\nimport csv\nimport datetime\nimport hashlib\nimport json\nimport re\nimport string\nimport subprocess\nimport uuid\n\nimport xml.etree.ElementTree as ET\nfrom alta import ConfigurationFromYamlFile\nfrom pkg_resources import resource_filename\nfrom ..__details__ import __appname__\nfrom appdirs import *\nfrom comoda import ensure_dir\nfrom shutil import copyfile\n\n\nSAMPLES_WITHOUT_BARCODES = [2, 8]\nDEFAULT_INDEX_CYCLES = dict(index='8', index1='8')\nPROGRESS_STATUS = dict(COMPLETED='completed', STARTED='started', TODO='todo')\n\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item[\"IsIndexedRead\"] == \"Y\", reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(\n index=next((item['NumCycles'] for item in indexed_reads\n if item[\"IsIndexedRead\"] == \"Y\" and item['Number'] == \"2\"), None),\n index1=next((item['NumCycles'] for item in indexed_reads\n if item[\"IsIndexedRead\"] == \"Y\" and item['Number'] != \"2\"), None))\n\n @staticmethod\n def get_default_index_cycles():\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n\n for read in self.root.iter('Read'):\n if read.attrib[\"IsIndexedRead\"] == \"Y\":\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n def is_paired_end_sequencing(self):\n reads = self.get_reads()\n reads = filter(lambda item: item[\"IsIndexedRead\"] == \"N\", reads)\n\n if len(reads) == 1:\n return False\n\n return True\n\n\nclass LogBook:\n \"\"\"\n Logbook manager\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n self.logfile = None\n self.logbook = dict()\n\n def dump(self):\n a = []\n if not os.path.isfile(self.filename):\n a.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(a, indent=4, sort_keys=True, default=str))\n else:\n with open(self.filename) as feedsjson:\n feeds = json.load(feedsjson)\n\n feeds.append(self.logbook)\n with open(self.filename, mode='w') as f:\n f.write(json.dumps(feeds, indent=4, sort_keys=True, default=str))\n\n def start(self, task_name, args=None):\n self.logbook.update(task_name=task_name)\n self.logbook.update(args=args)\n self.logbook.update(start_time=datetime.datetime.now())\n\n def end(self):\n self.logbook.update(end_time=datetime.datetime.now())\n execution_time = self.logbook.get('end_time') - self.logbook.get('start_time')\n self.logbook.update(execution_time=execution_time)\n self.dump()\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip()) # 
ms-dos\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n return False if self.get_barcode_mask() is None else True\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = \"_-\"\n return re.sub(r'[^\\w' + retainlist + ']', '_', mystr)\n\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n else:\n if f in to_be_sanitized and row[f]:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n\n for row in self.data:\n index = len(row['index']) if 'index' in row else None\n index1 = None\n\n if 'index1' in row or 'index2' in row:\n index1 = len(row['index2']) if 'index2' in row else len(row['index1'])\n\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(\n index=index,\n index1=index1,\n )\n else:\n if index != barcodes_mask[row['Lane']]['index'] or index1 != barcodes_mask[row['Lane']]['index1']:\n return None\n\n return barcodes_mask\n\n\nclass WeightedPath(object):\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__,\n self.path,\n self.weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n\n\ndef get_conf(logger, config_file_from_cli=None, profile=None):\n profiles = {'presta': 'presta_config.yml',\n 'celery': 'celery_config.yml'}\n default_config_file_label = profiles.get(profile, profiles['presta'])\n\n config_file_path = config_file_setup(logger, default_config_file_label,\n cf_from_cli=config_file_from_cli)\n\n # Load YAML configuration file\n return ConfigurationFromYamlFile(config_file_path)\n\n\ndef path_exists(path, logger, force=True):\n def file_missing(path, logger, force):\n msg = \"path - {} - doesn't exists\".format(path)\n if force:\n logger.error(msg)\n sys.exit()\n logger.warning(msg)\n return False\n\n return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,\n logger,\n force)\n\n\ndef sanitize_filename(filename):\n valid_chars = \"-_.%s%s\" % (string.ascii_letters, string.digits)\n return ''.join(c for c in filename if c in valid_chars)\n\n\ndef format_dataset_filename(sample_label, lane=None, read=None, ext=None, uid=False):\n filename = sanitize_filename(sample_label)\n\n if read:\n filename = '_'.join(\n [filename, lane, read]) if lane else '_'.join(\n [filename, read])\n\n if uid:\n filename = '.'.join([filename, str(uuid.uuid4())])\n\n if ext:\n filename = '.'.join([filename, ext])\n\n return sanitize_filename(filename)\n\n\ndef config_file_setup(logger, cf_label, cf_from_cli=None):\n \"\"\"\n Create a config file if does not exists, copying it from the package\n default into the user_config_dir.\n Return a configuration file path from cli args if present, otherwise return\n a path from the user_config_dir\n :param logger: logger\n :param cf_label: label of the configuration 
file (required)\n :param cf_from_cli: path to configuration file from cli arg\n :return: Path\n \"\"\"\n presta_config_dir = os.path.join(user_config_dir(__appname__))\n config_file_from_home = os.path.join(presta_config_dir, cf_label)\n\n if not path_exists(config_file_from_home, logger, force=False):\n logger.info('Creating config path {}'.format(presta_config_dir))\n ensure_dir(presta_config_dir)\n config_file_path = '/'.join(['config', cf_label])\n config_file_from_package = resource_filename(__appname__,\n config_file_path)\n copyfile(config_file_from_package, config_file_from_home)\n\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(config_file_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(config_file_from_home, 1))\n\n logger.debug(\"config file paths: {}\".format(config_file_paths))\n\n config_file_path = sorted(config_file_paths)[0].path\n logger.info('Reading configuration from {}'.format(config_file_path))\n return config_file_path\n\n\ndef touch(path, logger):\n try:\n with open(path, 'a'):\n os.utime(path, None)\n except IOError as e:\n logger.error(\"While touching {} file: {}\".format(path, e.strerror))\n\n\ndef read_chunks(file_handle, chunk_size=8192):\n while True:\n data = file_handle.read(chunk_size)\n if not data:\n break\n yield data\n\n\ndef get_md5(file_handle):\n hasher = hashlib.md5()\n for chunk in read_chunks(file_handle):\n hasher.update(chunk)\n return hasher.hexdigest()\n\n\ndef check_progress_status(root_path, started_file, completed_file):\n localroot, dirnames, filenames = os.walk(root_path).next()\n\n if started_file not in filenames:\n return PROGRESS_STATUS.get('TODO')\n elif completed_file not in filenames:\n return PROGRESS_STATUS.get('STARTED')\n else:\n started_file = os.path.join(root_path, started_file)\n completed_file = os.path.join(root_path, completed_file)\n\n if os.path.getmtime(started_file) > os.path.getmtime(completed_file):\n return PROGRESS_STATUS.get('STARTED')\n\n return PROGRESS_STATUS.get('COMPLETED')\n\n\ndef runJob(cmd, logger):\n try:\n # subprocess.check_output(cmd)\n process = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n output = process.communicate()[0]\n ret = process.wait()\n return True\n except subprocess.CalledProcessError as e:\n logger.info(e)\n if e.output:\n logger.info(\"command output: %s\", e.output)\n else:\n logger.info(\"no command output available\")\n return False\n\n",
"step-ids": [
25,
27,
28,
36,
38
]
}
|
[
25,
27,
28,
36,
38
] |
# -*- coding: utf-8 -*-
import pytest
from bravado.client import ResourceDecorator
from bravado.client import SwaggerClient
def test_resource_exists(petstore_client):
assert type(petstore_client.pet) == ResourceDecorator
def test_resource_not_found(petstore_client):
with pytest.raises(AttributeError) as excinfo:
petstore_client.foo
assert 'foo not found' in str(excinfo.value)
@pytest.fixture
def client_tags_with_spaces():
return SwaggerClient.from_spec({
'swagger': '2.0',
'info': {
'version': '',
'title': 'API'
},
'paths': {
'/ping': {
'get': {
'operationId': 'ping',
'responses': {
'200': {
'description': 'ping'
}
},
'tags': [
'my tag'
]
}
}
}
})
def test_get_resource(client_tags_with_spaces):
assert type(client_tags_with_spaces._get_resource('my tag')) == ResourceDecorator
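# Hedged sketch of the petstore_client fixture the two tests above rely on; it is
# not shown in this file, and the swagger.json URL below is only illustrative.
#
#     @pytest.fixture
#     def petstore_client():
#         return SwaggerClient.from_url('http://petstore.swagger.io/v2/swagger.json')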
|
normal
|
{
"blob_id": "5ee1d8ef7ec4b191e0789ceb9c6dd2d58af526a0",
"index": 7875,
"step-1": "<mask token>\n\n\ndef test_resource_not_found(petstore_client):\n with pytest.raises(AttributeError) as excinfo:\n petstore_client.foo\n assert 'foo not found' in str(excinfo.value)\n\n\[email protected]\ndef client_tags_with_spaces():\n return SwaggerClient.from_spec({'swagger': '2.0', 'info': {'version':\n '', 'title': 'API'}, 'paths': {'/ping': {'get': {'operationId':\n 'ping', 'responses': {'200': {'description': 'ping'}}, 'tags': [\n 'my tag']}}}})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_resource_exists(petstore_client):\n assert type(petstore_client.pet) == ResourceDecorator\n\n\ndef test_resource_not_found(petstore_client):\n with pytest.raises(AttributeError) as excinfo:\n petstore_client.foo\n assert 'foo not found' in str(excinfo.value)\n\n\[email protected]\ndef client_tags_with_spaces():\n return SwaggerClient.from_spec({'swagger': '2.0', 'info': {'version':\n '', 'title': 'API'}, 'paths': {'/ping': {'get': {'operationId':\n 'ping', 'responses': {'200': {'description': 'ping'}}, 'tags': [\n 'my tag']}}}})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_resource_exists(petstore_client):\n assert type(petstore_client.pet) == ResourceDecorator\n\n\ndef test_resource_not_found(petstore_client):\n with pytest.raises(AttributeError) as excinfo:\n petstore_client.foo\n assert 'foo not found' in str(excinfo.value)\n\n\[email protected]\ndef client_tags_with_spaces():\n return SwaggerClient.from_spec({'swagger': '2.0', 'info': {'version':\n '', 'title': 'API'}, 'paths': {'/ping': {'get': {'operationId':\n 'ping', 'responses': {'200': {'description': 'ping'}}, 'tags': [\n 'my tag']}}}})\n\n\ndef test_get_resource(client_tags_with_spaces):\n assert type(client_tags_with_spaces._get_resource('my tag')\n ) == ResourceDecorator\n",
"step-4": "import pytest\nfrom bravado.client import ResourceDecorator\nfrom bravado.client import SwaggerClient\n\n\ndef test_resource_exists(petstore_client):\n assert type(petstore_client.pet) == ResourceDecorator\n\n\ndef test_resource_not_found(petstore_client):\n with pytest.raises(AttributeError) as excinfo:\n petstore_client.foo\n assert 'foo not found' in str(excinfo.value)\n\n\[email protected]\ndef client_tags_with_spaces():\n return SwaggerClient.from_spec({'swagger': '2.0', 'info': {'version':\n '', 'title': 'API'}, 'paths': {'/ping': {'get': {'operationId':\n 'ping', 'responses': {'200': {'description': 'ping'}}, 'tags': [\n 'my tag']}}}})\n\n\ndef test_get_resource(client_tags_with_spaces):\n assert type(client_tags_with_spaces._get_resource('my tag')\n ) == ResourceDecorator\n",
"step-5": "# -*- coding: utf-8 -*-\nimport pytest\n\nfrom bravado.client import ResourceDecorator\nfrom bravado.client import SwaggerClient\n\n\ndef test_resource_exists(petstore_client):\n assert type(petstore_client.pet) == ResourceDecorator\n\n\ndef test_resource_not_found(petstore_client):\n with pytest.raises(AttributeError) as excinfo:\n petstore_client.foo\n assert 'foo not found' in str(excinfo.value)\n\n\[email protected]\ndef client_tags_with_spaces():\n return SwaggerClient.from_spec({\n 'swagger': '2.0',\n 'info': {\n 'version': '',\n 'title': 'API'\n },\n 'paths': {\n '/ping': {\n 'get': {\n 'operationId': 'ping',\n 'responses': {\n '200': {\n 'description': 'ping'\n }\n },\n 'tags': [\n 'my tag'\n ]\n }\n }\n }\n })\n\n\ndef test_get_resource(client_tags_with_spaces):\n assert type(client_tags_with_spaces._get_resource('my tag')) == ResourceDecorator\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
Stirng - Liste - Dosya
- Fonksiyon yazıyoruz.
- Bu fonksiyon iki parametre alacak. (dosya, string)
1. sorun : Dosyanın içinde string var ise True döndürecek yok ise False
2. sorun : Dosyanın içinde string bulunursa ilk bulunduğu konumu return edecek
3. sorun : Dosyanın içerisinde yazdığımız strinng kaç kere var onu liste halinde return eden fonksiyon
"""
def fonkString(text, string):
if string in text:
print("TRUE")
print(text.index(string), ". sirada ilk", string, "bulundu")
print(text.count(string),"tane",string, "var")
liste = []
for i in range(len(text)):
if(text[i] == string):
liste.append(i)
for x in liste:
print(x)
else:
print("FALSE")
fonkString("Programlama laboratuvari calisma sorulari dosya string liste kullanma ", "m")
|
normal
|
{
"blob_id": "0d3cc85cd18ee197b24c8b01b71afe82110bfad2",
"index": 3487,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fonkString(text, string):\n if string in text:\n print('TRUE')\n print(text.index(string), '. sirada ilk', string, 'bulundu')\n print(text.count(string), 'tane', string, 'var')\n liste = []\n for i in range(len(text)):\n if text[i] == string:\n liste.append(i)\n for x in liste:\n print(x)\n else:\n print('FALSE')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fonkString(text, string):\n if string in text:\n print('TRUE')\n print(text.index(string), '. sirada ilk', string, 'bulundu')\n print(text.count(string), 'tane', string, 'var')\n liste = []\n for i in range(len(text)):\n if text[i] == string:\n liste.append(i)\n for x in liste:\n print(x)\n else:\n print('FALSE')\n\n\nfonkString(\n 'Programlama laboratuvari calisma sorulari dosya string liste kullanma ',\n 'm')\n",
"step-4": "\"\"\"\nStirng - Liste - Dosya\n - Fonksiyon yazıyoruz.\n - Bu fonksiyon iki parametre alacak. (dosya, string)\n 1. sorun : Dosyanın içinde string var ise True döndürecek yok ise False \n 2. sorun : Dosyanın içinde string bulunursa ilk bulunduğu konumu return edecek\n 3. sorun : Dosyanın içerisinde yazdığımız strinng kaç kere var onu liste halinde return eden fonksiyon\n \n\"\"\"\n\ndef fonkString(text, string):\n if string in text:\n print(\"TRUE\")\n print(text.index(string), \". sirada ilk\", string, \"bulundu\")\n print(text.count(string),\"tane\",string, \"var\")\n\n liste = []\n\n for i in range(len(text)):\n if(text[i] == string):\n liste.append(i)\n for x in liste:\n print(x)\n else:\n print(\"FALSE\")\n\nfonkString(\"Programlama laboratuvari calisma sorulari dosya string liste kullanma \", \"m\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.contenttypes.models import ContentType
from User.forms import EditProfileForm
from User import forms
from django.db.models import Q
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from posts.forms import *
# Create your views here.
from .models import Post
from comments.models import *
from comments.forms import *
def post_create(request):
form = PostForm(request.POST or None, request.FILES or None)
if request.method == "POST":
user= request.POST.get("user")
title = request.POST.get("title")
content = request.POST.get("content")
PostStudent.objects.create(user=user, title=title,content=content)
messages.success(request, "Successfully Posted")
#if form.is_valid():
#instance = form.save(commit=False)
#instance.save()
context = {
"form": form,
}
return render(request, "post/create_post.html", context)
def temp_post(request):
return render(request, 'post/Posts.html', {})
def temp_allpost(request):
obj = Post.objects.all()
context = {'obj': obj}
return render(request, 'post/All_Post.html', context)
def allpoststudents(request):
    obj = PostStudent.objects.all().order_by("-timestamp")
query = request.GET.get("q")
if query:
obj = obj.filter(
Q(title__icontains=query)|
Q(content__icontains=query)|
Q(user__icontains=query)|
Q(timestamp__icontains=query)
).distinct()
context = {'obj': obj}
return render(request, 'post/All_Post_Students.html', context)
def post_update(request, id=None):
instance = get_object_or_404(Post, id=id)
form = PostForm(request.POST or None, instance=instance)
if form.is_valid():
instance = form.save(commit=False)
instance.save()
messages.success(request, "<a href='#'>Item </a>Saved", extra_tags='html_safe')
return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": instance.title,
"instance": instance,
"form": form,
}
return render(request, "post/create_post.html", context)
def post_details(request, id=None):
instance = get_object_or_404(Post, id=id)
content_type = ContentType.objects.get_for_model(Post)
obj_id = instance.id
comments = Comment.objects.filter(content_type=content_type, object_id=obj_id)
initial_data = {
"content_type": content_type,
"object_id": instance.id
}
form = CommentForm(request.POST or None, initial= initial_data)
if form.is_valid():
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get("object_id")
content_data = form.cleaned_data.get("content")
parent_obj = None
try:
parent_id = int(request.POST.get("parent_id"))
except:
parent_id = None
if parent_id:
parent_qs = Comment.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = Comment.objects.get_or_create(
user = request.user,
content_type = content_type,
object_id = obj_id,
content = content_data,
parent = parent_obj,
)
context = {
"title":instance.title,
"instance":instance,
"comments": comments,
"form": form,
"obj_id": obj_id,
}
return render(request, "post/Posts.html", context)
def post_details_student(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
content_type = ContentType.objects.get_for_model(PostStudent)
obj_id = instance.id
comments = CommentStudent.objects.filter(content_type=content_type, object_id=obj_id)
initial_data = {
"content_type": content_type,
"object_id": instance.id
}
form = CommentForm(request.POST or None, initial=initial_data)
if form.is_valid():
c_type = form.cleaned_data.get("content_type")
content_type = ContentType.objects.get(model=c_type)
obj_id = form.cleaned_data.get("object_id")
content_data = form.cleaned_data.get("content")
parent_obj = None
try:
parent_id = int(request.POST.get("parent_id"))
except:
parent_id = None
if parent_id:
            parent_qs = CommentStudent.objects.filter(id=parent_id)
if parent_qs.exists():
parent_obj = parent_qs.first()
new_comment, created = CommentStudent.objects.get_or_create(
user=request.user,
content_type=content_type,
object_id=obj_id,
content=content_data,
parent=parent_obj,
)
context = {
"title": instance.title,
"instance": instance,
"comments": comments,
"form": form,
"obj_id": obj_id,
}
return render(request, "post/post_details_student.html", context)
def post_delete(request, id=None):
instance = get_object_or_404(PostStudent, id=id)
instance.delete()
messages.success(request, "Successfully deleted")
    obj = PostStudent.objects.all().order_by("-timestamp")
    return render(request, 'post/All_Post_Students.html', {'obj': obj})
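# Hedged sketch (assumption, not taken from the project): one way these views could
# be wired up in urls.py; the patterns and names below are illustrative only.
#
#     from django.urls import path
#     from posts import views
#
#     urlpatterns = [
#         path('post/create/', views.post_create, name='post_create'),
#         path('post/<int:id>/', views.post_details, name='post_details'),
#         path('student/posts/', views.allpoststudents, name='all_post_students'),
#         path('student/post/<int:id>/', views.post_details_student, name='post_details_student'),
#         path('student/post/<int:id>/delete/', views.post_delete, name='post_delete'),
#     ]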
|
normal
|
{
"blob_id": "e9fab2bb49cfda00b8cfedafab0009f691d11ec9",
"index": 9924,
"step-1": "<mask token>\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n user = request.POST.get('user')\n title = request.POST.get('title')\n content = request.POST.get('content')\n PostStudent.objects.create(user=user, title=title, content=content)\n messages.success(request, 'Successfully Posted')\n context = {'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\n<mask token>\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by('-timestamp')\n query = request.GET.get('q')\n if query:\n obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=\n query) | Q(user__icontains=query) | Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\n<mask token>\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=\n obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = Comment.objects.get_or_create(user=request.\n user, content_type=content_type, object_id=obj_id, content=\n content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/Posts.html', context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type,\n object_id=obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = CommentStudent.objects.get_or_create(user=\n request.user, content_type=content_type, object_id=obj_id,\n content=content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/post_details_student.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n user = request.POST.get('user')\n title = request.POST.get('title')\n content = request.POST.get('content')\n PostStudent.objects.create(user=user, title=title, content=content)\n messages.success(request, 'Successfully Posted')\n context = {'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\n<mask token>\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by('-timestamp')\n query = request.GET.get('q')\n if query:\n obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=\n query) | Q(user__icontains=query) | Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\ndef post_update(request, id=None):\n instance = get_object_or_404(Post, id=id)\n form = PostForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"<a href='#'>Item </a>Saved\", extra_tags=\n 'html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n context = {'title': instance.title, 'instance': instance, 'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=\n obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = Comment.objects.get_or_create(user=request.\n user, content_type=content_type, object_id=obj_id, content=\n content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/Posts.html', context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type,\n object_id=obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, 
created = CommentStudent.objects.get_or_create(user=\n request.user, content_type=content_type, object_id=obj_id,\n content=content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/post_details_student.html', context)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n user = request.POST.get('user')\n title = request.POST.get('title')\n content = request.POST.get('content')\n PostStudent.objects.create(user=user, title=title, content=content)\n messages.success(request, 'Successfully Posted')\n context = {'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\n<mask token>\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by('-timestamp')\n query = request.GET.get('q')\n if query:\n obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=\n query) | Q(user__icontains=query) | Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\ndef post_update(request, id=None):\n instance = get_object_or_404(Post, id=id)\n form = PostForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"<a href='#'>Item </a>Saved\", extra_tags=\n 'html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n context = {'title': instance.title, 'instance': instance, 'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=\n obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = Comment.objects.get_or_create(user=request.\n user, content_type=content_type, object_id=obj_id, content=\n content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/Posts.html', context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type,\n object_id=obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, 
created = CommentStudent.objects.get_or_create(user=\n request.user, content_type=content_type, object_id=obj_id,\n content=content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/post_details_student.html', context)\n\n\ndef post_delete(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n instance.delete()\n messages.success(request, 'Successfully deleted')\n return render(request, 'post/All_Post_Students.html', {})\n",
"step-4": "<mask token>\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == 'POST':\n user = request.POST.get('user')\n title = request.POST.get('title')\n content = request.POST.get('content')\n PostStudent.objects.create(user=user, title=title, content=content)\n messages.success(request, 'Successfully Posted')\n context = {'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\ndef temp_allpost(request):\n obj = Post.objects.all()\n context = {'obj': obj}\n return render(request, 'post/All_Post.html', context)\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by('-timestamp')\n query = request.GET.get('q')\n if query:\n obj = obj.filter(Q(title__icontains=query) | Q(content__icontains=\n query) | Q(user__icontains=query) | Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\ndef post_update(request, id=None):\n instance = get_object_or_404(Post, id=id)\n form = PostForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"<a href='#'>Item </a>Saved\", extra_tags=\n 'html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n context = {'title': instance.title, 'instance': instance, 'form': form}\n return render(request, 'post/create_post.html', context)\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=\n obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = Comment.objects.get_or_create(user=request.\n user, content_type=content_type, object_id=obj_id, content=\n content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/Posts.html', context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type,\n object_id=obj_id)\n initial_data = {'content_type': content_type, 'object_id': instance.id}\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get('content_type')\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get('object_id')\n content_data = form.cleaned_data.get('content')\n parent_obj = None\n try:\n parent_id = int(request.POST.get('parent_id'))\n except:\n parent_id = None\n if parent_id:\n 
parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n new_comment, created = CommentStudent.objects.get_or_create(user=\n request.user, content_type=content_type, object_id=obj_id,\n content=content_data, parent=parent_obj)\n context = {'title': instance.title, 'instance': instance, 'comments':\n comments, 'form': form, 'obj_id': obj_id}\n return render(request, 'post/post_details_student.html', context)\n\n\ndef post_delete(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n instance.delete()\n messages.success(request, 'Successfully deleted')\n return render(request, 'post/All_Post_Students.html', {})\n",
"step-5": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.contenttypes.models import ContentType\nfrom User.forms import EditProfileForm\nfrom User import forms\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.urls import reverse\nfrom django.http import HttpResponseRedirect\nfrom posts.forms import *\n\n\n# Create your views here.\nfrom .models import Post\nfrom comments.models import *\nfrom comments.forms import *\n\n\ndef post_create(request):\n form = PostForm(request.POST or None, request.FILES or None)\n if request.method == \"POST\":\n user= request.POST.get(\"user\")\n title = request.POST.get(\"title\")\n content = request.POST.get(\"content\")\n PostStudent.objects.create(user=user, title=title,content=content)\n messages.success(request, \"Successfully Posted\")\n #if form.is_valid():\n #instance = form.save(commit=False)\n #instance.save()\n context = {\n \"form\": form,\n\n }\n return render(request, \"post/create_post.html\", context)\n\n\ndef temp_post(request):\n return render(request, 'post/Posts.html', {})\n\n\ndef temp_allpost(request):\n obj = Post.objects.all()\n context = {'obj': obj}\n return render(request, 'post/All_Post.html', context)\n\n\ndef allpoststudents(request):\n if not request.user.is_staff or request.user.is_staff:\n obj = PostStudent.objects.all().order_by(\"-timestamp\")\n query = request.GET.get(\"q\")\n if query:\n obj = obj.filter(\n Q(title__icontains=query)|\n Q(content__icontains=query)|\n Q(user__icontains=query)|\n Q(timestamp__icontains=query)\n ).distinct()\n context = {'obj': obj}\n return render(request, 'post/All_Post_Students.html', context)\n\n\ndef post_update(request, id=None):\n instance = get_object_or_404(Post, id=id)\n form = PostForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request, \"<a href='#'>Item </a>Saved\", extra_tags='html_safe')\n return HttpResponseRedirect(instance.get_absolute_url())\n\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n \"form\": form,\n }\n return render(request, \"post/create_post.html\", context)\n\n\ndef post_details(request, id=None):\n instance = get_object_or_404(Post, id=id)\n content_type = ContentType.objects.get_for_model(Post)\n obj_id = instance.id\n comments = Comment.objects.filter(content_type=content_type, object_id=obj_id)\n initial_data = {\n \"content_type\": content_type,\n \"object_id\": instance.id\n }\n form = CommentForm(request.POST or None, initial= initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get(\"content_type\")\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get(\"object_id\")\n content_data = form.cleaned_data.get(\"content\")\n parent_obj = None\n try:\n parent_id = int(request.POST.get(\"parent_id\"))\n except:\n parent_id = None\n\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n\n new_comment, created = Comment.objects.get_or_create(\n user = request.user,\n content_type = content_type,\n object_id = obj_id,\n content = content_data,\n parent = parent_obj,\n )\n\n\n\n context = {\n \"title\":instance.title,\n \"instance\":instance,\n \"comments\": comments,\n \"form\": form,\n \"obj_id\": obj_id,\n }\n return render(request, \"post/Posts.html\", context)\n\n\ndef post_details_student(request, id=None):\n instance = get_object_or_404(PostStudent, 
id=id)\n content_type = ContentType.objects.get_for_model(PostStudent)\n obj_id = instance.id\n comments = CommentStudent.objects.filter(content_type=content_type, object_id=obj_id)\n initial_data = {\n \"content_type\": content_type,\n \"object_id\": instance.id\n }\n form = CommentForm(request.POST or None, initial=initial_data)\n if form.is_valid():\n c_type = form.cleaned_data.get(\"content_type\")\n content_type = ContentType.objects.get(model=c_type)\n obj_id = form.cleaned_data.get(\"object_id\")\n content_data = form.cleaned_data.get(\"content\")\n parent_obj = None\n try:\n parent_id = int(request.POST.get(\"parent_id\"))\n except:\n parent_id = None\n\n if parent_id:\n parent_qs = Comment.objects.filter(id=parent_id)\n if parent_qs.exists():\n parent_obj = parent_qs.first()\n\n new_comment, created = CommentStudent.objects.get_or_create(\n user=request.user,\n content_type=content_type,\n object_id=obj_id,\n content=content_data,\n parent=parent_obj,\n )\n\n context = {\n \"title\": instance.title,\n \"instance\": instance,\n \"comments\": comments,\n \"form\": form,\n \"obj_id\": obj_id,\n }\n return render(request, \"post/post_details_student.html\", context)\n\n\ndef post_delete(request, id=None):\n instance = get_object_or_404(PostStudent, id=id)\n instance.delete()\n messages.success(request, \"Successfully deleted\")\n return render(request, 'post/All_Post_Students.html', {})\n",
"step-ids": [
5,
6,
7,
8,
10
]
}
|
[
5,
6,
7,
8,
10
] |
#!/usr/bin/env python
# coding=utf-8
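# Lookup tables: C operators to recognise, type keywords to strip from the output, and literal text substitutions to apply.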
operators = ['-', '~', '++', '--', '*', '!', '/', '*', '%', '+', '-',
'>', '>=', '<', '<=', '==', '!=', '&&', '||', '=']
types = ['int ', 'double ', 'float ', 'char ']
toDelete = types + ['struct ']
toRepleace = [('printf(', 'print('), ('++', ' += 1'), ('--', ' -= 1'),
('/*', "'''"), ('*/', "'''"), ('//','#'),
('&&', 'and'), ('||', 'or')]
def isDigit(c):
    return c >= '0' and c <= '9'
def isChar(c):
    return (c >= 'a' and c <= 'z') or (c >= 'A' and c <= 'Z')
def isOperator(c):
return c in operators
def isDefun(line):
return '(' in line and ')' in line and sum([i in line for i in toDelete])
def isDefStruct(line):
return 'struct ' in line and len(line.split(' ')) == 2
def isUseStruct(line):
return 'struct ' in line and len(line.split(' ')) == 3
def isClarify(line):
return sum([line.startswith(i) for i in types]) and '=' not in line
def isPoint(line):
index = line.index('*') if '*' in line else -1
return index != -1 and len(line) > (index + 1) and isChar(line[index + 1]) and \
(sum([line.startswith(i) for i in types]) or '=' in line)
def isList(line):
return sum([line.startswith(i) for i in types]) and '[' in line and ']' in line
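# Small hand-rolled scanners; only parseInt is currently used (by main1, to read array sizes).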
def parseInt(s, start=0):
tmp = ''
while start < len(s):
if isDigit(s[start]):
tmp += s[start]
elif len(tmp):
break
start += 1
return int(tmp), start - len(tmp)
def parseVar(s, start=0):
tmp = ''
while start < len(s):
if isChar(s[start]):
tmp += s[start]
elif isDigit(s[start]) and len(tmp):
break
start += 1
return tmp, start - len(tmp)
def parseOperator(s, start=0):
tmp = ''
while start < len(s):
if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':
tmp += s[start]
elif len(tmp) and isOperator(tmp):
return tmp, start - len(tmp)
else:
tmp = ''
start += 1
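# First strategy: rewrite the C source line by line with the substitution tables above and write the result to <output>.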
def main1(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
indent = ''
instruct = False
inFor = ''
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
if '{' in line:
if instruct:
f.write(indent + '{\n')
indent += ' '
elif '}' in line:
if inFor:
f.write('%s%s\n' % (indent, inFor))
inFor = ''
indent = indent[:-4]
if instruct:
instruct = False
f.write(indent + '}\n')
# indent = indent[:-4]
else:
s = indent
if line.startswith('//'):
s += '{}'
elif isDefun(line):
s += 'def {}:'
elif isUseStruct(line):
l = line.split(' ')[1:]
s += ('{} = [{}.copy() for i in range({})]'
'').format(l[1][:l[1].index('[')],
l[0], parseInt(l[1], l[1].index('['))[0])
s += '{}'
line = ''
elif isDefStruct(line):
# indent += ' '
# s += 'class {}:\n' + indent + 'def __init__(self):'
s += '{} = \\'
instruct = True
elif 'if' in line or 'while ' in line:
s += '{}:'
elif 'printf' in line and '%' in line:
s += '{})'
first_comma = line.index(',')
line = line[:first_comma] + ' % (' + line[first_comma + 2:]
elif 'for' in line:
line = line[3:].replace('(', '').replace(')', '').strip()
line = [l.strip() for l in line.split(';')]
if line[0] and line[1]:
s += '%s\n%swhile %s:{}' % (line[0], s, line[1])
if not line[0] and line[1]:
s += 'while %s:{}' % (line[1])
if line[0] and not line[1]:
s += '%s\n%swhile 1:{}' % (line[0], s)
if not line[0] and not line[1]:
s += 'while 1:{}'
inFor = line[2]
line = ''
elif instruct:
# s += 'self.{} = None'
s += '"{}": None,'
elif isClarify(line):
s += '# Clarify `{}` is skiped'
else:
s += '{}'
if isPoint(line):
index = -1
for i in range(line.count('*')):
index = line.index('*', index + 1)
if isChar(line[index + 1]):
line = line[:index] + 'p_' + line[index + 1:]
s = s.format(line.strip())
for i, j in toRepleace:
while i in s:
s = s.replace(i, j)
if not s.strip().startswith('#'):
for i in toDelete:
while i in s:
s = s.replace(i, '')
f.write(s + '\n')
f.write('if __name__ == "__main__":\n main()')
f.close()
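# Second, unfinished strategy: rst is never filled in; the loop currently only skips preprocessor lines.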
def main2(filename, output=None):
with open(filename, 'r') as f:
lines = f.readlines()
if not output:
output = filename + '.py'
f = open(output, 'w')
rst = []
for line in lines:
line = line.lstrip(' ').rstrip(';\n')
if line.startswith('#'):
continue
f.close()
if __name__ == '__main__':
main1('test.c', output='replace.py')
# main2('test.c', output='list.py')
|
normal
|
{
"blob_id": "082e3350c5827ff2ca909084f2d6a206ae21a7e6",
"index": 3240,
"step-1": "<mask token>\n\n\ndef isChar(c):\n return c > 'a' and c < 'z' or c > 'A' and c < 'Z'\n\n\ndef isOperator(c):\n return c in operators\n\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([(i in line) for i in toDelete])\n\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\n\n<mask token>\n\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > index + 1 and isChar(line[index + 1]\n ) and (sum([line.startswith(i) for i in types]) or '=' in line)\n\n\ndef isList(line):\n return sum([line.startswith(i) for i in types]\n ) and '[' in line and ']' in line\n\n\n<mask token>\n\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n\n\n<mask token>\n\n\ndef main2(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n f.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isChar(c):\n return c > 'a' and c < 'z' or c > 'A' and c < 'Z'\n\n\ndef isOperator(c):\n return c in operators\n\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([(i in line) for i in toDelete])\n\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\n\n<mask token>\n\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > index + 1 and isChar(line[index + 1]\n ) and (sum([line.startswith(i) for i in types]) or '=' in line)\n\n\ndef isList(line):\n return sum([line.startswith(i) for i in types]\n ) and '[' in line and ']' in line\n\n\ndef parseInt(s, start=0):\n tmp = ''\n while start < len(s):\n if isDigit(s[start]):\n tmp += s[start]\n elif len(tmp):\n break\n start += 1\n return int(tmp), start - len(tmp)\n\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n\n\ndef main1(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n indent = ''\n instruct = False\n inFor = ''\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n if '{' in line:\n if instruct:\n f.write(indent + '{\\n')\n indent += ' '\n elif '}' in line:\n if inFor:\n f.write('%s%s\\n' % (indent, inFor))\n inFor = ''\n indent = indent[:-4]\n if instruct:\n instruct = False\n f.write(indent + '}\\n')\n else:\n s = indent\n if line.startswith('//'):\n s += '{}'\n elif isDefun(line):\n s += 'def {}:'\n elif isUseStruct(line):\n l = line.split(' ')[1:]\n s += '{} = [{}.copy() for i in range({})]'.format(l[1][:l[1\n ].index('[')], l[0], parseInt(l[1], l[1].index('['))[0])\n s += '{}'\n line = ''\n elif isDefStruct(line):\n s += '{} = \\\\'\n instruct = True\n elif 'if' in line or 'while ' in line:\n s += '{}:'\n elif 'printf' in line and '%' in line:\n s += '{})'\n first_comma = line.index(',')\n line = line[:first_comma] + ' % (' + line[first_comma + 2:]\n elif 'for' in line:\n line = line[3:].replace('(', '').replace(')', '').strip()\n line = [l.strip() for l in line.split(';')]\n if line[0] and line[1]:\n s += '%s\\n%swhile %s:{}' % (line[0], s, line[1])\n if not line[0] and line[1]:\n s += 'while %s:{}' % line[1]\n if line[0] and not line[1]:\n s += '%s\\n%swhile 1:{}' % (line[0], s)\n if not line[0] and not line[1]:\n s += 'while 1:{}'\n inFor = line[2]\n line = ''\n elif instruct:\n s += '\"{}\": None,'\n elif isClarify(line):\n s += '# Clarify `{}` is skiped'\n else:\n s += '{}'\n if isPoint(line):\n index = -1\n for i in range(line.count('*')):\n index = line.index('*', index + 1)\n if isChar(line[index + 1]):\n line = line[:index] + 'p_' + line[index + 1:]\n s = s.format(line.strip())\n for i, j in toRepleace:\n while i in s:\n s = s.replace(i, j)\n if not s.strip().startswith('#'):\n for i in toDelete:\n while i in s:\n s = s.replace(i, '')\n f.write(s + '\\n')\n f.write(\"\"\"if __name__ == \"__main__\":\n main()\"\"\")\n f.close()\n\n\ndef main2(filename, 
output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n f.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef isDigit(c):\n return c > '0' and c < '9'\n\n\ndef isChar(c):\n return c > 'a' and c < 'z' or c > 'A' and c < 'Z'\n\n\ndef isOperator(c):\n return c in operators\n\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([(i in line) for i in toDelete])\n\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\n\ndef isClarify(line):\n return sum([line.startswith(i) for i in types]) and '=' not in line\n\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > index + 1 and isChar(line[index + 1]\n ) and (sum([line.startswith(i) for i in types]) or '=' in line)\n\n\ndef isList(line):\n return sum([line.startswith(i) for i in types]\n ) and '[' in line and ']' in line\n\n\ndef parseInt(s, start=0):\n tmp = ''\n while start < len(s):\n if isDigit(s[start]):\n tmp += s[start]\n elif len(tmp):\n break\n start += 1\n return int(tmp), start - len(tmp)\n\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n\n\ndef main1(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n indent = ''\n instruct = False\n inFor = ''\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n if '{' in line:\n if instruct:\n f.write(indent + '{\\n')\n indent += ' '\n elif '}' in line:\n if inFor:\n f.write('%s%s\\n' % (indent, inFor))\n inFor = ''\n indent = indent[:-4]\n if instruct:\n instruct = False\n f.write(indent + '}\\n')\n else:\n s = indent\n if line.startswith('//'):\n s += '{}'\n elif isDefun(line):\n s += 'def {}:'\n elif isUseStruct(line):\n l = line.split(' ')[1:]\n s += '{} = [{}.copy() for i in range({})]'.format(l[1][:l[1\n ].index('[')], l[0], parseInt(l[1], l[1].index('['))[0])\n s += '{}'\n line = ''\n elif isDefStruct(line):\n s += '{} = \\\\'\n instruct = True\n elif 'if' in line or 'while ' in line:\n s += '{}:'\n elif 'printf' in line and '%' in line:\n s += '{})'\n first_comma = line.index(',')\n line = line[:first_comma] + ' % (' + line[first_comma + 2:]\n elif 'for' in line:\n line = line[3:].replace('(', '').replace(')', '').strip()\n line = [l.strip() for l in line.split(';')]\n if line[0] and line[1]:\n s += '%s\\n%swhile %s:{}' % (line[0], s, line[1])\n if not line[0] and line[1]:\n s += 'while %s:{}' % line[1]\n if line[0] and not line[1]:\n s += '%s\\n%swhile 1:{}' % (line[0], s)\n if not line[0] and not line[1]:\n s += 'while 1:{}'\n inFor = line[2]\n line = ''\n elif instruct:\n s += '\"{}\": None,'\n elif isClarify(line):\n s += '# Clarify `{}` is skiped'\n else:\n s += '{}'\n if isPoint(line):\n index = -1\n for i in range(line.count('*')):\n index = line.index('*', index + 1)\n if isChar(line[index + 1]):\n line = line[:index] + 'p_' + line[index + 1:]\n s = s.format(line.strip())\n for i, j in toRepleace:\n while i in s:\n s = s.replace(i, j)\n if not s.strip().startswith('#'):\n for i in toDelete:\n while i in s:\n s = s.replace(i, 
'')\n f.write(s + '\\n')\n f.write(\"\"\"if __name__ == \"__main__\":\n main()\"\"\")\n f.close()\n\n\ndef main2(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n f.close()\n\n\nif __name__ == '__main__':\n main1('test.c', output='replace.py')\n",
"step-4": "operators = ['-', '~', '++', '--', '*', '!', '/', '*', '%', '+', '-', '>',\n '>=', '<', '<=', '==', '!=', '&&', '||', '=']\ntypes = ['int ', 'double ', 'float ', 'char ']\ntoDelete = types + ['struct ']\ntoRepleace = [('printf(', 'print('), ('++', ' += 1'), ('--', ' -= 1'), (\n '/*', \"'''\"), ('*/', \"'''\"), ('//', '#'), ('&&', 'and'), ('||', 'or')]\n\n\ndef isDigit(c):\n return c > '0' and c < '9'\n\n\ndef isChar(c):\n return c > 'a' and c < 'z' or c > 'A' and c < 'Z'\n\n\ndef isOperator(c):\n return c in operators\n\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([(i in line) for i in toDelete])\n\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\n\ndef isClarify(line):\n return sum([line.startswith(i) for i in types]) and '=' not in line\n\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > index + 1 and isChar(line[index + 1]\n ) and (sum([line.startswith(i) for i in types]) or '=' in line)\n\n\ndef isList(line):\n return sum([line.startswith(i) for i in types]\n ) and '[' in line and ']' in line\n\n\ndef parseInt(s, start=0):\n tmp = ''\n while start < len(s):\n if isDigit(s[start]):\n tmp += s[start]\n elif len(tmp):\n break\n start += 1\n return int(tmp), start - len(tmp)\n\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n\n\ndef main1(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n indent = ''\n instruct = False\n inFor = ''\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n if '{' in line:\n if instruct:\n f.write(indent + '{\\n')\n indent += ' '\n elif '}' in line:\n if inFor:\n f.write('%s%s\\n' % (indent, inFor))\n inFor = ''\n indent = indent[:-4]\n if instruct:\n instruct = False\n f.write(indent + '}\\n')\n else:\n s = indent\n if line.startswith('//'):\n s += '{}'\n elif isDefun(line):\n s += 'def {}:'\n elif isUseStruct(line):\n l = line.split(' ')[1:]\n s += '{} = [{}.copy() for i in range({})]'.format(l[1][:l[1\n ].index('[')], l[0], parseInt(l[1], l[1].index('['))[0])\n s += '{}'\n line = ''\n elif isDefStruct(line):\n s += '{} = \\\\'\n instruct = True\n elif 'if' in line or 'while ' in line:\n s += '{}:'\n elif 'printf' in line and '%' in line:\n s += '{})'\n first_comma = line.index(',')\n line = line[:first_comma] + ' % (' + line[first_comma + 2:]\n elif 'for' in line:\n line = line[3:].replace('(', '').replace(')', '').strip()\n line = [l.strip() for l in line.split(';')]\n if line[0] and line[1]:\n s += '%s\\n%swhile %s:{}' % (line[0], s, line[1])\n if not line[0] and line[1]:\n s += 'while %s:{}' % line[1]\n if line[0] and not line[1]:\n s += '%s\\n%swhile 1:{}' % (line[0], s)\n if not line[0] and not line[1]:\n s += 'while 1:{}'\n inFor = line[2]\n line = ''\n elif instruct:\n s += '\"{}\": None,'\n elif isClarify(line):\n s += '# Clarify `{}` is skiped'\n else:\n s += '{}'\n if isPoint(line):\n index = 
-1\n for i in range(line.count('*')):\n index = line.index('*', index + 1)\n if isChar(line[index + 1]):\n line = line[:index] + 'p_' + line[index + 1:]\n s = s.format(line.strip())\n for i, j in toRepleace:\n while i in s:\n s = s.replace(i, j)\n if not s.strip().startswith('#'):\n for i in toDelete:\n while i in s:\n s = s.replace(i, '')\n f.write(s + '\\n')\n f.write(\"\"\"if __name__ == \"__main__\":\n main()\"\"\")\n f.close()\n\n\ndef main2(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n f.close()\n\n\nif __name__ == '__main__':\n main1('test.c', output='replace.py')\n",
"step-5": "#!/usr/bin/env python\n# coding=utf-8\n\noperators = ['-', '~', '++', '--', '*', '!', '/', '*', '%', '+', '-', \n '>', '>=', '<', '<=', '==', '!=', '&&', '||', '=']\ntypes = ['int ', 'double ', 'float ', 'char ']\ntoDelete = types + ['struct ']\ntoRepleace = [('printf(', 'print('), ('++', ' += 1'), ('--', ' -= 1'),\n ('/*', \"'''\"), ('*/', \"'''\"), ('//','#'),\n ('&&', 'and'), ('||', 'or')]\n\ndef isDigit(c):\n return c > '0' and c < '9'\n\ndef isChar(c):\n return (c > 'a' and c < 'z') or (c > 'A' and c < 'Z')\n\ndef isOperator(c):\n return c in operators\n\ndef isDefun(line):\n return '(' in line and ')' in line and sum([i in line for i in toDelete])\n\ndef isDefStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 2\n\ndef isUseStruct(line):\n return 'struct ' in line and len(line.split(' ')) == 3\n\ndef isClarify(line):\n return sum([line.startswith(i) for i in types]) and '=' not in line\n\ndef isPoint(line):\n index = line.index('*') if '*' in line else -1\n return index != -1 and len(line) > (index + 1) and isChar(line[index + 1]) and \\\n (sum([line.startswith(i) for i in types]) or '=' in line)\n \n\ndef isList(line):\n return sum([line.startswith(i) for i in types]) and '[' in line and ']' in line\n\ndef parseInt(s, start=0):\n tmp = ''\n while start < len(s):\n if isDigit(s[start]):\n tmp += s[start]\n elif len(tmp):\n break\n start += 1\n return int(tmp), start - len(tmp)\n\ndef parseVar(s, start=0):\n tmp = ''\n while start < len(s):\n if isChar(s[start]):\n tmp += s[start]\n elif isDigit(s[start]) and len(tmp):\n break\n start += 1\n return tmp, start - len(tmp)\n\ndef parseOperator(s, start=0):\n tmp = ''\n while start < len(s):\n if not isDigit(s[start]) and not isChar(s[start]) and s[start] != ' ':\n tmp += s[start]\n elif len(tmp) and isOperator(tmp):\n return tmp, start - len(tmp)\n else:\n tmp = ''\n start += 1\n \ndef main1(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n indent = ''\n instruct = False\n inFor = ''\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n if '{' in line:\n if instruct:\n f.write(indent + '{\\n')\n indent += ' '\n elif '}' in line:\n if inFor:\n f.write('%s%s\\n' % (indent, inFor))\n inFor = ''\n indent = indent[:-4]\n if instruct:\n instruct = False\n f.write(indent + '}\\n')\n# indent = indent[:-4]\n else:\n s = indent\n if line.startswith('//'):\n s += '{}'\n elif isDefun(line):\n s += 'def {}:'\n elif isUseStruct(line):\n l = line.split(' ')[1:]\n s += ('{} = [{}.copy() for i in range({})]'\n '').format(l[1][:l[1].index('[')],\n l[0], parseInt(l[1], l[1].index('['))[0])\n s += '{}'\n line = ''\n elif isDefStruct(line):\n# indent += ' '\n# s += 'class {}:\\n' + indent + 'def __init__(self):'\n s += '{} = \\\\'\n instruct = True\n elif 'if' in line or 'while ' in line:\n s += '{}:'\n elif 'printf' in line and '%' in line:\n s += '{})'\n first_comma = line.index(',')\n line = line[:first_comma] + ' % (' + line[first_comma + 2:]\n elif 'for' in line:\n line = line[3:].replace('(', '').replace(')', '').strip()\n line = [l.strip() for l in line.split(';')]\n if line[0] and line[1]:\n s += '%s\\n%swhile %s:{}' % (line[0], s, line[1])\n if not line[0] and line[1]:\n s += 'while %s:{}' % (line[1])\n if line[0] and not line[1]:\n s += '%s\\n%swhile 1:{}' % (line[0], s)\n if not line[0] and not line[1]:\n s += 'while 1:{}'\n inFor = line[2]\n line = ''\n elif instruct:\n# 
s += 'self.{} = None'\n s += '\"{}\": None,'\n elif isClarify(line):\n s += '# Clarify `{}` is skiped'\n else:\n s += '{}'\n if isPoint(line):\n index = -1\n for i in range(line.count('*')):\n index = line.index('*', index + 1)\n if isChar(line[index + 1]):\n line = line[:index] + 'p_' + line[index + 1:]\n s = s.format(line.strip())\n for i, j in toRepleace:\n while i in s:\n s = s.replace(i, j)\n if not s.strip().startswith('#'):\n for i in toDelete:\n while i in s:\n s = s.replace(i, '')\n f.write(s + '\\n')\n f.write('if __name__ == \"__main__\":\\n main()')\n f.close()\n \ndef main2(filename, output=None):\n with open(filename, 'r') as f:\n lines = f.readlines()\n if not output:\n output = filename + '.py'\n f = open(output, 'w')\n rst = []\n for line in lines:\n line = line.lstrip(' ').rstrip(';\\n')\n if line.startswith('#'):\n continue\n \n \n f.close() \n \nif __name__ == '__main__':\n main1('test.c', output='replace.py')\n# main2('test.c', output='list.py')\n ",
"step-ids": [
10,
12,
15,
16,
17
]
}
|
[
10,
12,
15,
16,
17
] |
from translit import convert_input
def openfile(name):
f = open(name, 'r', encoding = 'utf-8')
text = f.readlines()
f.close()
return text
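# Parse a lexeme description file into [lex, gramm, trans_ru] triples.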
def makedict(text):
A = []
for line in text:
if 'lex:' in line:
a = []
a.append(line[6:].replace('\n',''))
elif 'gramm:' in line:
a.append(line[8:].replace('\n',''))
elif 'trans_ru:' in line:
a.append(line[11:].replace('\n',''))
A.append(a)
return A
def writefile(name, text):
fw = open(name, 'w', encoding = 'utf-8')
fw.write(text)
fw.close()
#alf = 'абвгдежзийклмнопрстуфхцчшыьёюяӧӝӟӵ'
#trans = list('abvgdežzijklmnoprstufxcčšə')
#trans.append('ə̂')
#trans.append('ə̈əɤ')
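# Build the combined lexeme list from the udm_lexemes_*.txt files, prepending a transliterated form of each headword.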
def dictionary():
A = []
for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V']:
A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))
transl = []
for el in A:
a = []
a.append(convert_input(el[0], 'cyr'))
a += el
transl.append(a)
return transl
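# Split the lexicon into N/V/IMIT wordlists according to the part-of-speech tags in the gramm field.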
def dict_split(transl):
D = {k:[] for k in ['N', 'IMIT', 'V']}
row = '%s\t%s\t%s\t%s\n'
    for line in transl:
parts = []
if line[2] == 'N' or 'ADJ' in line[2]:
parts.append(line[2])
elif 'N-persn' in line[2] or 'N,' in line[2]:
parts.append('N')
elif 'V,' in line[2]:
parts.append('V')
if 'ADV' in line[2]:
parts.append('ADV')
if 'POST' in line[2]:
parts.append('POST')
if 'PRO' in line[2]:
parts.append('PRO')
if 'NUM' in line[2]:
parts.append('NUM')
if 'INTRJ' in line[2]:
parts.append('INTRJ')
if 'CNJ' in line[2]:
parts.append('CNJ')
if 'IMIT' in line[2]:
parts.append('IMIT')
if 'PART' in line[2]:
parts.append('PART')
        if any(p in parts for p in ('N', 'ADJ', 'ADV', 'POST', 'PRO', 'NUM', 'PRAED', 'INTRJ', 'CNJ', 'PART')):
D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'V' in parts or 'PRAED' in parts:
D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'IMIT' in parts:
D['IMIT'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
return D
def main():
D = dict_split(dictionary())
for k in D:
D[k] = set(D[k])
fw = open('udmlex_' + k + '.tsv', 'w', encoding = 'utf-8')
fw.write(''.join(D[k]))
fw.close()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "29e54a9ec0d65965645ac4aabf8c247a8857a25f",
"index": 3778,
"step-1": "<mask token>\n\n\ndef openfile(name):\n f = open(name, 'r', encoding='utf-8')\n text = f.readlines()\n f.close()\n return text\n\n\ndef makedict(text):\n A = []\n for line in text:\n if 'lex:' in line:\n a = []\n a.append(line[6:].replace('\\n', ''))\n elif 'gramm:' in line:\n a.append(line[8:].replace('\\n', ''))\n elif 'trans_ru:' in line:\n a.append(line[11:].replace('\\n', ''))\n A.append(a)\n return A\n\n\n<mask token>\n\n\ndef dictionary():\n A = []\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'\n ]:\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\n transl = []\n for el in A:\n a = []\n a.append(convert_input(el[0], 'cyr'))\n a += el\n transl.append(a)\n return transl\n\n\ndef dict_split(transl):\n D = {k: [] for k in ['N', 'IMIT', 'V']}\n row = '%s\\t%s\\t%s\\t%s\\n'\n for line in dictionary():\n parts = []\n if line[2] == 'N' or 'ADJ' in line[2]:\n parts.append(line[2])\n elif 'N-persn' in line[2] or 'N,' in line[2]:\n parts.append('N')\n elif 'V,' in line[2]:\n parts.append('V')\n if 'ADV' in line[2]:\n parts.append('ADV')\n if 'POST' in line[2]:\n parts.append('POST')\n if 'PRO' in line[2]:\n parts.append('PRO')\n if 'NUM' in line[2]:\n parts.append('NUM')\n if 'INTRJ' in line[2]:\n parts.append('INTRJ')\n if 'CNJ' in line[2]:\n parts.append('CNJ')\n if 'IMIT' in line[2]:\n parts.append('IMIT')\n if 'PART' in line[2]:\n parts.append('PART')\n if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in\n parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or\n 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'V' in parts or 'PRAED' in parts:\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'IMIT' in parts:\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),\n line[3]))\n return D\n\n\ndef main():\n D = dict_split(dictionary())\n for k in D:\n D[k] = set(D[k])\n fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')\n fw.write(''.join(D[k]))\n fw.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef openfile(name):\n f = open(name, 'r', encoding='utf-8')\n text = f.readlines()\n f.close()\n return text\n\n\ndef makedict(text):\n A = []\n for line in text:\n if 'lex:' in line:\n a = []\n a.append(line[6:].replace('\\n', ''))\n elif 'gramm:' in line:\n a.append(line[8:].replace('\\n', ''))\n elif 'trans_ru:' in line:\n a.append(line[11:].replace('\\n', ''))\n A.append(a)\n return A\n\n\ndef writefile(name, text):\n fw = open(name, 'w', encoding='utf-8')\n fw.write(text)\n fw.close()\n\n\ndef dictionary():\n A = []\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'\n ]:\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\n transl = []\n for el in A:\n a = []\n a.append(convert_input(el[0], 'cyr'))\n a += el\n transl.append(a)\n return transl\n\n\ndef dict_split(transl):\n D = {k: [] for k in ['N', 'IMIT', 'V']}\n row = '%s\\t%s\\t%s\\t%s\\n'\n for line in dictionary():\n parts = []\n if line[2] == 'N' or 'ADJ' in line[2]:\n parts.append(line[2])\n elif 'N-persn' in line[2] or 'N,' in line[2]:\n parts.append('N')\n elif 'V,' in line[2]:\n parts.append('V')\n if 'ADV' in line[2]:\n parts.append('ADV')\n if 'POST' in line[2]:\n parts.append('POST')\n if 'PRO' in line[2]:\n parts.append('PRO')\n if 'NUM' in line[2]:\n parts.append('NUM')\n if 'INTRJ' in line[2]:\n parts.append('INTRJ')\n if 'CNJ' in line[2]:\n parts.append('CNJ')\n if 'IMIT' in line[2]:\n parts.append('IMIT')\n if 'PART' in line[2]:\n parts.append('PART')\n if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in\n parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or\n 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'V' in parts or 'PRAED' in parts:\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'IMIT' in parts:\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),\n line[3]))\n return D\n\n\ndef main():\n D = dict_split(dictionary())\n for k in D:\n D[k] = set(D[k])\n fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')\n fw.write(''.join(D[k]))\n fw.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef openfile(name):\n f = open(name, 'r', encoding='utf-8')\n text = f.readlines()\n f.close()\n return text\n\n\ndef makedict(text):\n A = []\n for line in text:\n if 'lex:' in line:\n a = []\n a.append(line[6:].replace('\\n', ''))\n elif 'gramm:' in line:\n a.append(line[8:].replace('\\n', ''))\n elif 'trans_ru:' in line:\n a.append(line[11:].replace('\\n', ''))\n A.append(a)\n return A\n\n\ndef writefile(name, text):\n fw = open(name, 'w', encoding='utf-8')\n fw.write(text)\n fw.close()\n\n\ndef dictionary():\n A = []\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'\n ]:\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\n transl = []\n for el in A:\n a = []\n a.append(convert_input(el[0], 'cyr'))\n a += el\n transl.append(a)\n return transl\n\n\ndef dict_split(transl):\n D = {k: [] for k in ['N', 'IMIT', 'V']}\n row = '%s\\t%s\\t%s\\t%s\\n'\n for line in dictionary():\n parts = []\n if line[2] == 'N' or 'ADJ' in line[2]:\n parts.append(line[2])\n elif 'N-persn' in line[2] or 'N,' in line[2]:\n parts.append('N')\n elif 'V,' in line[2]:\n parts.append('V')\n if 'ADV' in line[2]:\n parts.append('ADV')\n if 'POST' in line[2]:\n parts.append('POST')\n if 'PRO' in line[2]:\n parts.append('PRO')\n if 'NUM' in line[2]:\n parts.append('NUM')\n if 'INTRJ' in line[2]:\n parts.append('INTRJ')\n if 'CNJ' in line[2]:\n parts.append('CNJ')\n if 'IMIT' in line[2]:\n parts.append('IMIT')\n if 'PART' in line[2]:\n parts.append('PART')\n if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in\n parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or\n 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'V' in parts or 'PRAED' in parts:\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'IMIT' in parts:\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),\n line[3]))\n return D\n\n\ndef main():\n D = dict_split(dictionary())\n for k in D:\n D[k] = set(D[k])\n fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')\n fw.write(''.join(D[k]))\n fw.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from translit import convert_input\n\n\ndef openfile(name):\n f = open(name, 'r', encoding='utf-8')\n text = f.readlines()\n f.close()\n return text\n\n\ndef makedict(text):\n A = []\n for line in text:\n if 'lex:' in line:\n a = []\n a.append(line[6:].replace('\\n', ''))\n elif 'gramm:' in line:\n a.append(line[8:].replace('\\n', ''))\n elif 'trans_ru:' in line:\n a.append(line[11:].replace('\\n', ''))\n A.append(a)\n return A\n\n\ndef writefile(name, text):\n fw = open(name, 'w', encoding='utf-8')\n fw.write(text)\n fw.close()\n\n\ndef dictionary():\n A = []\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'\n ]:\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\n transl = []\n for el in A:\n a = []\n a.append(convert_input(el[0], 'cyr'))\n a += el\n transl.append(a)\n return transl\n\n\ndef dict_split(transl):\n D = {k: [] for k in ['N', 'IMIT', 'V']}\n row = '%s\\t%s\\t%s\\t%s\\n'\n for line in dictionary():\n parts = []\n if line[2] == 'N' or 'ADJ' in line[2]:\n parts.append(line[2])\n elif 'N-persn' in line[2] or 'N,' in line[2]:\n parts.append('N')\n elif 'V,' in line[2]:\n parts.append('V')\n if 'ADV' in line[2]:\n parts.append('ADV')\n if 'POST' in line[2]:\n parts.append('POST')\n if 'PRO' in line[2]:\n parts.append('PRO')\n if 'NUM' in line[2]:\n parts.append('NUM')\n if 'INTRJ' in line[2]:\n parts.append('INTRJ')\n if 'CNJ' in line[2]:\n parts.append('CNJ')\n if 'IMIT' in line[2]:\n parts.append('IMIT')\n if 'PART' in line[2]:\n parts.append('PART')\n if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in\n parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or\n 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'V' in parts or 'PRAED' in parts:\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'IMIT' in parts:\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),\n line[3]))\n return D\n\n\ndef main():\n D = dict_split(dictionary())\n for k in D:\n D[k] = set(D[k])\n fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')\n fw.write(''.join(D[k]))\n fw.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from translit import convert_input\r\n\r\ndef openfile(name):\r\n f = open(name, 'r', encoding = 'utf-8')\r\n text = f.readlines()\r\n f.close()\r\n return text\r\n\r\ndef makedict(text):\r\n A = []\r\n for line in text:\r\n if 'lex:' in line:\r\n a = []\r\n a.append(line[6:].replace('\\n',''))\r\n elif 'gramm:' in line:\r\n a.append(line[8:].replace('\\n',''))\r\n elif 'trans_ru:' in line:\r\n a.append(line[11:].replace('\\n',''))\r\n A.append(a)\r\n return A\r\n\r\ndef writefile(name, text):\r\n fw = open(name, 'w', encoding = 'utf-8')\r\n fw.write(text) \r\n fw.close()\r\n\r\n#alf = 'абвгдежзийклмнопрстуфхцчшыьёюяӧӝӟӵ'\r\n#trans = list('abvgdežzijklmnoprstufxcčšə')\r\n#trans.append('ə̂')\r\n#trans.append('ə̈əɤ')\r\n\r\ndef dictionary():\r\n A = []\r\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V']:\r\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\r\n transl = []\r\n for el in A:\r\n a = []\r\n a.append(convert_input(el[0], 'cyr'))\r\n a += el\r\n transl.append(a)\r\n return transl\r\n\r\ndef dict_split(transl):\r\n D = {k:[] for k in ['N', 'IMIT', 'V']}\r\n row = '%s\\t%s\\t%s\\t%s\\n'\r\n for line in dictionary():\r\n parts = []\r\n if line[2] == 'N' or 'ADJ' in line[2]:\r\n parts.append(line[2])\r\n elif 'N-persn' in line[2] or 'N,' in line[2]:\r\n parts.append('N')\r\n elif 'V,' in line[2]: \r\n parts.append('V')\r\n if 'ADV' in line[2]:\r\n parts.append('ADV')\r\n if 'POST' in line[2]:\r\n parts.append('POST')\r\n if 'PRO' in line[2]:\r\n parts.append('PRO')\r\n if 'NUM' in line[2]:\r\n parts.append('NUM')\r\n if 'INTRJ' in line[2]:\r\n parts.append('INTRJ')\r\n if 'CNJ' in line[2]:\r\n parts.append('CNJ')\r\n if 'IMIT' in line[2]:\r\n parts.append('IMIT')\r\n if 'PART' in line[2]:\r\n parts.append('PART')\r\n if 'N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts:\r\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\r\n if 'V' in parts or 'PRAED' in parts:\r\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\r\n if 'IMIT' in parts:\r\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\r\n return D\r\n\r\ndef main():\r\n D = dict_split(dictionary()) \r\n for k in D:\r\n D[k] = set(D[k])\r\n fw = open('udmlex_' + k + '.tsv', 'w', encoding = 'utf-8')\r\n fw.write(''.join(D[k]))\r\n fw.close()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import sys, os
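# Replace repeated CSS colour values with LESS variables, writing <name>.less and <name>_variables.less for each input file.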
class Extractor:
def __init__(self, prefix=''):
self.variables = {}
self.prefix = os.path.basename(prefix)
'''
Returns the variable name if a variable with
the value <value> is found.
'''
def find_variable_name(self, value):
for var, val in self.variables.items():
if value == val:
return var
'''
Scans a list of <lines> containing CSS and
returns a list of strings containing the
rendered LESS version.
'''
def scan(self, lines):
yield "@import '%s_variables.less'\n\n" %self.prefix
for line in lines:
found_prop = False
for prop in ('background-color', 'background', 'color'):
if prop in line:
found_prop = True
value = line.split(':')[1].strip().replace('}', '')
if not (value in self.variables.values()):
self.variables['@var%i' %(len(self.variables) + 1)] = value
yield line.replace(value, self.find_variable_name(value) + ';')
if not found_prop:
yield line
'''
Returns the output for the variables.less
file as a list of strings
'''
def get_variables(self):
for var, val in self.variables.items():
yield var + ': ' + val
if __name__ == '__main__':
if len(sys.argv) > 1:
for path in sys.argv[1:]:
name = '.'.join(path.split('.')[:-1])
extractor = Extractor(name)
read = open(path)
write = open(name + '.less', 'w')
variables = open(name + '_variables.less', 'w')
try:
for line in extractor.scan(read.readlines()):
write.write(line)
for line in extractor.get_variables():
variables.write(line + os.linesep)
finally:
variables.close()
write.close()
read.close()
else:
print('usage: python extract.py [file]')
|
normal
|
{
"blob_id": "dffcaf47ec8e0daa940e7047f11681ef3eabc772",
"index": 8591,
"step-1": "<mask token>\n\n\nclass Extractor:\n\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n <mask token>\n\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n <mask token>\n\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" % self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not value in self.variables.values():\n self.variables['@var%i' % (len(self.variables) + 1)\n ] = value\n yield line.replace(value, self.find_variable_name(value\n ) + ';')\n if not found_prop:\n yield line\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Extractor:\n\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n \"\"\"\n Returns the variable name if a variable with\n the value <value> is found.\n \"\"\"\n\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n \"\"\"\n Scans a list of <lines> containing CSS and\n returns a list of strings containing the\n rendered LESS version.\n \"\"\"\n\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" % self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not value in self.variables.values():\n self.variables['@var%i' % (len(self.variables) + 1)\n ] = value\n yield line.replace(value, self.find_variable_name(value\n ) + ';')\n if not found_prop:\n yield line\n \"\"\"\n Returns the output for the variables.less\n file as a list of strings\n \"\"\"\n\n def get_variables(self):\n for var, val in self.variables.items():\n yield var + ': ' + val\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Extractor:\n\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n \"\"\"\n Returns the variable name if a variable with\n the value <value> is found.\n \"\"\"\n\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n \"\"\"\n Scans a list of <lines> containing CSS and\n returns a list of strings containing the\n rendered LESS version.\n \"\"\"\n\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" % self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not value in self.variables.values():\n self.variables['@var%i' % (len(self.variables) + 1)\n ] = value\n yield line.replace(value, self.find_variable_name(value\n ) + ';')\n if not found_prop:\n yield line\n \"\"\"\n Returns the output for the variables.less\n file as a list of strings\n \"\"\"\n\n def get_variables(self):\n for var, val in self.variables.items():\n yield var + ': ' + val\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n for path in sys.argv[1:]:\n name = '.'.join(path.split('.')[:-1])\n extractor = Extractor(name)\n read = open(path)\n write = open(name + '.less', 'w')\n variables = open(name + '_variables.less', 'w')\n try:\n for line in extractor.scan(read.readlines()):\n write.write(line)\n for line in extractor.get_variables():\n variables.write(line + os.linesep)\n finally:\n variables.close()\n write.close()\n read.close()\n else:\n print('usage: python extract.py [file]')\n",
"step-4": "import sys, os\n\n\nclass Extractor:\n\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n \"\"\"\n Returns the variable name if a variable with\n the value <value> is found.\n \"\"\"\n\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n \"\"\"\n Scans a list of <lines> containing CSS and\n returns a list of strings containing the\n rendered LESS version.\n \"\"\"\n\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" % self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not value in self.variables.values():\n self.variables['@var%i' % (len(self.variables) + 1)\n ] = value\n yield line.replace(value, self.find_variable_name(value\n ) + ';')\n if not found_prop:\n yield line\n \"\"\"\n Returns the output for the variables.less\n file as a list of strings\n \"\"\"\n\n def get_variables(self):\n for var, val in self.variables.items():\n yield var + ': ' + val\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n for path in sys.argv[1:]:\n name = '.'.join(path.split('.')[:-1])\n extractor = Extractor(name)\n read = open(path)\n write = open(name + '.less', 'w')\n variables = open(name + '_variables.less', 'w')\n try:\n for line in extractor.scan(read.readlines()):\n write.write(line)\n for line in extractor.get_variables():\n variables.write(line + os.linesep)\n finally:\n variables.close()\n write.close()\n read.close()\n else:\n print('usage: python extract.py [file]')\n",
"step-5": "import sys, os\n\nclass Extractor:\n def __init__(self, prefix=''):\n self.variables = {}\n self.prefix = os.path.basename(prefix)\n \n '''\n Returns the variable name if a variable with\n the value <value> is found.\n '''\n def find_variable_name(self, value):\n for var, val in self.variables.items():\n if value == val:\n return var\n \n '''\n Scans a list of <lines> containing CSS and\n returns a list of strings containing the\n rendered LESS version.\n '''\n def scan(self, lines):\n yield \"@import '%s_variables.less'\\n\\n\" %self.prefix\n for line in lines:\n found_prop = False\n for prop in ('background-color', 'background', 'color'):\n if prop in line:\n found_prop = True\n value = line.split(':')[1].strip().replace('}', '')\n if not (value in self.variables.values()):\n self.variables['@var%i' %(len(self.variables) + 1)] = value\n yield line.replace(value, self.find_variable_name(value) + ';')\n if not found_prop:\n yield line\n\n '''\n Returns the output for the variables.less\n file as a list of strings\n '''\n def get_variables(self):\n for var, val in self.variables.items():\n yield var + ': ' + val \n \n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n for path in sys.argv[1:]:\n name = '.'.join(path.split('.')[:-1])\n extractor = Extractor(name)\n read = open(path)\n write = open(name + '.less', 'w')\n variables = open(name + '_variables.less', 'w')\n try:\n for line in extractor.scan(read.readlines()):\n write.write(line)\n for line in extractor.get_variables():\n variables.write(line + os.linesep)\n finally:\n variables.close()\n write.close()\n read.close() \n \n else:\n print('usage: python extract.py [file]')\n \n\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import time
if __name__ == '__main__':
for i in range(10):
print('here %s' % i)
time.sleep(1)
print('TEST SUCEEDED')
|
normal
|
{
"blob_id": "a159f9f9cc06bb9d22f84781fb2fc664ea204b64",
"index": 6856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n for i in range(10):\n print('here %s' % i)\n time.sleep(1)\n print('TEST SUCEEDED')\n",
"step-3": "import time\nif __name__ == '__main__':\n for i in range(10):\n print('here %s' % i)\n time.sleep(1)\n print('TEST SUCEEDED')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import discord
import requests
import math
from keys import GITHUB_DISCORD_TOKEN, GITHUB_FORTNITE_API_KEY
client = discord.Client()
# Constants
DISCORD_TOKEN = GITHUB_DISCORD_TOKEN
FORTNITE_API_KEY = GITHUB_FORTNITE_API_KEY
LIST = ['Verified']
VERIFIED = 4
# Returns the current-season squad K/D for the given Fortnite player
def get_ratio(username):
try:
print(username)
link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username
response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY})
if response.status_code == 200:
collection = response.json()
if 'error' in collection:
return "-1"
else:
ratio = collection['stats']['curr_p9']['kd']['value']
return ratio
print("Invalid username")
return "-1"
else:
print("Error parsing data.")
return "-2"
except KeyError:
print("Error finding data. KeyError was returned.")
return "-3"
@client.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == client.user:
return
    # The command !patch returns a link to the latest patch notes
if message.content.startswith('!patch'):
await message.channel.send('Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/')
    # The command !help explains how to use the !verify command
if message.content.startswith('!help'):
embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify Bot Help", icon_url="")
embed.add_field(name="Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify", value="You can change your nickname by typing \"/nick *YourEpicIGN*\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\'t be able to verify you.", inline=False)
await message.channel.send(embed=embed)
    # The command !verify assigns the Verified role based on the user's current-season K/D
if message.content.startswith("!verify"):
for list in LIST:
roles = discord.utils.get(message.guild.roles, name=list)
username = '{0.author.display_name}'.format(message)
ratio = float(get_ratio(username))
msgRatio = str(ratio)
msgVerified = str(VERIFIED)
print(ratio)
if ratio == -1.0:
embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name="Fortnite player **" + message.author.display_name + "** not found.", value="\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.", inline=False)
await message.channel.send(embed=embed)
elif ratio == -2.0:
embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name="Data not found.", value="Fortnite Tracker is down. Please try again shortly.", inline=False)
await message.channel.send(embed=embed)
elif ratio == -3.0:
embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name="No stats found for squad mode in the current season.", value="Play some games and try again.", inline=False)
await message.channel.send(embed=embed)
elif ratio > 0 and ratio < VERIFIED:
print("🚫")
print("-")
embed = discord.Embed(colour=discord.Colour(0x45278e), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name=message.author.display_name + " does not have over a " + msgVerified + " K/D.", value="Current season squads K/D: **" + msgRatio + "**", inline=False)
await message.channel.send(embed=embed)
elif ratio >= VERIFIED:
print("✅")
print("-")
role = discord.utils.get(message.guild.roles, name=LIST[0])
embed = discord.Embed(colour=discord.Colour(0x45278e), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name=message.author.display_name + " has over a " + msgVerified + " K/D. Verified!", value="Current season squads K/D: **" + msgRatio + "**", inline=False)
user=message.author
await message.channel.send(embed=embed)
await user.add_roles(role)
@client.event
async def on_ready():
print("-")
print("Logged in as: " + client.user.name)
print("With Client User ID: " + str(client.user.id))
print("Verified set to: " + str(VERIFIED))
print("-")
client.run(DISCORD_TOKEN)
|
normal
|
{
"blob_id": "6c6a49dfced680fe034cbbc2fa28d57d2aa1273e",
"index": 8973,
"step-1": "<mask token>\n\n\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY}\n )\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return '-1'\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print('Invalid username')\n return '-1'\n else:\n print('Error parsing data.')\n return '-2'\n except KeyError:\n print('Error finding data. KeyError was returned.')\n return '-3'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY}\n )\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return '-1'\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print('Invalid username')\n return '-1'\n else:\n print('Error parsing data.')\n return '-2'\n except KeyError:\n print('Error finding data. KeyError was returned.')\n return '-3'\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n if message.content.startswith('!patch'):\n await message.channel.send(\n 'Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/'\n )\n if message.content.startswith('!help'):\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify Bot Help', icon_url='')\n embed.add_field(name=\n 'Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify'\n , value=\n 'You can change your nickname by typing \"/nick *YourEpicIGN*\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\\'t be able to verify you.'\n , inline=False)\n await message.channel.send(embed=embed)\n if message.content.startswith('!verify'):\n for list in LIST:\n roles = discord.utils.get(message.guild.roles, name=list)\n username = '{0.author.display_name}'.format(message)\n ratio = float(get_ratio(username))\n msgRatio = str(ratio)\n msgVerified = str(VERIFIED)\n print(ratio)\n if ratio == -1.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Fortnite player **' + message.author.\n display_name + '** not found.', value=\n \"\"\"\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.\"\"\"\n , inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -2.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Data not found.', value=\n 'Fortnite Tracker is down. 
Please try again shortly.',\n inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -3.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=\n 'No stats found for squad mode in the current season.',\n value='Play some games and try again.', inline=False)\n await message.channel.send(embed=embed)\n elif ratio > 0 and ratio < VERIFIED:\n print('🚫')\n print('-')\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' does not have over a ' + msgVerified + ' K/D.', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n await message.channel.send(embed=embed)\n elif ratio >= VERIFIED:\n print('✅')\n print('-')\n role = discord.utils.get(message.guild.roles, name=LIST[0])\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' has over a ' + msgVerified + ' K/D. Verified!', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n user = message.author\n await message.channel.send(embed=embed)\n await user.add_roles(role)\n\n\[email protected]\nasync def on_ready():\n print('-')\n print('Logged in as: ' + client.user.name)\n print('With Client User ID: ' + str(client.user.id))\n print('Verified set to: ' + str(VERIFIED))\n print('-')\n\n\nclient.run(DISCORD_TOKEN)\n",
"step-3": "<mask token>\nclient = discord.Client()\nDISCORD_TOKEN = GITHUB_DISCORD_TOKEN\nFORTNITE_API_KEY = GITHUB_FORTNITE_API_KEY\nLIST = ['Verified']\nVERIFIED = 4\n\n\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY}\n )\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return '-1'\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print('Invalid username')\n return '-1'\n else:\n print('Error parsing data.')\n return '-2'\n except KeyError:\n print('Error finding data. KeyError was returned.')\n return '-3'\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n if message.content.startswith('!patch'):\n await message.channel.send(\n 'Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/'\n )\n if message.content.startswith('!help'):\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify Bot Help', icon_url='')\n embed.add_field(name=\n 'Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify'\n , value=\n 'You can change your nickname by typing \"/nick *YourEpicIGN*\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\\'t be able to verify you.'\n , inline=False)\n await message.channel.send(embed=embed)\n if message.content.startswith('!verify'):\n for list in LIST:\n roles = discord.utils.get(message.guild.roles, name=list)\n username = '{0.author.display_name}'.format(message)\n ratio = float(get_ratio(username))\n msgRatio = str(ratio)\n msgVerified = str(VERIFIED)\n print(ratio)\n if ratio == -1.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Fortnite player **' + message.author.\n display_name + '** not found.', value=\n \"\"\"\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.\"\"\"\n , inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -2.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Data not found.', value=\n 'Fortnite Tracker is down. 
Please try again shortly.',\n inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -3.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=\n 'No stats found for squad mode in the current season.',\n value='Play some games and try again.', inline=False)\n await message.channel.send(embed=embed)\n elif ratio > 0 and ratio < VERIFIED:\n print('🚫')\n print('-')\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' does not have over a ' + msgVerified + ' K/D.', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n await message.channel.send(embed=embed)\n elif ratio >= VERIFIED:\n print('✅')\n print('-')\n role = discord.utils.get(message.guild.roles, name=LIST[0])\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' has over a ' + msgVerified + ' K/D. Verified!', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n user = message.author\n await message.channel.send(embed=embed)\n await user.add_roles(role)\n\n\[email protected]\nasync def on_ready():\n print('-')\n print('Logged in as: ' + client.user.name)\n print('With Client User ID: ' + str(client.user.id))\n print('Verified set to: ' + str(VERIFIED))\n print('-')\n\n\nclient.run(DISCORD_TOKEN)\n",
"step-4": "import discord\nimport requests\nimport math\nfrom keys import GITHUB_DISCORD_TOKEN, GITHUB_FORTNITE_API_KEY\nclient = discord.Client()\nDISCORD_TOKEN = GITHUB_DISCORD_TOKEN\nFORTNITE_API_KEY = GITHUB_FORTNITE_API_KEY\nLIST = ['Verified']\nVERIFIED = 4\n\n\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY}\n )\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return '-1'\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print('Invalid username')\n return '-1'\n else:\n print('Error parsing data.')\n return '-2'\n except KeyError:\n print('Error finding data. KeyError was returned.')\n return '-3'\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n if message.content.startswith('!patch'):\n await message.channel.send(\n 'Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/'\n )\n if message.content.startswith('!help'):\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify Bot Help', icon_url='')\n embed.add_field(name=\n 'Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify'\n , value=\n 'You can change your nickname by typing \"/nick *YourEpicIGN*\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\\'t be able to verify you.'\n , inline=False)\n await message.channel.send(embed=embed)\n if message.content.startswith('!verify'):\n for list in LIST:\n roles = discord.utils.get(message.guild.roles, name=list)\n username = '{0.author.display_name}'.format(message)\n ratio = float(get_ratio(username))\n msgRatio = str(ratio)\n msgVerified = str(VERIFIED)\n print(ratio)\n if ratio == -1.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Fortnite player **' + message.author.\n display_name + '** not found.', value=\n \"\"\"\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.\"\"\"\n , inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -2.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Data not found.', value=\n 'Fortnite Tracker is down. 
Please try again shortly.',\n inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -3.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=\n 'No stats found for squad mode in the current season.',\n value='Play some games and try again.', inline=False)\n await message.channel.send(embed=embed)\n elif ratio > 0 and ratio < VERIFIED:\n print('🚫')\n print('-')\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' does not have over a ' + msgVerified + ' K/D.', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n await message.channel.send(embed=embed)\n elif ratio >= VERIFIED:\n print('✅')\n print('-')\n role = discord.utils.get(message.guild.roles, name=LIST[0])\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' has over a ' + msgVerified + ' K/D. Verified!', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n user = message.author\n await message.channel.send(embed=embed)\n await user.add_roles(role)\n\n\[email protected]\nasync def on_ready():\n print('-')\n print('Logged in as: ' + client.user.name)\n print('With Client User ID: ' + str(client.user.id))\n print('Verified set to: ' + str(VERIFIED))\n print('-')\n\n\nclient.run(DISCORD_TOKEN)\n",
"step-5": "import discord\nimport requests\nimport math\nfrom keys import GITHUB_DISCORD_TOKEN, GITHUB_FORTNITE_API_KEY\n\nclient = discord.Client()\n\n# Constant\nDISCORD_TOKEN = GITHUB_DISCORD_TOKEN\nFORTNITE_API_KEY = GITHUB_FORTNITE_API_KEY\n\nLIST = ['Verified']\nVERIFIED = 4\n\n# Return the current season squad K/D of the fortnite player\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY})\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return \"-1\"\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print(\"Invalid username\")\n return \"-1\"\n else:\n print(\"Error parsing data.\")\n return \"-2\"\n except KeyError:\n print(\"Error finding data. KeyError was returned.\")\n return \"-3\"\n\[email protected]\nasync def on_message(message):\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n # The command !patch return a link with the lastest patch note\n if message.content.startswith('!patch'):\n await message.channel.send('Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/')\n # The command !help explains the one function\n if message.content.startswith('!help'):\n embed = discord.Embed(colour=discord.Colour(0x8e2626), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify Bot Help\", icon_url=\"\")\n embed.add_field(name=\"Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify\", value=\"You can change your nickname by typing \\\"/nick *YourEpicIGN*\\\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\\'t be able to verify you.\", inline=False)\n await message.channel.send(embed=embed)\n # The command !verify return attribute a rank according to the K/D of the user\n if message.content.startswith(\"!verify\"):\n for list in LIST:\n roles = discord.utils.get(message.guild.roles, name=list)\n username = '{0.author.display_name}'.format(message)\n ratio = float(get_ratio(username))\n msgRatio = str(ratio)\n msgVerified = str(VERIFIED)\n print(ratio)\n if ratio == -1.0:\n embed = discord.Embed(colour=discord.Colour(0x8e2626), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=\"Fortnite player **\" + message.author.display_name + \"** not found.\", value=\"\\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.\", inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -2.0:\n embed = discord.Embed(colour=discord.Colour(0x8e2626), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=\"Data not found.\", value=\"Fortnite Tracker is down. 
Please try again shortly.\", inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -3.0:\n embed = discord.Embed(colour=discord.Colour(0x8e2626), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=\"No stats found for squad mode in the current season.\", value=\"Play some games and try again.\", inline=False)\n await message.channel.send(embed=embed)\n elif ratio > 0 and ratio < VERIFIED:\n print(\"🚫\")\n print(\"-\")\n embed = discord.Embed(colour=discord.Colour(0x45278e), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name + \" does not have over a \" + msgVerified + \" K/D.\", value=\"Current season squads K/D: **\" + msgRatio + \"**\", inline=False)\n await message.channel.send(embed=embed)\n elif ratio >= VERIFIED:\n print(\"✅\")\n print(\"-\")\n role = discord.utils.get(message.guild.roles, name=LIST[0])\n embed = discord.Embed(colour=discord.Colour(0x45278e), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name + \" has over a \" + msgVerified + \" K/D. Verified!\", value=\"Current season squads K/D: **\" + msgRatio + \"**\", inline=False)\n user=message.author\n await message.channel.send(embed=embed)\n await user.add_roles(role) \n \[email protected]\nasync def on_ready():\n print(\"-\")\n print(\"Logged in as: \" + client.user.name)\n print(\"With Client User ID: \" + str(client.user.id))\n print(\"Verified set to: \" + str(VERIFIED))\n print(\"-\")\n\nclient.run(DISCORD_TOKEN)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""Module containing class `Station`."""
from zoneinfo import ZoneInfo
import datetime
from vesper.util.named import Named
class Station(Named):
"""Recording station."""
def __init__(
self, name, long_name, time_zone_name,
latitude=None, longitude=None, elevation=None):
super().__init__(name)
self._long_name = long_name
self._time_zone = ZoneInfo(time_zone_name)
self._latitude = latitude
self._longitude = longitude
self._elevation = elevation
@property
def long_name(self):
return self._long_name
@property
def time_zone(self):
return self._time_zone
@property
def latitude(self):
return self._latitude
@property
def longitude(self):
return self._longitude
@property
def elevation(self):
return self._elevation
def get_night(self, time):
"""
Gets the station-local night that includes the specified time.
:Parameters:
time : `datetime`
the time whose night is to be gotten.
The time may be either naive or aware. If the time
is naive, it is assumed to be in the station's
time zone.
:Returns:
the station-local night that includes the specified time, a `date`.
The station-local night of a time is the starting date of the
local 24-hour period starting at noon that contains the time.
"""
if time.tzinfo is not None:
# time is aware
# convert time to station time zone
time = time.astimezone(self.time_zone)
if time.hour < 12:
time -= datetime.timedelta(hours=12)
return time.date()
|
normal
|
{
"blob_id": "ad09880b9e06a129b9623be2a086ebcc8dc55c2c",
"index": 9079,
"step-1": "<mask token>\n\n\nclass Station(Named):\n <mask token>\n\n def __init__(self, name, long_name, time_zone_name, latitude=None,\n longitude=None, elevation=None):\n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n <mask token>\n\n @property\n def time_zone(self):\n return self._time_zone\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n @property\n def elevation(self):\n return self._elevation\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Station(Named):\n <mask token>\n\n def __init__(self, name, long_name, time_zone_name, latitude=None,\n longitude=None, elevation=None):\n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n <mask token>\n\n @property\n def time_zone(self):\n return self._time_zone\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n @property\n def elevation(self):\n return self._elevation\n\n def get_night(self, time):\n \"\"\"\n Gets the station-local night that includes the specified time.\n \n :Parameters:\n time : `datetime`\n the time whose night is to be gotten.\n \n The time may be either naive or aware. If the time\n is naive, it is assumed to be in the station's\n time zone.\n \n :Returns:\n the station-local night that includes the specified time, a `date`.\n \n The station-local night of a time is the starting date of the\n local 24-hour period starting at noon that contains the time.\n \"\"\"\n if time.tzinfo is not None:\n time = time.astimezone(self.time_zone)\n if time.hour < 12:\n time -= datetime.timedelta(hours=12)\n return time.date()\n",
"step-3": "<mask token>\n\n\nclass Station(Named):\n \"\"\"Recording station.\"\"\"\n\n def __init__(self, name, long_name, time_zone_name, latitude=None,\n longitude=None, elevation=None):\n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n\n @property\n def long_name(self):\n return self._long_name\n\n @property\n def time_zone(self):\n return self._time_zone\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n @property\n def elevation(self):\n return self._elevation\n\n def get_night(self, time):\n \"\"\"\n Gets the station-local night that includes the specified time.\n \n :Parameters:\n time : `datetime`\n the time whose night is to be gotten.\n \n The time may be either naive or aware. If the time\n is naive, it is assumed to be in the station's\n time zone.\n \n :Returns:\n the station-local night that includes the specified time, a `date`.\n \n The station-local night of a time is the starting date of the\n local 24-hour period starting at noon that contains the time.\n \"\"\"\n if time.tzinfo is not None:\n time = time.astimezone(self.time_zone)\n if time.hour < 12:\n time -= datetime.timedelta(hours=12)\n return time.date()\n",
"step-4": "<mask token>\nfrom zoneinfo import ZoneInfo\nimport datetime\nfrom vesper.util.named import Named\n\n\nclass Station(Named):\n \"\"\"Recording station.\"\"\"\n\n def __init__(self, name, long_name, time_zone_name, latitude=None,\n longitude=None, elevation=None):\n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n\n @property\n def long_name(self):\n return self._long_name\n\n @property\n def time_zone(self):\n return self._time_zone\n\n @property\n def latitude(self):\n return self._latitude\n\n @property\n def longitude(self):\n return self._longitude\n\n @property\n def elevation(self):\n return self._elevation\n\n def get_night(self, time):\n \"\"\"\n Gets the station-local night that includes the specified time.\n \n :Parameters:\n time : `datetime`\n the time whose night is to be gotten.\n \n The time may be either naive or aware. If the time\n is naive, it is assumed to be in the station's\n time zone.\n \n :Returns:\n the station-local night that includes the specified time, a `date`.\n \n The station-local night of a time is the starting date of the\n local 24-hour period starting at noon that contains the time.\n \"\"\"\n if time.tzinfo is not None:\n time = time.astimezone(self.time_zone)\n if time.hour < 12:\n time -= datetime.timedelta(hours=12)\n return time.date()\n",
"step-5": "\"\"\"Module containing class `Station`.\"\"\"\n\n\nfrom zoneinfo import ZoneInfo\nimport datetime\n\nfrom vesper.util.named import Named\n\n\nclass Station(Named):\n \n \"\"\"Recording station.\"\"\"\n \n \n def __init__(\n self, name, long_name, time_zone_name,\n latitude=None, longitude=None, elevation=None):\n \n super().__init__(name)\n self._long_name = long_name\n self._time_zone = ZoneInfo(time_zone_name)\n self._latitude = latitude\n self._longitude = longitude\n self._elevation = elevation\n \n \n @property\n def long_name(self):\n return self._long_name\n \n \n @property\n def time_zone(self):\n return self._time_zone\n \n \n @property\n def latitude(self):\n return self._latitude\n \n \n @property\n def longitude(self):\n return self._longitude\n \n \n @property\n def elevation(self):\n return self._elevation\n \n \n def get_night(self, time):\n \n \"\"\"\n Gets the station-local night that includes the specified time.\n \n :Parameters:\n time : `datetime`\n the time whose night is to be gotten.\n \n The time may be either naive or aware. If the time\n is naive, it is assumed to be in the station's\n time zone.\n \n :Returns:\n the station-local night that includes the specified time, a `date`.\n \n The station-local night of a time is the starting date of the\n local 24-hour period starting at noon that contains the time.\n \"\"\"\n \n if time.tzinfo is not None:\n # time is aware\n \n # convert time to station time zone\n time = time.astimezone(self.time_zone)\n \n if time.hour < 12:\n time -= datetime.timedelta(hours=12)\n \n return time.date()\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
import csv
import glob
import random
import sys
from math import ceil, floor
from os.path import basename, exists, dirname, isfile
import numpy as np
import keras
from keras import Model, Input, regularizers
from keras.layers import TimeDistributed, LSTMCell, Reshape, Dense, Lambda, Dropout, Concatenate
from keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
from keras.optimizers import Adam, SGD
from sklearn.metrics import confusion_matrix, accuracy_score # , classification_report
from sklearn.preprocessing import LabelBinarizer
from tqdm import tqdm
from Dataset.Dataset_Utils.augmenter import NoAug
from Dataset.Dataset_Utils.datagen import DataGenerator as DataGen
from Dataset.Dataset_Utils.dataset_tools import print_cm
from Models.model_sharma import SharmaNet
from audio_classifier import AudioClassifier, from_arff_to_feture
from frames_classifier import FramesClassifier
import tensorflow as tf  # needed by the Lambda layer in my_model(); not guaranteed to be re-exported by the wildcard import below
from test_models import *
classes = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"]
def my_model():
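    # Feature-level fusion head: concatenates per-timestep audio (1582-d) and frame (1024-d)
    # features, applies a time-distributed tanh/softmax classifier, then averages the
    # per-step predictions over the 16 timesteps.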
r1, r2 = regularizers.l2(1e-5), regularizers.l2(1e-5)
frame_input = Input(shape=(16, 1024))
audio_input = Input(shape=(16, 1582))
x = Concatenate(name='fusion1')([frame_input, audio_input])
x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1, name='ff_logit_lstm'))(x)
x = TimeDistributed(Dropout(0.5))(x)
x = TimeDistributed(Dense(7, activation='softmax', kernel_regularizer=r2, name='ff_logit'))(x)
x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x)
return Model([audio_input, frame_input], x)
class VideoClassifier:
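    """Audio-visual emotion classifier operating on precomputed frame and audio-window features (AFEW)."""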
def __init__(self, train_mode="late_fusion", video_model_path=None, time_step=16,
base_path="/user/vlongobardi/AFEW/aligned/", feature_name="emobase2010_100", stride=1):
self.time_step = time_step
self.train_mode = train_mode
self.feature_name = feature_name
self.classes = classes
self.lb = LabelBinarizer()
self.lb.fit_transform(np.array(classes))
self.feature_num = 1582
        # "_full" features have no numeric window size; avoid int("full") raising ValueError
        self.offset = 0 if "full" in self.feature_name else ceil(int(self.feature_name.split("_")[1]) / 2 / 40)
self.stride = stride
if video_model_path is not None:
try:
self.model = my_model()
self.model.load_weights(video_model_path)
print("VideoClassifier loaded successfully", video_model_path)
            except Exception as e:
                print("Exception while loading video model:", e)
else:
t_files = glob.glob(base_path + "Train" + "/*/*csv")
v_files = glob.glob(base_path + "Val" + "/*/*csv")
self.csv_fusion = self.generate_feature(t_files, v_files)
self.do_training()
def do_training(self):
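        # Small grid search over model / optimizer / learning rate, repeated `iters` times;
        # bump `skips` to resume a grid that was interrupted part-way through.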
skips = 0
iters = 1
bs = 16
ep = 150
opts = ["SGD"]#, "Adam"]
lrs = [0.01]
models = [my_model]
models_name = [x.__name__ for x in models]
for index, model in enumerate(models):
for opt in opts:
for lr in lrs:
for iteration in range(iters):
if skips > 0:
skips -= 1
continue
train_infos = {
"iteration": iteration, "model_name": models_name[index],
"batch_size": bs, "epoch": ep, "lr": lr, "opt": opt
}
print(
"\n\n################################################################################\n"
"############################## ITERATION " + str(iteration + 1) + " of " + str(iters) +
" ###########################\n######################################################" +
" ########################\nepochs:", ep, "batch_size:", bs, "\nmodel:", models_name[index],
"in", models_name, "\nopt:", opt, "in", opts, "\nlr:", lr, "in", lrs)
train_infos["generator1"] = self.early_gen_train
train_infos["generator2"] = self.early_gen_new_val
t_files, v_files = self.csv_fusion["train"], self.csv_fusion["val"]
m = model()
self.train(t_files, v_files, train_infos, m)
def generate_feature(self, t_files, v_files):
if not exists('features_path_early_fusion_train_' + self.feature_name + '.csv'):
print("\n##### GENERATING CSV FOR EARLY FUSION... #####")
csv_early_fusion = {
"train": self._generate_data_for_early_fusion(t_files, "train"),
"val": self._generate_data_for_early_fusion(v_files, "val")
}
print("\n##### CSV GENERATED! #####")
else:
csv_early_fusion = {}
for name in ["train", "val"]:
csv_early_fusion[name] = self.load_early_csv(name)
return csv_early_fusion
def load_early_csv(self, dataset):
csv_early_fusion = {}
print("Opening csv: features_path_early_fusion_" + dataset + "_" + self.feature_name + '.csv')
with open('features_path_early_fusion_' + dataset + "_" + self.feature_name + '.csv', 'r') as f:
f.readline()
csv_reader = csv.reader(f)
for clip_id, ground_truth, frame_label, audio_label in csv_reader:
if clip_id not in csv_early_fusion:
csv_early_fusion[clip_id] = []
csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label])
return csv_early_fusion
def _generate_data_for_early_fusion(self, files, name):
# '/user/vlongobardi/AFEW/aligned/Train/Angry/012738600.csv'
# '/user/vlongobardi/early_feature/framefeature/Train/Angry/012738600_0.dat'
# '/user/vlongobardi/early_feature/emobase2010_600/Train/Angry/012738600_0.arff'
if "full" in self.feature_name:
frame_to_discard = 0
else:
window_size = int(self.feature_name.split("_")[1])
frame_to_discard = ceil(window_size / 2 / 40)
my_csv = {}
for file in tqdm(files):
clip_id_temp = file.split(".")[0]
base_path = clip_id_temp.replace("AFEW/aligned", "early_feature/framefeature") + "*"
frames_features_path = glob.glob(base_path)
audio_features_path = glob.glob(
base_path.replace("early_feature/framefeature", "early_feature/" + self.feature_name))
frames_features_path.sort(key=lambda x: int(x.split("_")[-1].split(".")[0]))
if "full" not in self.feature_name:
audio_features_path.sort(key=lambda x: int(x.split("_")[-1].split(".")[0]))
ground_truth = basename(dirname(clip_id_temp))
clip_id = basename(clip_id_temp)
# discard video frames based on window size
frames_features_path = frames_features_path[frame_to_discard:]
if len(frames_features_path) < 16:
continue
# print("FRAME TOO FEW SAMPLES:", len(frames_features_path), clip_id)
if len(audio_features_path) < 16 and "full" not in self.feature_name:
continue
# print("AUDIO TOO FEW SAMPLES:", len(audio_features_path), clip_id)
for index, frame in enumerate(frames_features_path):
if clip_id not in my_csv.keys():
my_csv[clip_id] = []
if "full" not in self.feature_name:
my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]])
else:
my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]])
with open('features_path_early_fusion_' + name + "_" + self.feature_name + '.csv', 'w') as f:
f.write("clip_id, ground_truth, frame_label, audio_label\n")
for key in my_csv:
for line in my_csv[key]:
f.write(key + "," + line[0] + "," + line[1] + "," + line[2] + "\n")
return my_csv
def early_gen_train(self, list_files, batch_size):
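        # Training generator: for each clip, picks a random window of `time_step` consecutive
        # samples and yields ([audio_features, frame_features], one-hot labels) batches indefinitely.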
c = 0
clip_ids = list(self.csv_fusion["train"].keys())
random.shuffle(clip_ids)
while True:
labels = []
features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),
np.zeros((batch_size, self.time_step, 1024)).astype('float')]
for i in range(c, c + batch_size):
clip_id = clip_ids[i]
video_info = self.csv_fusion["train"][clip_id]
ground_truth = video_info[0][0]
# first_frame_num = int(video_info[0][1].split("_")[-1].split(".")[0])
start = random.randint(0, len(video_info) - self.time_step)
for index, elem in enumerate(video_info[start:self.time_step + start]):
_, frame_path, audio_path = elem
if not isfile(frame_path):
start += 1
if start >= len(video_info):
raise
continue
frame_feature = np.load(frame_path)
features[0][i - c][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )
features[1][i - c][index] = frame_feature.reshape(1024, )
labels.append(ground_truth)
c += batch_size
if c + batch_size > len(clip_ids):
c = 0
random.shuffle(clip_ids)
labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))
yield features, labels
def early_gen_new_val(self, list_files, batch_size, mode="val", stride=1):
""" stride 50% sul su tutti i file """
c = 0
labels = features = []
clip_ids = list(list_files.keys())
while True:
for clip_id in tqdm(clip_ids):
video_info = list_files[clip_id]
ground_truth = video_info[0][0]
for start in range(0, len(video_info) - self.time_step, self.time_step // stride):
if c == 0:
labels = []
features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),
np.zeros((batch_size, self.time_step, 1024)).astype('float')]
for index, elem in enumerate(video_info[start:self.time_step + start]):
_, frame_path, audio_path = elem
frame_feature = np.load(frame_path)
features[0][c][index] = np.array(from_arff_to_feture(audio_path)).reshape(
self.feature_num, )
features[1][c][index] = frame_feature.reshape(1024, )
labels.append(ground_truth)
c += 1
if c == batch_size:
c = 0
labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))
yield features, labels
if mode == "eval":
break
def early_gen_test_clip(self, list_files, clip_id, stride=1):
""" stride su singolo file, quindi va richiamato per ogni file """
ground_truth = list_files[0][0]
start = 0
end = len(list_files) - self.time_step
while True:
labels = []
features = [np.zeros((1, self.time_step, self.feature_num)).astype('float'),
np.zeros((1, self.time_step, 1024)).astype('float')]
for index, elem in enumerate(list_files[start:start + self.time_step]):
_, frame_path, audio_path = elem
frame_feature = np.load(frame_path)
features[0][0][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )
features[1][0][index] = frame_feature.reshape(1024, )
labels.append(ground_truth)
start += self.time_step // stride
if start >= end:
break
labels = self.lb.transform(np.array(labels)).reshape((1, 7))
yield features, labels
def get_validation_dim(self):
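        # Hard-coded number of validation steps for each feature window size / stride combination.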
if self.stride == 2:
if "full" in self.feature_name:
return 141
elif "600" in self.feature_name:
return 0
elif "300" in self.feature_name:
return 114
elif "100" in self.feature_name:
return 128
elif self.stride == 1:
if "full" in self.feature_name:
return 76
elif "600" in self.feature_name:
return 0
elif "300" in self.feature_name:
return 63
elif "100" in self.feature_name:
return 69
elif self.stride == self.time_step:
return 0
def train(self, train_files, val_files, train_data, model):
if train_data["opt"] == "Adam":
optimizer = Adam(lr=train_data["lr"])
else:
optimizer = SGD(lr=train_data["lr"])
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
train_gen = train_data["generator1"](train_files, train_data["batch_size"])
no_of_training_images = len(train_files)
no_of_val_images = self.get_validation_dim()
print("no_of_val_images:", no_of_val_images)
val_gen = train_data["generator2"](val_files, train_data["batch_size"])
# stride = 1, no overlapping
# stride = 2, overlapping: 50%
# stride = time_step, stride: 1
model_name = "_lr" + str(train_data["lr"]) + "_Opt" + train_data["opt"] + "_Model" + str(
train_data["model_name"]) + "_Feature" + self.feature_name + "_" + str(
train_data["iteration"]) + "_" + self.train_mode # + "_modelType" + str(self.model_type)
model_name += "stride" + str(self.stride)
model_name += ".h5"
def custom_scheduler(epoch):
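            # Step-wise learning-rate schedule: 0.1 -> 0.01 -> 0.001 -> 0.0001 as training progresses.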
if epoch < 50:
print(0.1)
return 0.1
if epoch < 100:
print(0.01)
return 0.01
if epoch < 125:
print(0.001)
return 0.001
else:
print(0.0001)
return 0.0001
#print(0.1 / 10 ** (floor(epoch / 40) + 1))
#return 0.1 / 10 ** (floor(epoch / 40) + 1)
class CheckValCMCallback(keras.callbacks.Callback):
def __init__(self, m, dim, validation_files, epoch):
super().__init__()
self.vc = m
self.dim = dim
self.val_files = validation_files
self.epoch = epoch
self.accs = []
def on_epoch_end(self, epoch, logs=None):
csv_fusion = self.vc.load_early_csv("val")
# gen = self.vc.early_gen_new_val(csv_fusion, 16, "eval")
# predictions = []
# ground_truths = []
# for x in gen:
# ground_truths.append(self.vc.lb.inverse_transform(x[1])[0])
# pred = self.model.predict(x[0])
# pred = self.vc.lb.inverse_transform(pred)
# predictions.append(pred[0])
# self.vc.print_stats(ground_truths, predictions, "Video" + str(epoch))
gen = self.vc.early_gen_new_val(csv_fusion, 16, "eval")
acc = self.model.evaluate_generator(gen, self.dim, workers=0)
self.accs.append(acc)
print("Evaluate:", acc)
if self.epoch == epoch + 1:
print("Validation_Accuracy =", self.accs)
cb = [ModelCheckpoint(
filepath=str(
"weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}" + model_name),
monitor="val_accuracy", save_weights_only=True),
TensorBoard(log_dir="NewFusionLogs_sched/" + self.train_mode + "/" + self.feature_name, write_graph=True,
write_images=True)]
cb += [LearningRateScheduler(custom_scheduler)]
#cb += [CheckValCMCallback(self, no_of_val_images, val_files, train_data["epoch"])]
history = model.fit_generator(train_gen,
validation_data=val_gen,
epochs=train_data["epoch"],
steps_per_epoch=(no_of_training_images * 2 // train_data["batch_size"]),
validation_steps=(no_of_val_images),
workers=0, verbose=1, callbacks=cb)
print("\n\nTrain_Accuracy =", history.history['accuracy'])
print("\nVal_Accuracy =", history.history['val_accuracy'])
print("\n\nTrain_Loss =", history.history['loss'])
print("\nVal_Loss =", history.history['val_loss'])
def print_stats(self, ground_truths, predictions, name):
cm = confusion_matrix(ground_truths, predictions, self.classes)
print("###" + name + " Results###\n")
# print_cm(cm, self.classes)
# print("\n\n")
print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=3), self.classes)
print("\n\n")
print("Accuracy score: ", accuracy_score(ground_truths, predictions), "\n\n")
# print("Report")
# print(classification_report(ground_truths, predictions))
print("#################################################################end###\n\n\n")
def print_confusion_matrix(self, stride=1):
""" IMPLEMENT FOR EARLY FUSION MISSING """
csv_fusion = {}
predictions = []
ground_truths = []
if self.train_mode == "early_fusion":
csv_fusion = self.load_early_csv("val")
print("CSV loaded", len(csv_fusion))
gen = self.early_gen_new_val(csv_fusion, 1, "eval", stride)
for x in gen:
ground_truths.append(self.lb.inverse_transform(x[1])[0])
pred = self.model.predict(x[0])
pred = self.lb.inverse_transform(pred)
predictions.append(pred[0])
# print("\ngt, pred", self.lb.inverse_transform(x[1]), pred)
self.print_stats(ground_truths, predictions, "Video")
else:
with open('lables_late_fusion' + self.feature_name + '.csv', 'r') as f:
f.readline()
csv_reader = csv.reader(f)
for row in csv_reader:
csv_fusion[row[0]] = [row[1], row[2], row[3]]
a_p = []
f_p = []
files = glob.glob("/user/vlongobardi/late_feature/" + self.feature_name + "/*/*csv")
for file in files:
clip_id = basename(file).split(".")[0]
ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]
sample = np.append(self.lb.transform(np.array([audio_pred])), self.lb.transform(np.array([frame_pred])))
pred = self.model.predict(sample.reshape((1, 14)))
pred = self.lb.inverse_transform(pred)[0]
predictions.append(pred)
a_p.append(audio_pred)
f_p.append(frame_pred)
ground_truths.append(ground_truth)
self.print_stats(ground_truths, predictions, "Video")
self.print_stats(ground_truths, a_p, "Audio")
self.print_stats(ground_truths, f_p, "Frame")
if __name__ == "__main__":
if sys.argv[1] == "late":
print("LATE")
model_path = [
"audio_models/audioModel_0.2285_epoch135_lr0.1_OptSGD_Modela_model7_Featureemobase2010_100_3.h5",
"audio_models/audioModel_0.2650_epoch01_lr0.01_OptSGD_Modela_model7_Featureemobase2010_300_2.h5",
"audio_models/audioModel_0.2865_epoch13_lr0.001_OptSGD_Modela_model7_Featureemobase2010_600_0.h5",
"audio_models/audioModel_0.3668_epoch67_lr0.001_OptSGD_Modela_model7_Featureemobase2010_full_2.h5"
]
for mp in model_path:
vc = VideoClassifier(train_mode="late_fusion", audio_model_path=mp)
elif sys.argv[1] == "early":
# mt = int(sys.argv[2])
print("EARLY") # , Model_type:", mt)
arff_paths = {"e1": "emobase2010_100", "i1": "IS09_emotion_100",
"e3": "emobase2010_300", "i3": "IS09_emotion_300",
"e6": "emobase2010_600", "i6": "IS09_emotion_600",
"ef": "emobase2010_full", "if": "IS09_emotion_full"}
vc = VideoClassifier(train_mode="early_fusion", feature_name=arff_paths[sys.argv[2]]) # , model_type=mt)
|
normal
|
{
"blob_id": "c925bed2f4d8120e156caebbe8e6bf9d6a51ee37",
"index": 3330,
"step-1": "<mask token>\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n <mask token>\n <mask token>\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n <mask token>\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n <mask token>\n <mask token>\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = 
train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n <mask token>\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n 
self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n <mask token>\n <mask token>\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n if 'full' in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split('_')[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split('.')[0]\n base_path = clip_id_temp.replace('AFEW/aligned',\n 'early_feature/framefeature') + '*'\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(base_path.replace(\n 'early_feature/framefeature', 'early_feature/' + self.\n feature_name))\n frames_features_path.sort(key=lambda x: int(x.split('_')[-1].\n split('.')[0]))\n if 'full' not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split('_')[-1]\n .split('.')[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n if len(audio_features_path\n ) < 16 and 'full' not in self.feature_name:\n continue\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if 'full' not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[index]])\n else:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[0]])\n with open('features_path_early_fusion_' + name + '_' + self.\n feature_name + '.csv', 'w') as f:\n f.write('clip_id, ground_truth, frame_label, audio_label\\n')\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + ',' + line[0] + ',' + line[1] + ',' +\n line[2] + '\\n')\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n 
clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode='val', stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n for start in range(0, len(video_info) - self.time_step, \n self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, self.time_step,\n self.feature_num)).astype('float'), np.zeros((\n batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n if mode == 'eval':\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).\n astype('float'), np.zeros((1, self.time_step, 1024)).astype\n ('float')]\n for index, elem in enumerate(list_files[start:start + self.\n time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(\n audio_path)).reshape(self.feature_num)\n features[1][0][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n 
train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print('###' + name + ' Results###\\n')\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.\n newaxis], decimals=3), self.classes)\n print('\\n\\n')\n print('Accuracy score: ', accuracy_score(ground_truths, predictions\n ), '\\n\\n')\n print(\n '#################################################################end###\\n\\n\\n'\n )\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + 
'/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n\n def do_training(self):\n skips = 0\n iters = 1\n bs = 16\n ep = 150\n opts = ['SGD']\n lrs = [0.01]\n models = [my_model]\n models_name = [x.__name__ for x in models]\n for index, model in enumerate(models):\n for opt in opts:\n for lr in lrs:\n for iteration in range(iters):\n if skips > 0:\n skips -= 1\n continue\n train_infos = {'iteration': iteration, 'model_name':\n models_name[index], 'batch_size': bs, 'epoch':\n ep, 'lr': lr, 'opt': opt}\n print(\n \"\"\"\n\n################################################################################\n############################## ITERATION \"\"\"\n + str(iteration + 1) + ' of ' + str(iters) +\n \"\"\" ###########################\n######################################################\"\"\"\n + \"\"\" ########################\nepochs:\"\"\", ep,\n 'batch_size:', bs, '\\nmodel:', models_name[\n index], 'in', models_name, '\\nopt:', opt, 'in',\n opts, '\\nlr:', lr, 'in', lrs)\n train_infos['generator1'] = self.early_gen_train\n train_infos['generator2'] = self.early_gen_new_val\n t_files, v_files = self.csv_fusion['train'\n ], self.csv_fusion['val']\n m = model()\n self.train(t_files, v_files, train_infos, m)\n\n def generate_feature(self, t_files, v_files):\n if not exists('features_path_early_fusion_train_' + self.\n feature_name + '.csv'):\n print('\\n##### GENERATING CSV FOR EARLY FUSION... #####')\n csv_early_fusion = {'train': self.\n _generate_data_for_early_fusion(t_files, 'train'), 'val':\n self._generate_data_for_early_fusion(v_files, 'val')}\n print('\\n##### CSV GENERATED! 
#####')\n else:\n csv_early_fusion = {}\n for name in ['train', 'val']:\n csv_early_fusion[name] = self.load_early_csv(name)\n return csv_early_fusion\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n if 'full' in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split('_')[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split('.')[0]\n base_path = clip_id_temp.replace('AFEW/aligned',\n 'early_feature/framefeature') + '*'\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(base_path.replace(\n 'early_feature/framefeature', 'early_feature/' + self.\n feature_name))\n frames_features_path.sort(key=lambda x: int(x.split('_')[-1].\n split('.')[0]))\n if 'full' not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split('_')[-1]\n .split('.')[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n if len(audio_features_path\n ) < 16 and 'full' not in self.feature_name:\n continue\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if 'full' not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[index]])\n else:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[0]])\n with open('features_path_early_fusion_' + name + '_' + self.\n feature_name + '.csv', 'w') as f:\n f.write('clip_id, ground_truth, frame_label, audio_label\\n')\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + ',' + line[0] + ',' + line[1] + ',' +\n line[2] + '\\n')\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, 
list_files, batch_size, mode='val', stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n for start in range(0, len(video_info) - self.time_step, \n self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, self.time_step,\n self.feature_num)).astype('float'), np.zeros((\n batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n if mode == 'eval':\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).\n astype('float'), np.zeros((1, self.time_step, 1024)).astype\n ('float')]\n for index, elem in enumerate(list_files[start:start + self.\n time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(\n audio_path)).reshape(self.feature_num)\n features[1][0][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class 
CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print('###' + name + ' Results###\\n')\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.\n newaxis], decimals=3), self.classes)\n print('\\n\\n')\n print('Accuracy score: ', accuracy_score(ground_truths, predictions\n ), '\\n\\n')\n print(\n '#################################################################end###\\n\\n\\n'\n )\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef my_model():\n r1, r2 = regularizers.l2(1e-05), regularizers.l2(1e-05)\n frame_input = Input(shape=(16, 1024))\n audio_input = Input(shape=(16, 1582))\n x = Concatenate(name='fusion1')([frame_input, audio_input])\n x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1,\n name='ff_logit_lstm'))(x)\n x = TimeDistributed(Dropout(0.5))(x)\n x = TimeDistributed(Dense(7, activation='softmax', kernel_regularizer=\n r2, name='ff_logit'))(x)\n x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x)\n return Model([audio_input, frame_input], x)\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode='late_fusion', video_model_path=None,\n time_step=16, base_path='/user/vlongobardi/AFEW/aligned/',\n feature_name='emobase2010_100', stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split('_')[1]) / 2 / 40)\n self.stride = stride\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print('VideoClassifier loaded successfully', video_model_path)\n except:\n print('Exception')\n else:\n t_files = glob.glob(base_path + 'Train' + '/*/*csv')\n v_files = glob.glob(base_path + 'Val' + '/*/*csv')\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n\n def do_training(self):\n skips = 0\n iters = 1\n bs = 16\n ep = 150\n opts = ['SGD']\n lrs = [0.01]\n models = [my_model]\n models_name = [x.__name__ for x in models]\n for index, model in enumerate(models):\n for opt in opts:\n for lr in lrs:\n for iteration in range(iters):\n if skips > 0:\n skips -= 1\n continue\n train_infos = {'iteration': iteration, 'model_name':\n models_name[index], 'batch_size': bs, 'epoch':\n ep, 'lr': lr, 'opt': opt}\n print(\n \"\"\"\n\n################################################################################\n############################## ITERATION \"\"\"\n + str(iteration + 1) + ' of ' + str(iters) +\n \"\"\" ###########################\n######################################################\"\"\"\n + \"\"\" ########################\nepochs:\"\"\", ep,\n 'batch_size:', bs, '\\nmodel:', models_name[\n index], 'in', models_name, '\\nopt:', opt, 'in',\n opts, '\\nlr:', lr, 'in', lrs)\n train_infos['generator1'] = self.early_gen_train\n train_infos['generator2'] = self.early_gen_new_val\n t_files, v_files = self.csv_fusion['train'\n ], self.csv_fusion['val']\n m = model()\n self.train(t_files, v_files, train_infos, m)\n\n def generate_feature(self, t_files, v_files):\n if not exists('features_path_early_fusion_train_' + self.\n feature_name + '.csv'):\n print('\\n##### GENERATING CSV FOR EARLY FUSION... #####')\n csv_early_fusion = {'train': self.\n _generate_data_for_early_fusion(t_files, 'train'), 'val':\n self._generate_data_for_early_fusion(v_files, 'val')}\n print('\\n##### CSV GENERATED! 
#####')\n else:\n csv_early_fusion = {}\n for name in ['train', 'val']:\n csv_early_fusion[name] = self.load_early_csv(name)\n return csv_early_fusion\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print('Opening csv: features_path_early_fusion_' + dataset + '_' +\n self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + '_' + self.\n feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label,\n audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n if 'full' in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split('_')[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split('.')[0]\n base_path = clip_id_temp.replace('AFEW/aligned',\n 'early_feature/framefeature') + '*'\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(base_path.replace(\n 'early_feature/framefeature', 'early_feature/' + self.\n feature_name))\n frames_features_path.sort(key=lambda x: int(x.split('_')[-1].\n split('.')[0]))\n if 'full' not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split('_')[-1]\n .split('.')[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n if len(audio_features_path\n ) < 16 and 'full' not in self.feature_name:\n continue\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if 'full' not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[index]])\n else:\n my_csv[clip_id].append([ground_truth, frame,\n audio_features_path[0]])\n with open('features_path_early_fusion_' + name + '_' + self.\n feature_name + '.csv', 'w') as f:\n f.write('clip_id, ground_truth, frame_label, audio_label\\n')\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + ',' + line[0] + ',' + line[1] + ',' +\n line[2] + '\\n')\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = list(self.csv_fusion['train'].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.\n feature_num)).astype('float'), np.zeros((batch_size, self.\n time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion['train'][clip_id]\n ground_truth = video_info[0][0]\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][i - c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, 
list_files, batch_size, mode='val', stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n for start in range(0, len(video_info) - self.time_step, \n self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, self.time_step,\n self.feature_num)).astype('float'), np.zeros((\n batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.\n time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture\n (audio_path)).reshape(self.feature_num)\n features[1][c][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((\n batch_size, 7))\n yield features, labels\n if mode == 'eval':\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).\n astype('float'), np.zeros((1, self.time_step, 1024)).astype\n ('float')]\n for index, elem in enumerate(list_files[start:start + self.\n time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(\n audio_path)).reshape(self.feature_num)\n features[1][0][index] = frame_feature.reshape(1024)\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if 'full' in self.feature_name:\n return 141\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 114\n elif '100' in self.feature_name:\n return 128\n elif self.stride == 1:\n if 'full' in self.feature_name:\n return 76\n elif '600' in self.feature_name:\n return 0\n elif '300' in self.feature_name:\n return 63\n elif '100' in self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data['opt'] == 'Adam':\n optimizer = Adam(lr=train_data['lr'])\n else:\n optimizer = SGD(lr=train_data['lr'])\n model.compile(optimizer=optimizer, loss='categorical_crossentropy',\n metrics=['accuracy'])\n model.summary()\n train_gen = train_data['generator1'](train_files, train_data[\n 'batch_size'])\n no_of_training_images = len(train_files)\n no_of_val_images = self.get_validation_dim()\n print('no_of_val_images:', no_of_val_images)\n val_gen = train_data['generator2'](val_files, train_data['batch_size'])\n model_name = '_lr' + str(train_data['lr']) + '_Opt' + train_data['opt'\n ] + '_Model' + str(train_data['model_name']\n ) + '_Feature' + self.feature_name + '_' + str(train_data[\n 'iteration']) + '_' + self.train_mode\n model_name += 'stride' + str(self.stride)\n model_name += '.h5'\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n\n\n class 
CheckValCMCallback(keras.callbacks.Callback):\n\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv('val')\n gen = self.vc.early_gen_new_val(csv_fusion, 16, 'eval')\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print('Evaluate:', acc)\n if self.epoch == epoch + 1:\n print('Validation_Accuracy =', self.accs)\n cb = [ModelCheckpoint(filepath=str(\n 'weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}' +\n model_name), monitor='val_accuracy', save_weights_only=True),\n TensorBoard(log_dir='NewFusionLogs_sched/' + self.train_mode +\n '/' + self.feature_name, write_graph=True, write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n history = model.fit_generator(train_gen, validation_data=val_gen,\n epochs=train_data['epoch'], steps_per_epoch=\n no_of_training_images * 2 // train_data['batch_size'],\n validation_steps=no_of_val_images, workers=0, verbose=1,\n callbacks=cb)\n print('\\n\\nTrain_Accuracy =', history.history['accuracy'])\n print('\\nVal_Accuracy =', history.history['val_accuracy'])\n print('\\n\\nTrain_Loss =', history.history['loss'])\n print('\\nVal_Loss =', history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print('###' + name + ' Results###\\n')\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.\n newaxis], decimals=3), self.classes)\n print('\\n\\n')\n print('Accuracy score: ', accuracy_score(ground_truths, predictions\n ), '\\n\\n')\n print(\n '#################################################################end###\\n\\n\\n'\n )\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == 'early_fusion':\n csv_fusion = self.load_early_csv('val')\n print('CSV loaded', len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, 'eval', stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n self.print_stats(ground_truths, predictions, 'Video')\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r'\n ) as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob('/user/vlongobardi/late_feature/' + self.\n feature_name + '/*/*csv')\n for file in files:\n clip_id = basename(file).split('.')[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])\n ), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n self.print_stats(ground_truths, predictions, 'Video')\n self.print_stats(ground_truths, a_p, 'Audio')\n self.print_stats(ground_truths, f_p, 'Frame')\n\n\n<mask token>\n",
"step-5": "import csv\nimport glob\nimport random\nimport sys\nfrom math import ceil, floor\nfrom os.path import basename, exists, dirname, isfile\n\nimport numpy as np\nimport keras\nfrom keras import Model, Input, regularizers\nfrom keras.layers import TimeDistributed, LSTMCell, Reshape, Dense, Lambda, Dropout, Concatenate\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler\nfrom keras.optimizers import Adam, SGD\nfrom sklearn.metrics import confusion_matrix, accuracy_score # , classification_report\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tqdm import tqdm\n\nfrom Dataset.Dataset_Utils.augmenter import NoAug\nfrom Dataset.Dataset_Utils.datagen import DataGenerator as DataGen\nfrom Dataset.Dataset_Utils.dataset_tools import print_cm\nfrom Models.model_sharma import SharmaNet\nfrom audio_classifier import AudioClassifier, from_arff_to_feture\nfrom frames_classifier import FramesClassifier\nfrom test_models import *\n\nclasses = [\"Angry\", \"Disgust\", \"Fear\", \"Happy\", \"Neutral\", \"Sad\", \"Surprise\"]\n\n\ndef my_model():\n r1, r2 = regularizers.l2(1e-5), regularizers.l2(1e-5)\n frame_input = Input(shape=(16, 1024))\n audio_input = Input(shape=(16, 1582))\n x = Concatenate(name='fusion1')([frame_input, audio_input])\n x = TimeDistributed(Dense(100, activation='tanh', kernel_regularizer=r1, name='ff_logit_lstm'))(x)\n x = TimeDistributed(Dropout(0.5))(x)\n x = TimeDistributed(Dense(7, activation='softmax', kernel_regularizer=r2, name='ff_logit'))(x)\n x = Lambda(lambda y: tf.reduce_mean(y, axis=1))(x)\n return Model([audio_input, frame_input], x)\n\n\nclass VideoClassifier:\n\n def __init__(self, train_mode=\"late_fusion\", video_model_path=None, time_step=16,\n base_path=\"/user/vlongobardi/AFEW/aligned/\", feature_name=\"emobase2010_100\", stride=1):\n self.time_step = time_step\n self.train_mode = train_mode\n self.feature_name = feature_name\n self.classes = classes\n self.lb = LabelBinarizer()\n self.lb.fit_transform(np.array(classes))\n self.feature_num = 1582\n self.offset = ceil(int(self.feature_name.split(\"_\")[1]) / 2 / 40)\n self.stride = stride\n\n if video_model_path is not None:\n try:\n self.model = my_model()\n self.model.load_weights(video_model_path)\n print(\"VideoClassifier loaded successfully\", video_model_path)\n except:\n print(\"Exception\")\n else:\n t_files = glob.glob(base_path + \"Train\" + \"/*/*csv\")\n v_files = glob.glob(base_path + \"Val\" + \"/*/*csv\")\n self.csv_fusion = self.generate_feature(t_files, v_files)\n self.do_training()\n\n def do_training(self):\n skips = 0\n iters = 1\n bs = 16\n ep = 150\n opts = [\"SGD\"]#, \"Adam\"]\n lrs = [0.01]\n models = [my_model]\n models_name = [x.__name__ for x in models]\n for index, model in enumerate(models):\n for opt in opts:\n for lr in lrs:\n for iteration in range(iters):\n\n if skips > 0:\n skips -= 1\n continue\n\n train_infos = {\n \"iteration\": iteration, \"model_name\": models_name[index],\n \"batch_size\": bs, \"epoch\": ep, \"lr\": lr, \"opt\": opt\n }\n\n print(\n \"\\n\\n################################################################################\\n\"\n \"############################## ITERATION \" + str(iteration + 1) + \" of \" + str(iters) +\n \" ###########################\\n######################################################\" +\n \" ########################\\nepochs:\", ep, \"batch_size:\", bs, \"\\nmodel:\", models_name[index],\n \"in\", models_name, \"\\nopt:\", opt, \"in\", opts, \"\\nlr:\", lr, \"in\", lrs)\n\n 
train_infos[\"generator1\"] = self.early_gen_train\n train_infos[\"generator2\"] = self.early_gen_new_val\n t_files, v_files = self.csv_fusion[\"train\"], self.csv_fusion[\"val\"]\n m = model()\n\n self.train(t_files, v_files, train_infos, m)\n\n def generate_feature(self, t_files, v_files):\n if not exists('features_path_early_fusion_train_' + self.feature_name + '.csv'):\n print(\"\\n##### GENERATING CSV FOR EARLY FUSION... #####\")\n csv_early_fusion = {\n \"train\": self._generate_data_for_early_fusion(t_files, \"train\"),\n \"val\": self._generate_data_for_early_fusion(v_files, \"val\")\n }\n print(\"\\n##### CSV GENERATED! #####\")\n else:\n csv_early_fusion = {}\n for name in [\"train\", \"val\"]:\n csv_early_fusion[name] = self.load_early_csv(name)\n return csv_early_fusion\n\n def load_early_csv(self, dataset):\n csv_early_fusion = {}\n print(\"Opening csv: features_path_early_fusion_\" + dataset + \"_\" + self.feature_name + '.csv')\n with open('features_path_early_fusion_' + dataset + \"_\" + self.feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for clip_id, ground_truth, frame_label, audio_label in csv_reader:\n if clip_id not in csv_early_fusion:\n csv_early_fusion[clip_id] = []\n csv_early_fusion[clip_id].append([ground_truth, frame_label, audio_label])\n return csv_early_fusion\n\n def _generate_data_for_early_fusion(self, files, name):\n # '/user/vlongobardi/AFEW/aligned/Train/Angry/012738600.csv'\n # '/user/vlongobardi/early_feature/framefeature/Train/Angry/012738600_0.dat'\n # '/user/vlongobardi/early_feature/emobase2010_600/Train/Angry/012738600_0.arff'\n if \"full\" in self.feature_name:\n frame_to_discard = 0\n else:\n window_size = int(self.feature_name.split(\"_\")[1])\n frame_to_discard = ceil(window_size / 2 / 40)\n my_csv = {}\n for file in tqdm(files):\n clip_id_temp = file.split(\".\")[0]\n base_path = clip_id_temp.replace(\"AFEW/aligned\", \"early_feature/framefeature\") + \"*\"\n frames_features_path = glob.glob(base_path)\n audio_features_path = glob.glob(\n base_path.replace(\"early_feature/framefeature\", \"early_feature/\" + self.feature_name))\n frames_features_path.sort(key=lambda x: int(x.split(\"_\")[-1].split(\".\")[0]))\n if \"full\" not in self.feature_name:\n audio_features_path.sort(key=lambda x: int(x.split(\"_\")[-1].split(\".\")[0]))\n ground_truth = basename(dirname(clip_id_temp))\n clip_id = basename(clip_id_temp)\n\n # discard video frames based on window size\n frames_features_path = frames_features_path[frame_to_discard:]\n if len(frames_features_path) < 16:\n continue\n # print(\"FRAME TOO FEW SAMPLES:\", len(frames_features_path), clip_id)\n if len(audio_features_path) < 16 and \"full\" not in self.feature_name:\n continue\n # print(\"AUDIO TOO FEW SAMPLES:\", len(audio_features_path), clip_id)\n for index, frame in enumerate(frames_features_path):\n if clip_id not in my_csv.keys():\n my_csv[clip_id] = []\n if \"full\" not in self.feature_name:\n my_csv[clip_id].append([ground_truth, frame, audio_features_path[index]])\n else:\n my_csv[clip_id].append([ground_truth, frame, audio_features_path[0]])\n with open('features_path_early_fusion_' + name + \"_\" + self.feature_name + '.csv', 'w') as f:\n f.write(\"clip_id, ground_truth, frame_label, audio_label\\n\")\n for key in my_csv:\n for line in my_csv[key]:\n f.write(key + \",\" + line[0] + \",\" + line[1] + \",\" + line[2] + \"\\n\")\n return my_csv\n\n def early_gen_train(self, list_files, batch_size):\n c = 0\n clip_ids = 
list(self.csv_fusion[\"train\"].keys())\n random.shuffle(clip_ids)\n while True:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),\n np.zeros((batch_size, self.time_step, 1024)).astype('float')]\n for i in range(c, c + batch_size):\n clip_id = clip_ids[i]\n video_info = self.csv_fusion[\"train\"][clip_id]\n ground_truth = video_info[0][0]\n\n # first_frame_num = int(video_info[0][1].split(\"_\")[-1].split(\".\")[0])\n start = random.randint(0, len(video_info) - self.time_step)\n for index, elem in enumerate(video_info[start:self.time_step + start]):\n _, frame_path, audio_path = elem\n if not isfile(frame_path):\n start += 1\n if start >= len(video_info):\n raise\n continue\n frame_feature = np.load(frame_path)\n features[0][i - c][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )\n features[1][i - c][index] = frame_feature.reshape(1024, )\n labels.append(ground_truth)\n c += batch_size\n if c + batch_size > len(clip_ids):\n c = 0\n random.shuffle(clip_ids)\n labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))\n yield features, labels\n\n def early_gen_new_val(self, list_files, batch_size, mode=\"val\", stride=1):\n \"\"\" stride 50% sul su tutti i file \"\"\"\n c = 0\n labels = features = []\n clip_ids = list(list_files.keys())\n while True:\n for clip_id in tqdm(clip_ids):\n video_info = list_files[clip_id]\n ground_truth = video_info[0][0]\n\n for start in range(0, len(video_info) - self.time_step, self.time_step // stride):\n if c == 0:\n labels = []\n features = [np.zeros((batch_size, self.time_step, self.feature_num)).astype('float'),\n np.zeros((batch_size, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(video_info[start:self.time_step + start]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][c][index] = np.array(from_arff_to_feture(audio_path)).reshape(\n self.feature_num, )\n features[1][c][index] = frame_feature.reshape(1024, )\n labels.append(ground_truth)\n\n c += 1\n if c == batch_size:\n c = 0\n labels = self.lb.transform(np.array(labels)).reshape((batch_size, 7))\n yield features, labels\n if mode == \"eval\":\n break\n\n def early_gen_test_clip(self, list_files, clip_id, stride=1):\n \"\"\" stride su singolo file, quindi va richiamato per ogni file \"\"\"\n ground_truth = list_files[0][0]\n start = 0\n end = len(list_files) - self.time_step\n while True:\n labels = []\n features = [np.zeros((1, self.time_step, self.feature_num)).astype('float'),\n np.zeros((1, self.time_step, 1024)).astype('float')]\n for index, elem in enumerate(list_files[start:start + self.time_step]):\n _, frame_path, audio_path = elem\n frame_feature = np.load(frame_path)\n features[0][0][index] = np.array(from_arff_to_feture(audio_path)).reshape(self.feature_num, )\n features[1][0][index] = frame_feature.reshape(1024, )\n labels.append(ground_truth)\n start += self.time_step // stride\n if start >= end:\n break\n labels = self.lb.transform(np.array(labels)).reshape((1, 7))\n yield features, labels\n\n def get_validation_dim(self):\n if self.stride == 2:\n if \"full\" in self.feature_name:\n return 141\n elif \"600\" in self.feature_name:\n return 0\n elif \"300\" in self.feature_name:\n return 114\n elif \"100\" in self.feature_name:\n return 128\n elif self.stride == 1:\n if \"full\" in self.feature_name:\n return 76\n elif \"600\" in self.feature_name:\n return 0\n elif \"300\" in self.feature_name:\n return 63\n elif \"100\" in 
self.feature_name:\n return 69\n elif self.stride == self.time_step:\n return 0\n\n def train(self, train_files, val_files, train_data, model):\n if train_data[\"opt\"] == \"Adam\":\n optimizer = Adam(lr=train_data[\"lr\"])\n else:\n optimizer = SGD(lr=train_data[\"lr\"])\n\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n\n train_gen = train_data[\"generator1\"](train_files, train_data[\"batch_size\"])\n no_of_training_images = len(train_files)\n\n no_of_val_images = self.get_validation_dim()\n print(\"no_of_val_images:\", no_of_val_images)\n val_gen = train_data[\"generator2\"](val_files, train_data[\"batch_size\"])\n\n # stride = 1, no overlapping\n # stride = 2, overlapping: 50%\n # stride = time_step, stride: 1\n\n model_name = \"_lr\" + str(train_data[\"lr\"]) + \"_Opt\" + train_data[\"opt\"] + \"_Model\" + str(\n train_data[\"model_name\"]) + \"_Feature\" + self.feature_name + \"_\" + str(\n train_data[\"iteration\"]) + \"_\" + self.train_mode # + \"_modelType\" + str(self.model_type)\n model_name += \"stride\" + str(self.stride)\n model_name += \".h5\"\n\n def custom_scheduler(epoch):\n if epoch < 50:\n print(0.1)\n return 0.1\n if epoch < 100:\n print(0.01)\n return 0.01\n if epoch < 125:\n print(0.001)\n return 0.001\n else:\n print(0.0001)\n return 0.0001\n #print(0.1 / 10 ** (floor(epoch / 40) + 1))\n #return 0.1 / 10 ** (floor(epoch / 40) + 1)\n\n class CheckValCMCallback(keras.callbacks.Callback):\n def __init__(self, m, dim, validation_files, epoch):\n super().__init__()\n self.vc = m\n self.dim = dim\n self.val_files = validation_files\n self.epoch = epoch\n self.accs = []\n\n def on_epoch_end(self, epoch, logs=None):\n csv_fusion = self.vc.load_early_csv(\"val\")\n # gen = self.vc.early_gen_new_val(csv_fusion, 16, \"eval\")\n # predictions = []\n # ground_truths = []\n # for x in gen:\n # ground_truths.append(self.vc.lb.inverse_transform(x[1])[0])\n # pred = self.model.predict(x[0])\n # pred = self.vc.lb.inverse_transform(pred)\n # predictions.append(pred[0])\n # self.vc.print_stats(ground_truths, predictions, \"Video\" + str(epoch))\n gen = self.vc.early_gen_new_val(csv_fusion, 16, \"eval\")\n acc = self.model.evaluate_generator(gen, self.dim, workers=0)\n self.accs.append(acc)\n print(\"Evaluate:\", acc)\n\n if self.epoch == epoch + 1:\n print(\"Validation_Accuracy =\", self.accs)\n\n cb = [ModelCheckpoint(\n filepath=str(\n \"weights_new_fusion/videoModel__t{accuracy:.4f}_epoch{epoch:02d}\" + model_name),\n monitor=\"val_accuracy\", save_weights_only=True),\n TensorBoard(log_dir=\"NewFusionLogs_sched/\" + self.train_mode + \"/\" + self.feature_name, write_graph=True,\n write_images=True)]\n cb += [LearningRateScheduler(custom_scheduler)]\n #cb += [CheckValCMCallback(self, no_of_val_images, val_files, train_data[\"epoch\"])]\n history = model.fit_generator(train_gen,\n validation_data=val_gen,\n epochs=train_data[\"epoch\"],\n steps_per_epoch=(no_of_training_images * 2 // train_data[\"batch_size\"]),\n validation_steps=(no_of_val_images),\n workers=0, verbose=1, callbacks=cb)\n print(\"\\n\\nTrain_Accuracy =\", history.history['accuracy'])\n print(\"\\nVal_Accuracy =\", history.history['val_accuracy'])\n print(\"\\n\\nTrain_Loss =\", history.history['loss'])\n print(\"\\nVal_Loss =\", history.history['val_loss'])\n\n def print_stats(self, ground_truths, predictions, name):\n cm = confusion_matrix(ground_truths, predictions, self.classes)\n print(\"###\" + name + \" Results###\\n\")\n # print_cm(cm, 
self.classes)\n # print(\"\\n\\n\")\n print_cm(np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=3), self.classes)\n print(\"\\n\\n\")\n print(\"Accuracy score: \", accuracy_score(ground_truths, predictions), \"\\n\\n\")\n # print(\"Report\")\n # print(classification_report(ground_truths, predictions))\n print(\"#################################################################end###\\n\\n\\n\")\n\n def print_confusion_matrix(self, stride=1):\n \"\"\" IMPLEMENT FOR EARLY FUSION MISSING \"\"\"\n csv_fusion = {}\n predictions = []\n ground_truths = []\n if self.train_mode == \"early_fusion\":\n csv_fusion = self.load_early_csv(\"val\")\n print(\"CSV loaded\", len(csv_fusion))\n gen = self.early_gen_new_val(csv_fusion, 1, \"eval\", stride)\n for x in gen:\n ground_truths.append(self.lb.inverse_transform(x[1])[0])\n pred = self.model.predict(x[0])\n pred = self.lb.inverse_transform(pred)\n predictions.append(pred[0])\n # print(\"\\ngt, pred\", self.lb.inverse_transform(x[1]), pred)\n self.print_stats(ground_truths, predictions, \"Video\")\n else:\n with open('lables_late_fusion' + self.feature_name + '.csv', 'r') as f:\n f.readline()\n csv_reader = csv.reader(f)\n for row in csv_reader:\n csv_fusion[row[0]] = [row[1], row[2], row[3]]\n a_p = []\n f_p = []\n files = glob.glob(\"/user/vlongobardi/late_feature/\" + self.feature_name + \"/*/*csv\")\n for file in files:\n clip_id = basename(file).split(\".\")[0]\n ground_truth, frame_pred, audio_pred = csv_fusion[clip_id]\n sample = np.append(self.lb.transform(np.array([audio_pred])), self.lb.transform(np.array([frame_pred])))\n pred = self.model.predict(sample.reshape((1, 14)))\n pred = self.lb.inverse_transform(pred)[0]\n predictions.append(pred)\n a_p.append(audio_pred)\n f_p.append(frame_pred)\n ground_truths.append(ground_truth)\n\n self.print_stats(ground_truths, predictions, \"Video\")\n self.print_stats(ground_truths, a_p, \"Audio\")\n self.print_stats(ground_truths, f_p, \"Frame\")\n\n\nif __name__ == \"__main__\":\n if sys.argv[1] == \"late\":\n print(\"LATE\")\n model_path = [\n \"audio_models/audioModel_0.2285_epoch135_lr0.1_OptSGD_Modela_model7_Featureemobase2010_100_3.h5\",\n \"audio_models/audioModel_0.2650_epoch01_lr0.01_OptSGD_Modela_model7_Featureemobase2010_300_2.h5\",\n \"audio_models/audioModel_0.2865_epoch13_lr0.001_OptSGD_Modela_model7_Featureemobase2010_600_0.h5\",\n \"audio_models/audioModel_0.3668_epoch67_lr0.001_OptSGD_Modela_model7_Featureemobase2010_full_2.h5\"\n ]\n for mp in model_path:\n vc = VideoClassifier(train_mode=\"late_fusion\", audio_model_path=mp)\n elif sys.argv[1] == \"early\":\n # mt = int(sys.argv[2])\n print(\"EARLY\") # , Model_type:\", mt)\n arff_paths = {\"e1\": \"emobase2010_100\", \"i1\": \"IS09_emotion_100\",\n \"e3\": \"emobase2010_300\", \"i3\": \"IS09_emotion_300\",\n \"e6\": \"emobase2010_600\", \"i6\": \"IS09_emotion_600\",\n \"ef\": \"emobase2010_full\", \"if\": \"IS09_emotion_full\"}\n vc = VideoClassifier(train_mode=\"early_fusion\", feature_name=arff_paths[sys.argv[2]]) # , model_type=mt)\n",
"step-ids": [
7,
11,
13,
14,
18
]
}
|
[
7,
11,
13,
14,
18
] |
#!/usr/bin/env python
from distutils.core import setup
setup(
name='RBM',
version='0.0.1',
description='Restricted Boltzmann Machines',
long_description='README',
install_requires=['numpy','pandas'],
)
|
normal
|
{
"blob_id": "fab7ee8a7336ba2c044adce4cc8483af78b775ba",
"index": 1827,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='RBM', version='0.0.1', description=\n 'Restricted Boltzmann Machines', long_description='README',\n install_requires=['numpy', 'pandas'])\n",
"step-3": "from distutils.core import setup\nsetup(name='RBM', version='0.0.1', description=\n 'Restricted Boltzmann Machines', long_description='README',\n install_requires=['numpy', 'pandas'])\n",
"step-4": "#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nsetup(\n name='RBM',\n version='0.0.1',\n description='Restricted Boltzmann Machines',\n long_description='README',\n install_requires=['numpy','pandas'],\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import psycopg2
DBNAME = "news"
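# query 1: the three most-viewed articles of all time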
query1 = """
select title, count(*) as numOfViews from articles,log
where concat('/article/', articles.slug) = log.path
group by title order by numOfViews desc limit 3;
"""
query2 = """
select authors.name, count(*) as numOfViews
from articles, authors, log
where articles.author = authors.id
and concat('/article/', articles.slug) = log.path
group by authors.name order by numOfViews desc ;
"""
query3 = """
select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)
as error from (select date_trunc('day', time) as badDay,
count(*) as total,
sum(case when status!='200 OK' then 1 else 0 end) as err
from log
group by badDay) as innerQuery
where round((100.0*innerQuery.err/innerQuery.total),3) >1;
"""
result = ''
def get_data(query):
""" fetch data from database """
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
c.execute(query)
data = c.fetchall()
db.close()
return data
def fileWrite(content):
""" write result to result.txt """
file = open('./result.txt', 'w')
file.write(content)
file.close()
def appendToResult(content, isError=False):
""" formating db result to readable text """
global result
if(isError):
for c in content:
result += c[0].strftime("%Y-%m-%d") + ' - ' + str(c[1]) + '% error'
else:
for c in content:
result += c[0] + ' - ' + str(c[1]) + ' views \n'
fileWrite(result)
if __name__ == '__main__':
result += '\n1. What are the most popular three articles of all time?\n\n'
appendToResult(get_data(query1))
result += ' \n2. Who are the most popular article authors of all time?\n\n'
appendToResult(get_data(query2))
result += '''\n3. On which days did more than
1% of requests lead to errors?\n\n'''
appendToResult(get_data(query3), True)
print(result)
fileWrite(result)
|
normal
|
{
"blob_id": "612a3d168a09fc26530b95d258cbb4de6728419d",
"index": 3721,
"step-1": "<mask token>\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_data(query):\n \"\"\" fetch data from database \"\"\"\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\nDBNAME = 'news'\nquery1 = \"\"\"\nselect title, count(*) as numOfViews from articles,log\nwhere concat('/article/', articles.slug) = log.path\ngroup by title order by numOfViews desc limit 3;\n\"\"\"\nquery2 = \"\"\"\nselect authors.name, count(*) as numOfViews\nfrom articles, authors, log\nwhere articles.author = authors.id\nand concat('/article/', articles.slug) = log.path\ngroup by authors.name order by numOfViews desc ;\n\"\"\"\nquery3 = \"\"\"\n select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)\n as error from (select date_trunc('day', time) as badDay,\n count(*) as total,\n sum(case when status!='200 OK' then 1 else 0 end) as err\n from log\n group by badDay) as innerQuery\n where round((100.0*innerQuery.err/innerQuery.total),3) >1;\n \"\"\"\nresult = ''\n\n\ndef get_data(query):\n \"\"\" fetch data from database \"\"\"\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\ndef appendToResult(content, isError=False):\n \"\"\" formating db result to readable text \"\"\"\n global result\n if isError:\n for c in content:\n result += c[0].strftime('%Y-%m-%d') + ' - ' + str(c[1]) + '% error'\n else:\n for c in content:\n result += c[0] + ' - ' + str(c[1]) + ' views \\n'\n fileWrite(result)\n\n\nif __name__ == '__main__':\n result += '\\n1. What are the most popular three articles of all time?\\n\\n'\n appendToResult(get_data(query1))\n result += ' \\n2. Who are the most popular article authors of all time?\\n\\n'\n appendToResult(get_data(query2))\n result += (\n '\\n3. On which days did more than\\n 1% of requests lead to errors?\\n\\n'\n )\n appendToResult(get_data(query3), True)\n print(result)\n fileWrite(result)\n",
"step-4": "import psycopg2\nDBNAME = 'news'\nquery1 = \"\"\"\nselect title, count(*) as numOfViews from articles,log\nwhere concat('/article/', articles.slug) = log.path\ngroup by title order by numOfViews desc limit 3;\n\"\"\"\nquery2 = \"\"\"\nselect authors.name, count(*) as numOfViews\nfrom articles, authors, log\nwhere articles.author = authors.id\nand concat('/article/', articles.slug) = log.path\ngroup by authors.name order by numOfViews desc ;\n\"\"\"\nquery3 = \"\"\"\n select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)\n as error from (select date_trunc('day', time) as badDay,\n count(*) as total,\n sum(case when status!='200 OK' then 1 else 0 end) as err\n from log\n group by badDay) as innerQuery\n where round((100.0*innerQuery.err/innerQuery.total),3) >1;\n \"\"\"\nresult = ''\n\n\ndef get_data(query):\n \"\"\" fetch data from database \"\"\"\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\ndef appendToResult(content, isError=False):\n \"\"\" formating db result to readable text \"\"\"\n global result\n if isError:\n for c in content:\n result += c[0].strftime('%Y-%m-%d') + ' - ' + str(c[1]) + '% error'\n else:\n for c in content:\n result += c[0] + ' - ' + str(c[1]) + ' views \\n'\n fileWrite(result)\n\n\nif __name__ == '__main__':\n result += '\\n1. What are the most popular three articles of all time?\\n\\n'\n appendToResult(get_data(query1))\n result += ' \\n2. Who are the most popular article authors of all time?\\n\\n'\n appendToResult(get_data(query2))\n result += (\n '\\n3. On which days did more than\\n 1% of requests lead to errors?\\n\\n'\n )\n appendToResult(get_data(query3), True)\n print(result)\n fileWrite(result)\n",
"step-5": "#!/usr/bin/env python\n\nimport psycopg2\n\nDBNAME = \"news\"\n\nquery1 = \"\"\"\nselect title, count(*) as numOfViews from articles,log\nwhere concat('/article/', articles.slug) = log.path\ngroup by title order by numOfViews desc limit 3;\n\"\"\"\nquery2 = \"\"\"\nselect authors.name, count(*) as numOfViews\nfrom articles, authors, log\nwhere articles.author = authors.id\nand concat('/article/', articles.slug) = log.path\ngroup by authors.name order by numOfViews desc ;\n\"\"\"\nquery3 = \"\"\"\n select innerQuery.badDay, ROUND((100.0*innerQuery.err/innerQuery.total),3)\n as error from (select date_trunc('day', time) as badDay,\n count(*) as total,\n sum(case when status!='200 OK' then 1 else 0 end) as err\n from log\n group by badDay) as innerQuery\n where round((100.0*innerQuery.err/innerQuery.total),3) >1;\n \"\"\"\nresult = ''\n\n\ndef get_data(query):\n \"\"\" fetch data from database \"\"\"\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(query)\n data = c.fetchall()\n db.close()\n return data\n\n\ndef fileWrite(content):\n \"\"\" write result to result.txt \"\"\"\n file = open('./result.txt', 'w')\n file.write(content)\n file.close()\n\n\ndef appendToResult(content, isError=False):\n \"\"\" formating db result to readable text \"\"\"\n global result\n if(isError):\n for c in content:\n result += c[0].strftime(\"%Y-%m-%d\") + ' - ' + str(c[1]) + '% error'\n else:\n for c in content:\n result += c[0] + ' - ' + str(c[1]) + ' views \\n'\n fileWrite(result)\n\n\nif __name__ == '__main__':\n result += '\\n1. What are the most popular three articles of all time?\\n\\n'\n appendToResult(get_data(query1))\n result += ' \\n2. Who are the most popular article authors of all time?\\n\\n'\n appendToResult(get_data(query2))\n result += '''\\n3. On which days did more than\n 1% of requests lead to errors?\\n\\n'''\n appendToResult(get_data(query3), True)\n print(result)\n fileWrite(result)\n",
"step-ids": [
1,
2,
5,
6,
7
]
}
|
[
1,
2,
5,
6,
7
] |
def getGC(st):
n = 0
for char in st:
if char == 'C' or char == 'G':
n += 1
return n
while True:
try:
DNA = input()
ln = int(input())
maxLen = 0
subDNA = ''
for i in range(len(DNA) - ln + 1):
sub = DNA[i:i + ln]
if getGC(sub) > maxLen:
maxLen = getGC(sub)
subDNA = sub
print(subDNA)
except:
break
|
normal
|
{
"blob_id": "afe63f94c7107cf79e57f695df8543e0786a155f",
"index": 6556,
"step-1": "<mask token>\n",
"step-2": "def getGC(st):\n n = 0\n for char in st:\n if char == 'C' or char == 'G':\n n += 1\n return n\n\n\n<mask token>\n",
"step-3": "def getGC(st):\n n = 0\n for char in st:\n if char == 'C' or char == 'G':\n n += 1\n return n\n\n\nwhile True:\n try:\n DNA = input()\n ln = int(input())\n maxLen = 0\n subDNA = ''\n for i in range(len(DNA) - ln + 1):\n sub = DNA[i:i + ln]\n if getGC(sub) > maxLen:\n maxLen = getGC(sub)\n subDNA = sub\n print(subDNA)\n except:\n break\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.contrib import admin
from .models import Sport
from .models import Action
admin.site.register(Sport)
admin.site.register(Action)
|
normal
|
{
"blob_id": "ab38371ee3941e214344497b7e56786908a9b3d1",
"index": 2236,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Sport)\nadmin.site.register(Action)\n",
"step-3": "from django.contrib import admin\nfrom .models import Sport\nfrom .models import Action\nadmin.site.register(Sport)\nadmin.site.register(Action)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import logging
from pathlib import Path
import numpy as np
import torch
import re
import json
from helpers import init_helper, data_helper, vsumm_helper, bbox_helper
from modules.model_zoo import get_model
logger = logging.getLogger()
def evaluate(model, val_loader, nms_thresh, device):
model.eval()
stats = data_helper.AverageMeter('fscore', 'diversity')
json_file = []
with torch.no_grad():
for test_key, seq, gt, cps, n_frames, nfps, picks, user_summary, name in val_loader:
seq_len = len(seq)
seq_torch = torch.from_numpy(seq).unsqueeze(0).to(device)
pred_cls, pred_bboxes = model.predict(seq_torch)
pred_bboxes = np.clip(pred_bboxes, 0, seq_len).round().astype(np.int32)
pred_cls, pred_bboxes = bbox_helper.nms(pred_cls, pred_bboxes, nms_thresh)
pred_summ, score = vsumm_helper.bbox2summary(
seq_len, pred_cls, pred_bboxes, cps, n_frames, nfps, picks)
eval_metric = 'avg' if 'tvsum' in test_key else 'max'
fscore = vsumm_helper.get_summ_f1score(
pred_summ, user_summary, eval_metric)
pred_arr, pred_seg = convert_array(pred_summ, nfps)
pred_summ = vsumm_helper.downsample_summ(pred_summ)
json_file.append({"video":str(name), "gt": convert_array_2(gt),
"pred_score": convert_array_2(score),
"user_anno":convert_user(user_summary),
"fscore": float(fscore),
"pred_sum": convert_array_2(pred_summ)})
diversity = vsumm_helper.get_summ_diversity(pred_summ, seq)
stats.update(fscore=fscore, diversity=diversity)
return stats.fscore, stats.diversity, json_file
def convert_user(arr):
res = []
for i in arr:
temp = []
for a in i:
temp.append(a.item())
res.append(temp)
return res
def convert_array_2(arr):
res = []
for i in arr:
res.append(i.item())
return res
def convert_array(user, nfps):
user_arr = []
shots_arr = []
for b in user:
user_arr.append(1 if b else 0)
shots_arr.append(nfps[0].item())
for i in range(1, len(nfps)):
shots_arr.append(shots_arr[i-1] + nfps[i].item())
return user_arr, shots_arr
def get_file_name(name):
arr = re.split("[\\/]", name)
print(arr)
return arr[-1]
def main():
args = init_helper.get_arguments()
init_helper.init_logger(args.model_dir, args.log_file)
init_helper.set_random_seed(args.seed)
logger.info(vars(args))
model = get_model(args.model, **vars(args))
model = model.eval().to(args.device)
f = []
for split_path in args.splits:
split_path = Path(split_path)
splits = data_helper.load_yaml(split_path)
stats = data_helper.AverageMeter('fscore', 'diversity')
for split_idx, split in enumerate(splits):
ckpt_path = data_helper.get_ckpt_path(args.model_dir, split_path, split_idx)
state_dict = torch.load(str(ckpt_path),
map_location=lambda storage, loc: storage)
model.load_state_dict(state_dict)
val_set = data_helper.VideoDataset(split['test_keys'])
val_loader = data_helper.DataLoader(val_set, shuffle=False)
fscore, diversity, json_file = evaluate(model, val_loader, args.nms_thresh, args.device)
f += json_file
stats.update(fscore=fscore, diversity=diversity)
logger.info(f'{split_path.stem} split {split_idx}: diversity: '
f'{diversity:.4f}, F-score: {fscore:.4f}')
logger.info(f'{split_path.stem}: diversity: {stats.diversity:.4f}, '
f'F-score: {stats.fscore:.4f}')
# with open('aftvsum.json', 'w') as fout:
# json.dump(f, fout)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "dd3419f42a3b1aafd1d4f5d88189fb3c6bd0c67e",
"index": 4233,
"step-1": "<mask token>\n\n\ndef evaluate(model, val_loader, nms_thresh, device):\n model.eval()\n stats = data_helper.AverageMeter('fscore', 'diversity')\n json_file = []\n with torch.no_grad():\n for test_key, seq, gt, cps, n_frames, nfps, picks, user_summary, name in val_loader:\n seq_len = len(seq)\n seq_torch = torch.from_numpy(seq).unsqueeze(0).to(device)\n pred_cls, pred_bboxes = model.predict(seq_torch)\n pred_bboxes = np.clip(pred_bboxes, 0, seq_len).round().astype(np\n .int32)\n pred_cls, pred_bboxes = bbox_helper.nms(pred_cls, pred_bboxes,\n nms_thresh)\n pred_summ, score = vsumm_helper.bbox2summary(seq_len, pred_cls,\n pred_bboxes, cps, n_frames, nfps, picks)\n eval_metric = 'avg' if 'tvsum' in test_key else 'max'\n fscore = vsumm_helper.get_summ_f1score(pred_summ, user_summary,\n eval_metric)\n pred_arr, pred_seg = convert_array(pred_summ, nfps)\n pred_summ = vsumm_helper.downsample_summ(pred_summ)\n json_file.append({'video': str(name), 'gt': convert_array_2(gt),\n 'pred_score': convert_array_2(score), 'user_anno':\n convert_user(user_summary), 'fscore': float(fscore),\n 'pred_sum': convert_array_2(pred_summ)})\n diversity = vsumm_helper.get_summ_diversity(pred_summ, seq)\n stats.update(fscore=fscore, diversity=diversity)\n return stats.fscore, stats.diversity, json_file\n\n\ndef convert_user(arr):\n res = []\n for i in arr:\n temp = []\n for a in i:\n temp.append(a.item())\n res.append(temp)\n return res\n\n\n<mask token>\n\n\ndef get_file_name(name):\n arr = re.split('[\\\\/]', name)\n print(arr)\n return arr[-1]\n\n\ndef main():\n args = init_helper.get_arguments()\n init_helper.init_logger(args.model_dir, args.log_file)\n init_helper.set_random_seed(args.seed)\n logger.info(vars(args))\n model = get_model(args.model, **vars(args))\n model = model.eval().to(args.device)\n f = []\n for split_path in args.splits:\n split_path = Path(split_path)\n splits = data_helper.load_yaml(split_path)\n stats = data_helper.AverageMeter('fscore', 'diversity')\n for split_idx, split in enumerate(splits):\n ckpt_path = data_helper.get_ckpt_path(args.model_dir,\n split_path, split_idx)\n state_dict = torch.load(str(ckpt_path), map_location=lambda\n storage, loc: storage)\n model.load_state_dict(state_dict)\n val_set = data_helper.VideoDataset(split['test_keys'])\n val_loader = data_helper.DataLoader(val_set, shuffle=False)\n fscore, diversity, json_file = evaluate(model, val_loader, args\n .nms_thresh, args.device)\n f += json_file\n stats.update(fscore=fscore, diversity=diversity)\n logger.info(\n f'{split_path.stem} split {split_idx}: diversity: {diversity:.4f}, F-score: {fscore:.4f}'\n )\n logger.info(\n f'{split_path.stem}: diversity: {stats.diversity:.4f}, F-score: {stats.fscore:.4f}'\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef evaluate(model, val_loader, nms_thresh, device):\n model.eval()\n stats = data_helper.AverageMeter('fscore', 'diversity')\n json_file = []\n with torch.no_grad():\n for test_key, seq, gt, cps, n_frames, nfps, picks, user_summary, name in val_loader:\n seq_len = len(seq)\n seq_torch = torch.from_numpy(seq).unsqueeze(0).to(device)\n pred_cls, pred_bboxes = model.predict(seq_torch)\n pred_bboxes = np.clip(pred_bboxes, 0, seq_len).round().astype(np\n .int32)\n pred_cls, pred_bboxes = bbox_helper.nms(pred_cls, pred_bboxes,\n nms_thresh)\n pred_summ, score = vsumm_helper.bbox2summary(seq_len, pred_cls,\n pred_bboxes, cps, n_frames, nfps, picks)\n eval_metric = 'avg' if 'tvsum' in test_key else 'max'\n fscore = vsumm_helper.get_summ_f1score(pred_summ, user_summary,\n eval_metric)\n pred_arr, pred_seg = convert_array(pred_summ, nfps)\n pred_summ = vsumm_helper.downsample_summ(pred_summ)\n json_file.append({'video': str(name), 'gt': convert_array_2(gt),\n 'pred_score': convert_array_2(score), 'user_anno':\n convert_user(user_summary), 'fscore': float(fscore),\n 'pred_sum': convert_array_2(pred_summ)})\n diversity = vsumm_helper.get_summ_diversity(pred_summ, seq)\n stats.update(fscore=fscore, diversity=diversity)\n return stats.fscore, stats.diversity, json_file\n\n\ndef convert_user(arr):\n res = []\n for i in arr:\n temp = []\n for a in i:\n temp.append(a.item())\n res.append(temp)\n return res\n\n\n<mask token>\n\n\ndef convert_array(user, nfps):\n user_arr = []\n shots_arr = []\n for b in user:\n user_arr.append(1 if b else 0)\n shots_arr.append(nfps[0].item())\n for i in range(1, len(nfps)):\n shots_arr.append(shots_arr[i - 1] + nfps[i].item())\n return user_arr, shots_arr\n\n\ndef get_file_name(name):\n arr = re.split('[\\\\/]', name)\n print(arr)\n return arr[-1]\n\n\ndef main():\n args = init_helper.get_arguments()\n init_helper.init_logger(args.model_dir, args.log_file)\n init_helper.set_random_seed(args.seed)\n logger.info(vars(args))\n model = get_model(args.model, **vars(args))\n model = model.eval().to(args.device)\n f = []\n for split_path in args.splits:\n split_path = Path(split_path)\n splits = data_helper.load_yaml(split_path)\n stats = data_helper.AverageMeter('fscore', 'diversity')\n for split_idx, split in enumerate(splits):\n ckpt_path = data_helper.get_ckpt_path(args.model_dir,\n split_path, split_idx)\n state_dict = torch.load(str(ckpt_path), map_location=lambda\n storage, loc: storage)\n model.load_state_dict(state_dict)\n val_set = data_helper.VideoDataset(split['test_keys'])\n val_loader = data_helper.DataLoader(val_set, shuffle=False)\n fscore, diversity, json_file = evaluate(model, val_loader, args\n .nms_thresh, args.device)\n f += json_file\n stats.update(fscore=fscore, diversity=diversity)\n logger.info(\n f'{split_path.stem} split {split_idx}: diversity: {diversity:.4f}, F-score: {fscore:.4f}'\n )\n logger.info(\n f'{split_path.stem}: diversity: {stats.diversity:.4f}, F-score: {stats.fscore:.4f}'\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef evaluate(model, val_loader, nms_thresh, device):\n model.eval()\n stats = data_helper.AverageMeter('fscore', 'diversity')\n json_file = []\n with torch.no_grad():\n for test_key, seq, gt, cps, n_frames, nfps, picks, user_summary, name in val_loader:\n seq_len = len(seq)\n seq_torch = torch.from_numpy(seq).unsqueeze(0).to(device)\n pred_cls, pred_bboxes = model.predict(seq_torch)\n pred_bboxes = np.clip(pred_bboxes, 0, seq_len).round().astype(np\n .int32)\n pred_cls, pred_bboxes = bbox_helper.nms(pred_cls, pred_bboxes,\n nms_thresh)\n pred_summ, score = vsumm_helper.bbox2summary(seq_len, pred_cls,\n pred_bboxes, cps, n_frames, nfps, picks)\n eval_metric = 'avg' if 'tvsum' in test_key else 'max'\n fscore = vsumm_helper.get_summ_f1score(pred_summ, user_summary,\n eval_metric)\n pred_arr, pred_seg = convert_array(pred_summ, nfps)\n pred_summ = vsumm_helper.downsample_summ(pred_summ)\n json_file.append({'video': str(name), 'gt': convert_array_2(gt),\n 'pred_score': convert_array_2(score), 'user_anno':\n convert_user(user_summary), 'fscore': float(fscore),\n 'pred_sum': convert_array_2(pred_summ)})\n diversity = vsumm_helper.get_summ_diversity(pred_summ, seq)\n stats.update(fscore=fscore, diversity=diversity)\n return stats.fscore, stats.diversity, json_file\n\n\ndef convert_user(arr):\n res = []\n for i in arr:\n temp = []\n for a in i:\n temp.append(a.item())\n res.append(temp)\n return res\n\n\ndef convert_array_2(arr):\n res = []\n for i in arr:\n res.append(i.item())\n return res\n\n\ndef convert_array(user, nfps):\n user_arr = []\n shots_arr = []\n for b in user:\n user_arr.append(1 if b else 0)\n shots_arr.append(nfps[0].item())\n for i in range(1, len(nfps)):\n shots_arr.append(shots_arr[i - 1] + nfps[i].item())\n return user_arr, shots_arr\n\n\ndef get_file_name(name):\n arr = re.split('[\\\\/]', name)\n print(arr)\n return arr[-1]\n\n\ndef main():\n args = init_helper.get_arguments()\n init_helper.init_logger(args.model_dir, args.log_file)\n init_helper.set_random_seed(args.seed)\n logger.info(vars(args))\n model = get_model(args.model, **vars(args))\n model = model.eval().to(args.device)\n f = []\n for split_path in args.splits:\n split_path = Path(split_path)\n splits = data_helper.load_yaml(split_path)\n stats = data_helper.AverageMeter('fscore', 'diversity')\n for split_idx, split in enumerate(splits):\n ckpt_path = data_helper.get_ckpt_path(args.model_dir,\n split_path, split_idx)\n state_dict = torch.load(str(ckpt_path), map_location=lambda\n storage, loc: storage)\n model.load_state_dict(state_dict)\n val_set = data_helper.VideoDataset(split['test_keys'])\n val_loader = data_helper.DataLoader(val_set, shuffle=False)\n fscore, diversity, json_file = evaluate(model, val_loader, args\n .nms_thresh, args.device)\n f += json_file\n stats.update(fscore=fscore, diversity=diversity)\n logger.info(\n f'{split_path.stem} split {split_idx}: diversity: {diversity:.4f}, F-score: {fscore:.4f}'\n )\n logger.info(\n f'{split_path.stem}: diversity: {stats.diversity:.4f}, F-score: {stats.fscore:.4f}'\n )\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nlogger = logging.getLogger()\n\n\ndef evaluate(model, val_loader, nms_thresh, device):\n model.eval()\n stats = data_helper.AverageMeter('fscore', 'diversity')\n json_file = []\n with torch.no_grad():\n for test_key, seq, gt, cps, n_frames, nfps, picks, user_summary, name in val_loader:\n seq_len = len(seq)\n seq_torch = torch.from_numpy(seq).unsqueeze(0).to(device)\n pred_cls, pred_bboxes = model.predict(seq_torch)\n pred_bboxes = np.clip(pred_bboxes, 0, seq_len).round().astype(np\n .int32)\n pred_cls, pred_bboxes = bbox_helper.nms(pred_cls, pred_bboxes,\n nms_thresh)\n pred_summ, score = vsumm_helper.bbox2summary(seq_len, pred_cls,\n pred_bboxes, cps, n_frames, nfps, picks)\n eval_metric = 'avg' if 'tvsum' in test_key else 'max'\n fscore = vsumm_helper.get_summ_f1score(pred_summ, user_summary,\n eval_metric)\n pred_arr, pred_seg = convert_array(pred_summ, nfps)\n pred_summ = vsumm_helper.downsample_summ(pred_summ)\n json_file.append({'video': str(name), 'gt': convert_array_2(gt),\n 'pred_score': convert_array_2(score), 'user_anno':\n convert_user(user_summary), 'fscore': float(fscore),\n 'pred_sum': convert_array_2(pred_summ)})\n diversity = vsumm_helper.get_summ_diversity(pred_summ, seq)\n stats.update(fscore=fscore, diversity=diversity)\n return stats.fscore, stats.diversity, json_file\n\n\ndef convert_user(arr):\n res = []\n for i in arr:\n temp = []\n for a in i:\n temp.append(a.item())\n res.append(temp)\n return res\n\n\ndef convert_array_2(arr):\n res = []\n for i in arr:\n res.append(i.item())\n return res\n\n\ndef convert_array(user, nfps):\n user_arr = []\n shots_arr = []\n for b in user:\n user_arr.append(1 if b else 0)\n shots_arr.append(nfps[0].item())\n for i in range(1, len(nfps)):\n shots_arr.append(shots_arr[i - 1] + nfps[i].item())\n return user_arr, shots_arr\n\n\ndef get_file_name(name):\n arr = re.split('[\\\\/]', name)\n print(arr)\n return arr[-1]\n\n\ndef main():\n args = init_helper.get_arguments()\n init_helper.init_logger(args.model_dir, args.log_file)\n init_helper.set_random_seed(args.seed)\n logger.info(vars(args))\n model = get_model(args.model, **vars(args))\n model = model.eval().to(args.device)\n f = []\n for split_path in args.splits:\n split_path = Path(split_path)\n splits = data_helper.load_yaml(split_path)\n stats = data_helper.AverageMeter('fscore', 'diversity')\n for split_idx, split in enumerate(splits):\n ckpt_path = data_helper.get_ckpt_path(args.model_dir,\n split_path, split_idx)\n state_dict = torch.load(str(ckpt_path), map_location=lambda\n storage, loc: storage)\n model.load_state_dict(state_dict)\n val_set = data_helper.VideoDataset(split['test_keys'])\n val_loader = data_helper.DataLoader(val_set, shuffle=False)\n fscore, diversity, json_file = evaluate(model, val_loader, args\n .nms_thresh, args.device)\n f += json_file\n stats.update(fscore=fscore, diversity=diversity)\n logger.info(\n f'{split_path.stem} split {split_idx}: diversity: {diversity:.4f}, F-score: {fscore:.4f}'\n )\n logger.info(\n f'{split_path.stem}: diversity: {stats.diversity:.4f}, F-score: {stats.fscore:.4f}'\n )\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import logging\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport re\nimport json\n\nfrom helpers import init_helper, data_helper, vsumm_helper, bbox_helper\nfrom modules.model_zoo import get_model\n\nlogger = logging.getLogger()\n\n\ndef evaluate(model, val_loader, nms_thresh, device):\n model.eval()\n stats = data_helper.AverageMeter('fscore', 'diversity')\n json_file = []\n with torch.no_grad():\n for test_key, seq, gt, cps, n_frames, nfps, picks, user_summary, name in val_loader:\n seq_len = len(seq)\n seq_torch = torch.from_numpy(seq).unsqueeze(0).to(device)\n pred_cls, pred_bboxes = model.predict(seq_torch)\n pred_bboxes = np.clip(pred_bboxes, 0, seq_len).round().astype(np.int32)\n\n pred_cls, pred_bboxes = bbox_helper.nms(pred_cls, pred_bboxes, nms_thresh)\n pred_summ, score = vsumm_helper.bbox2summary(\n seq_len, pred_cls, pred_bboxes, cps, n_frames, nfps, picks)\n eval_metric = 'avg' if 'tvsum' in test_key else 'max'\n fscore = vsumm_helper.get_summ_f1score(\n pred_summ, user_summary, eval_metric)\n pred_arr, pred_seg = convert_array(pred_summ, nfps)\n pred_summ = vsumm_helper.downsample_summ(pred_summ)\n json_file.append({\"video\":str(name), \"gt\": convert_array_2(gt), \n \"pred_score\": convert_array_2(score), \n \"user_anno\":convert_user(user_summary),\n \"fscore\": float(fscore),\n \"pred_sum\": convert_array_2(pred_summ)})\n diversity = vsumm_helper.get_summ_diversity(pred_summ, seq)\n stats.update(fscore=fscore, diversity=diversity)\n\n return stats.fscore, stats.diversity, json_file\n\ndef convert_user(arr):\n res = []\n for i in arr:\n temp = []\n for a in i:\n temp.append(a.item())\n res.append(temp)\n return res\n\ndef convert_array_2(arr):\n res = []\n for i in arr:\n res.append(i.item())\n return res\n\ndef convert_array(user, nfps):\n user_arr = []\n shots_arr = []\n for b in user:\n user_arr.append(1 if b else 0)\n shots_arr.append(nfps[0].item())\n for i in range(1, len(nfps)):\n shots_arr.append(shots_arr[i-1] + nfps[i].item())\n return user_arr, shots_arr\n\ndef get_file_name(name):\n arr = re.split(\"[\\\\/]\", name)\n print(arr)\n return arr[-1]\n\n\ndef main():\n args = init_helper.get_arguments()\n\n init_helper.init_logger(args.model_dir, args.log_file)\n init_helper.set_random_seed(args.seed)\n\n logger.info(vars(args))\n model = get_model(args.model, **vars(args))\n model = model.eval().to(args.device)\n f = []\n for split_path in args.splits:\n split_path = Path(split_path)\n splits = data_helper.load_yaml(split_path)\n\n stats = data_helper.AverageMeter('fscore', 'diversity')\n\n for split_idx, split in enumerate(splits):\n ckpt_path = data_helper.get_ckpt_path(args.model_dir, split_path, split_idx)\n state_dict = torch.load(str(ckpt_path),\n map_location=lambda storage, loc: storage)\n model.load_state_dict(state_dict)\n\n val_set = data_helper.VideoDataset(split['test_keys'])\n val_loader = data_helper.DataLoader(val_set, shuffle=False)\n fscore, diversity, json_file = evaluate(model, val_loader, args.nms_thresh, args.device)\n f += json_file\n stats.update(fscore=fscore, diversity=diversity)\n\n logger.info(f'{split_path.stem} split {split_idx}: diversity: '\n f'{diversity:.4f}, F-score: {fscore:.4f}')\n\n logger.info(f'{split_path.stem}: diversity: {stats.diversity:.4f}, '\n f'F-score: {stats.fscore:.4f}')\n # with open('aftvsum.json', 'w') as fout:\n # json.dump(f, fout)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
# Author: Sam Erickson
# Date: 2/23/2016
#
# Program Description: This program gives the integer coefficients x,y to the
# equation ax+by=gcd(a,b) given by the extended Euclidean Algorithm.
def extendedEuclid(a,b):
"""
Preconditions - a and b are both positive integers.
Posconditions - The equation for ax+by=gcd(a,b) has been returned where
x and y are solved.
Input - a : int, b : int
Output - ax+by=gcd(a,b) : string
"""
b,a=max(a,b),min(a,b)
# Format of euclidList is for back-substitution
euclidList=[[b%a,1,b,-1*(b//a),a]]
while b%a>0:
b,a=a,b%a
euclidList.append([b%a,1,b,-1*(b//a),a])
if len(euclidList)>1:
euclidList.pop()
euclidList=euclidList[::-1]
for i in range(1,len(euclidList)):
euclidList[i][1]*=euclidList[i-1][3]
euclidList[i][3]*=euclidList[i-1][3]
euclidList[i][3]+=euclidList[i-1][1]
expr=euclidList[len(euclidList)-1]
strExpr=str(expr[1])+"*"+str(expr[2])+" + "+str(expr[3])+"*"+str(expr[4]) \
+" = "+str(euclidList[0][0])
return strExpr
|
normal
|
{
"blob_id": "36e5b0f40b8016f39120f839766db0ac518c9bed",
"index": 4712,
"step-1": "<mask token>\n",
"step-2": "def extendedEuclid(a, b):\n \"\"\"\n Preconditions - a and b are both positive integers.\n Posconditions - The equation for ax+by=gcd(a,b) has been returned where\n x and y are solved.\n Input - a : int, b : int\n Output - ax+by=gcd(a,b) : string\n \"\"\"\n b, a = max(a, b), min(a, b)\n euclidList = [[b % a, 1, b, -1 * (b // a), a]]\n while b % a > 0:\n b, a = a, b % a\n euclidList.append([b % a, 1, b, -1 * (b // a), a])\n if len(euclidList) > 1:\n euclidList.pop()\n euclidList = euclidList[::-1]\n for i in range(1, len(euclidList)):\n euclidList[i][1] *= euclidList[i - 1][3]\n euclidList[i][3] *= euclidList[i - 1][3]\n euclidList[i][3] += euclidList[i - 1][1]\n expr = euclidList[len(euclidList) - 1]\n strExpr = str(expr[1]) + '*' + str(expr[2]) + ' + ' + str(expr[3]\n ) + '*' + str(expr[4]) + ' = ' + str(euclidList[0][0])\n return strExpr\n",
"step-3": "# Author: Sam Erickson\n# Date: 2/23/2016\n#\n# Program Description: This program gives the integer coefficients x,y to the\n# equation ax+by=gcd(a,b) given by the extended Euclidean Algorithm. \n\ndef extendedEuclid(a,b):\n \"\"\"\n Preconditions - a and b are both positive integers.\n Posconditions - The equation for ax+by=gcd(a,b) has been returned where\n x and y are solved.\n Input - a : int, b : int\n Output - ax+by=gcd(a,b) : string\n \"\"\"\n b,a=max(a,b),min(a,b)\n # Format of euclidList is for back-substitution\n euclidList=[[b%a,1,b,-1*(b//a),a]]\n while b%a>0:\n b,a=a,b%a \n euclidList.append([b%a,1,b,-1*(b//a),a])\n if len(euclidList)>1:\n euclidList.pop()\n euclidList=euclidList[::-1]\n for i in range(1,len(euclidList)):\n euclidList[i][1]*=euclidList[i-1][3]\n euclidList[i][3]*=euclidList[i-1][3]\n euclidList[i][3]+=euclidList[i-1][1]\n \n expr=euclidList[len(euclidList)-1]\n strExpr=str(expr[1])+\"*\"+str(expr[2])+\" + \"+str(expr[3])+\"*\"+str(expr[4]) \\\n +\" = \"+str(euclidList[0][0])\n return strExpr\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""Labeled entry widget.
The goal of these widgets is twofold: to make it easier for developers
to implement dialogs with compound widgets, and to naturally
standardize the user interface presented to the user.
"""
import logging
import seamm_widgets as sw
import tkinter as tk
import tkinter.ttk as ttk
logger = logging.getLogger(__name__)
options = {
"entry": {
"class_": "class_",
"cursor": "cursor",
"exportselection": "exportselection",
"font": "font",
"invalidcommand": "invalidcommand",
"justify": "justify",
"show": "show",
"style": "style",
"takefocus": "takefocus",
"variable": "textvariable",
"validate": "validate",
"validatecommand": "validatecommand",
"width": "width",
"xscrollcommand": "xscrollcommand",
},
}
class LabeledEntry(sw.LabeledWidget):
def __init__(self, parent, *args, **kwargs):
"""Initialize the instance"""
class_ = kwargs.pop("class_", "MLabeledEntry")
super().__init__(parent, class_=class_)
interior = self.interior
# entry
justify = kwargs.pop("justify", tk.LEFT)
entrywidth = kwargs.pop("width", 15)
self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)
self.entry.grid(row=0, column=0, sticky=tk.EW)
# interior frame
self.interior = ttk.Frame(interior)
self.interior.grid(row=0, column=1, sticky=tk.NSEW)
interior.columnconfigure(0, weight=1)
self.config(**kwargs)
@property
def value(self):
return self.get()
@value.setter
def value(self, value):
self.set(value)
def show(self, *args):
"""Show only the specified subwidgets.
'all' or no arguments reverts to showing all"""
super().show(*args)
show_all = len(args) == 0 or args[0] == "all"
if show_all or "entry" in args:
self.entry.grid(row=0, column=0, sticky=tk.EW)
else:
self.entry.grid_forget()
def set(self, value):
"""Set the value of the entry widget"""
self.entry.delete(0, tk.END)
if value is None:
return
self.entry.insert(0, value)
def get(self):
"""return the current value"""
value = self.entry.get()
return value
def config(self, **kwargs):
"""Set the configuration of the megawidget"""
# our options that we deal with
entry = options["entry"]
# cannot modify kwargs while iterating over it...
keys = [*kwargs.keys()]
for k in keys:
if k in entry:
v = kwargs.pop(k)
self.entry.config(**{entry[k]: v})
# having removed our options, pass rest to parent
super().config(**kwargs)
|
normal
|
{
"blob_id": "111186f1d45b9cf3bf9065c7fa83a8f3f796bbe1",
"index": 5841,
"step-1": "<mask token>\n\n\nclass LabeledEntry(sw.LabeledWidget):\n <mask token>\n\n @property\n def value(self):\n return self.get()\n <mask token>\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n <mask token>\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n",
"step-2": "<mask token>\n\n\nclass LabeledEntry(sw.LabeledWidget):\n <mask token>\n\n @property\n def value(self):\n return self.get()\n <mask token>\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n self.entry.delete(0, tk.END)\n if value is None:\n return\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n",
"step-3": "<mask token>\n\n\nclass LabeledEntry(sw.LabeledWidget):\n\n def __init__(self, parent, *args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop('class_', 'MLabeledEntry')\n super().__init__(parent, class_=class_)\n interior = self.interior\n justify = kwargs.pop('justify', tk.LEFT)\n entrywidth = kwargs.pop('width', 15)\n self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n interior.columnconfigure(0, weight=1)\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n self.entry.delete(0, tk.END)\n if value is None:\n return\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n",
"step-4": "<mask token>\nlogger = logging.getLogger(__name__)\noptions = {'entry': {'class_': 'class_', 'cursor': 'cursor',\n 'exportselection': 'exportselection', 'font': 'font', 'invalidcommand':\n 'invalidcommand', 'justify': 'justify', 'show': 'show', 'style':\n 'style', 'takefocus': 'takefocus', 'variable': 'textvariable',\n 'validate': 'validate', 'validatecommand': 'validatecommand', 'width':\n 'width', 'xscrollcommand': 'xscrollcommand'}}\n\n\nclass LabeledEntry(sw.LabeledWidget):\n\n def __init__(self, parent, *args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop('class_', 'MLabeledEntry')\n super().__init__(parent, class_=class_)\n interior = self.interior\n justify = kwargs.pop('justify', tk.LEFT)\n entrywidth = kwargs.pop('width', 15)\n self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n interior.columnconfigure(0, weight=1)\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n super().show(*args)\n show_all = len(args) == 0 or args[0] == 'all'\n if show_all or 'entry' in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n self.entry.delete(0, tk.END)\n if value is None:\n return\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n entry = options['entry']\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n super().config(**kwargs)\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\"\"\"Labeled entry widget.\n\nThe goal of these widgets is twofold: to make it easier for developers\nto implement dialogs with compound widgets, and to naturally\nstandardize the user interface presented to the user.\n\"\"\"\n\nimport logging\nimport seamm_widgets as sw\nimport tkinter as tk\nimport tkinter.ttk as ttk\n\nlogger = logging.getLogger(__name__)\n\noptions = {\n \"entry\": {\n \"class_\": \"class_\",\n \"cursor\": \"cursor\",\n \"exportselection\": \"exportselection\",\n \"font\": \"font\",\n \"invalidcommand\": \"invalidcommand\",\n \"justify\": \"justify\",\n \"show\": \"show\",\n \"style\": \"style\",\n \"takefocus\": \"takefocus\",\n \"variable\": \"textvariable\",\n \"validate\": \"validate\",\n \"validatecommand\": \"validatecommand\",\n \"width\": \"width\",\n \"xscrollcommand\": \"xscrollcommand\",\n },\n}\n\n\nclass LabeledEntry(sw.LabeledWidget):\n def __init__(self, parent, *args, **kwargs):\n \"\"\"Initialize the instance\"\"\"\n class_ = kwargs.pop(\"class_\", \"MLabeledEntry\")\n super().__init__(parent, class_=class_)\n\n interior = self.interior\n\n # entry\n justify = kwargs.pop(\"justify\", tk.LEFT)\n entrywidth = kwargs.pop(\"width\", 15)\n\n self.entry = ttk.Entry(interior, justify=justify, width=entrywidth)\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n\n # interior frame\n self.interior = ttk.Frame(interior)\n self.interior.grid(row=0, column=1, sticky=tk.NSEW)\n\n interior.columnconfigure(0, weight=1)\n\n self.config(**kwargs)\n\n @property\n def value(self):\n return self.get()\n\n @value.setter\n def value(self, value):\n self.set(value)\n\n def show(self, *args):\n \"\"\"Show only the specified subwidgets.\n 'all' or no arguments reverts to showing all\"\"\"\n\n super().show(*args)\n\n show_all = len(args) == 0 or args[0] == \"all\"\n\n if show_all or \"entry\" in args:\n self.entry.grid(row=0, column=0, sticky=tk.EW)\n else:\n self.entry.grid_forget()\n\n def set(self, value):\n \"\"\"Set the value of the entry widget\"\"\"\n\n self.entry.delete(0, tk.END)\n if value is None:\n return\n\n self.entry.insert(0, value)\n\n def get(self):\n \"\"\"return the current value\"\"\"\n value = self.entry.get()\n return value\n\n def config(self, **kwargs):\n \"\"\"Set the configuration of the megawidget\"\"\"\n\n # our options that we deal with\n entry = options[\"entry\"]\n\n # cannot modify kwargs while iterating over it...\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n\n # having removed our options, pass rest to parent\n super().config(**kwargs)\n",
"step-ids": [
5,
6,
8,
9,
11
]
}
|
[
5,
6,
8,
9,
11
] |
from __future__ import print_function
import os
import shutil
import pymake
import flopy
# set up paths
dstpth = os.path.join('temp')
if not os.path.exists(dstpth):
os.makedirs(dstpth)
mp6pth = os.path.join(dstpth, 'Modpath_7_1_000')
expth = os.path.join(mp6pth, 'examples')
exe_name = 'mp7'
srcpth = os.path.join(mp6pth, 'source')
target = os.path.join(dstpth, exe_name)
def compile_code():
# Remove the existing modpath6 directory if it exists
if os.path.isdir(mp6pth):
shutil.rmtree(mp6pth)
# Download the MODFLOW-2005 distribution
url = "https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip"
pymake.download_and_unzip(url, pth=dstpth)
# modify source files that prevent compiling with gfortran
pth = os.path.join(srcpth, 'utl7u1.f')
if os.path.isfile(pth):
os.remove(pth)
fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('location.', 'location%')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'ModpathCellData.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')
line = line.replace('dimension(grid%GetReducedConnectionCount())',
'dimension(:)')
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
fname1 = os.path.join(srcpth, 'MPath7.f90')
f = open(fname1, 'r')
fname2 = os.path.join(srcpth, 'MPath7_mod.f90')
f2 = open(fname2, 'w')
for line in f:
line = line.replace("form='binary', access='stream'",
"form='unformatted', access='stream'")
f2.write(line)
f.close()
f2.close()
os.remove(fname1)
os.rename(fname2, fname1)
# allow line lengths greater than 132 columns
fflags = 'ffree-line-length-512'
# make modpath 7
pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True,
expedite=False, dryrun=False, double=False, debug=False,
fflags=fflags)
assert os.path.isfile(target), 'Target does not exist.'
def get_simfiles():
dirs = [name for name in os.listdir(expth) if
os.path.isdir(os.path.join(expth, name))]
simfiles = []
for d in dirs:
pth = os.path.join(expth, d, 'original')
simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if
f.endswith('.mpsim')]
return simfiles
def replace_files():
dirs = [name for name in os.listdir(expth) if
os.path.isdir(os.path.join(expth, name))]
# rename a few files for linux
replace_files = ['example_1.BUD', 'Zones_layer_3.txt',
'Retardation_layer_1.txt']
for d in dirs:
pth = os.path.join(expth, d, 'original')
for rf in replace_files:
fname1 = os.path.join(pth, rf)
if rf in os.listdir(pth):
fname2 = os.path.join(pth, 'temp')
print('copy {} to {}'.format(os.path.basename(fname1),
os.path.basename(fname2)))
shutil.copy(fname1, fname2)
print('deleting {}'.format(os.path.basename(fname1)))
os.remove(fname1)
fname1 = os.path.join(pth, rf.lower())
print('rename {} to {}'.format(os.path.basename(fname2),
os.path.basename(fname1)))
os.rename(fname2, fname1)
def run_modpath7(fn):
# run the model
print('running model...{}'.format(fn))
exe = os.path.abspath(target)
fpth = os.path.basename(fn)
model_ws = os.path.dirname(fn)
success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)
assert success, 'could not run...{}'.format(os.path.basename(fn))
return
def clean_up():
# clean up
print('Removing folder ' + mp6pth)
shutil.rmtree(mp6pth)
print('Removing ' + target)
os.remove(target)
return
def test_compile():
# compile MODPATH 7
compile_code()
def test_modpath7():
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
yield run_modpath7, fn
def test_clean_up():
yield clean_up
if __name__ == "__main__":
compile_code()
simfiles = get_simfiles()
replace_files()
for fn in simfiles:
run_modpath7(fn)
clean_up()
|
normal
|
{
"blob_id": "ddaba7a8b53072da36224dd4618696ebf0e9a4e4",
"index": 1015,
"step-1": "<mask token>\n\n\ndef compile_code():\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'\n pymake.download_and_unzip(url, pth=dstpth)\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fflags = 'ffree-line-length-512'\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite\n =False, dryrun=False, double=False, debug=False, fflags=fflags)\n assert os.path.isfile(target), 'Target does not exist.'\n\n\n<mask token>\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\ndef run_modpath7(fn):\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef compile_code():\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'\n pymake.download_and_unzip(url, pth=dstpth)\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fflags = 'ffree-line-length-512'\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite\n =False, dryrun=False, double=False, debug=False, fflags=fflags)\n assert os.path.isfile(target), 'Target does not exist.'\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.\n endswith('.mpsim')]\n return simfiles\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\ndef run_modpath7(fn):\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\n<mask token>\n",
"step-3": "<mask token>\nif not os.path.exists(dstpth):\n os.makedirs(dstpth)\n<mask token>\n\n\ndef compile_code():\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'\n pymake.download_and_unzip(url, pth=dstpth)\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fflags = 'ffree-line-length-512'\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite\n =False, dryrun=False, double=False, debug=False, fflags=fflags)\n assert os.path.isfile(target), 'Target does not exist.'\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.\n endswith('.mpsim')]\n return simfiles\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\ndef run_modpath7(fn):\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\nif __name__ == '__main__':\n compile_code()\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n run_modpath7(fn)\n 
clean_up()\n",
"step-4": "from __future__ import print_function\nimport os\nimport shutil\nimport pymake\nimport flopy\ndstpth = os.path.join('temp')\nif not os.path.exists(dstpth):\n os.makedirs(dstpth)\nmp6pth = os.path.join(dstpth, 'Modpath_7_1_000')\nexpth = os.path.join(mp6pth, 'examples')\nexe_name = 'mp7'\nsrcpth = os.path.join(mp6pth, 'source')\ntarget = os.path.join(dstpth, exe_name)\n\n\ndef compile_code():\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n url = 'https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip'\n pymake.download_and_unzip(url, pth=dstpth)\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n fflags = 'ffree-line-length-512'\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True, expedite\n =False, dryrun=False, double=False, debug=False, fflags=fflags)\n assert os.path.isfile(target), 'Target does not exist.'\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if f.\n endswith('.mpsim')]\n return simfiles\n\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if os.path.isdir(os.path.\n join(expth, name))]\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1), os.\n path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2), os\n .path.basename(fname1)))\n os.rename(fname2, fname1)\n\n\ndef run_modpath7(fn):\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n compile_code()\n\n\ndef 
test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\nif __name__ == '__main__':\n compile_code()\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n run_modpath7(fn)\n clean_up()\n",
"step-5": "from __future__ import print_function\nimport os\nimport shutil\nimport pymake\nimport flopy\n\n# set up paths\ndstpth = os.path.join('temp')\nif not os.path.exists(dstpth):\n os.makedirs(dstpth)\nmp6pth = os.path.join(dstpth, 'Modpath_7_1_000')\nexpth = os.path.join(mp6pth, 'examples')\n\nexe_name = 'mp7'\nsrcpth = os.path.join(mp6pth, 'source')\ntarget = os.path.join(dstpth, exe_name)\n\n\ndef compile_code():\n # Remove the existing modpath6 directory if it exists\n if os.path.isdir(mp6pth):\n shutil.rmtree(mp6pth)\n\n # Download the MODFLOW-2005 distribution\n url = \"https://water.usgs.gov/ogw/modpath/Modpath_7_1_000.zip\"\n pymake.download_and_unzip(url, pth=dstpth)\n\n # modify source files that prevent compiling with gfortran\n pth = os.path.join(srcpth, 'utl7u1.f')\n if os.path.isfile(pth):\n os.remove(pth)\n\n fname1 = os.path.join(srcpth, 'ModpathSubCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathSubCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('location.', 'location%')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n\n fname1 = os.path.join(srcpth, 'ModpathCellData.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'ModpathCellData_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace('dimension(grid%GetCellCount())', 'dimension(:)')\n line = line.replace('dimension(grid%GetReducedConnectionCount())',\n 'dimension(:)')\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n\n fname1 = os.path.join(srcpth, 'MPath7.f90')\n f = open(fname1, 'r')\n fname2 = os.path.join(srcpth, 'MPath7_mod.f90')\n f2 = open(fname2, 'w')\n for line in f:\n line = line.replace(\"form='binary', access='stream'\",\n \"form='unformatted', access='stream'\")\n f2.write(line)\n f.close()\n f2.close()\n os.remove(fname1)\n os.rename(fname2, fname1)\n\n # allow line lengths greater than 132 columns\n fflags = 'ffree-line-length-512'\n\n # make modpath 7\n pymake.main(srcpth, target, 'gfortran', 'gcc', makeclean=True,\n expedite=False, dryrun=False, double=False, debug=False,\n fflags=fflags)\n\n assert os.path.isfile(target), 'Target does not exist.'\n\n\ndef get_simfiles():\n dirs = [name for name in os.listdir(expth) if\n os.path.isdir(os.path.join(expth, name))]\n simfiles = []\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n simfiles += [os.path.join(pth, f) for f in os.listdir(pth) if\n f.endswith('.mpsim')]\n return simfiles\n\ndef replace_files():\n dirs = [name for name in os.listdir(expth) if\n os.path.isdir(os.path.join(expth, name))]\n # rename a few files for linux\n replace_files = ['example_1.BUD', 'Zones_layer_3.txt',\n 'Retardation_layer_1.txt']\n for d in dirs:\n pth = os.path.join(expth, d, 'original')\n for rf in replace_files:\n fname1 = os.path.join(pth, rf)\n if rf in os.listdir(pth):\n fname2 = os.path.join(pth, 'temp')\n print('copy {} to {}'.format(os.path.basename(fname1),\n os.path.basename(fname2)))\n shutil.copy(fname1, fname2)\n print('deleting {}'.format(os.path.basename(fname1)))\n os.remove(fname1)\n fname1 = os.path.join(pth, rf.lower())\n print('rename {} to {}'.format(os.path.basename(fname2),\n os.path.basename(fname1)))\n os.rename(fname2, fname1)\n\ndef run_modpath7(fn):\n # run the model\n print('running model...{}'.format(fn))\n exe = os.path.abspath(target)\n fpth = os.path.basename(fn)\n model_ws = os.path.dirname(fn)\n success, buff = flopy.run_model(exe, fpth, 
model_ws=model_ws, silent=False)\n assert success, 'could not run...{}'.format(os.path.basename(fn))\n return\n\n\ndef clean_up():\n # clean up\n print('Removing folder ' + mp6pth)\n shutil.rmtree(mp6pth)\n print('Removing ' + target)\n os.remove(target)\n return\n\n\ndef test_compile():\n # compile MODPATH 7\n compile_code()\n\n\ndef test_modpath7():\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n yield run_modpath7, fn\n\n\ndef test_clean_up():\n yield clean_up\n\n\nif __name__ == \"__main__\":\n compile_code()\n simfiles = get_simfiles()\n replace_files()\n for fn in simfiles:\n run_modpath7(fn)\n clean_up()\n",
"step-ids": [
7,
8,
9,
11,
12
]
}
|
[
7,
8,
9,
11,
12
] |
# Generated by Django 3.0.10 on 2020-12-19 15:07
from django.db import migrations, models
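# Editorial note (hedged): this migration only registers an unmanaged "Admin"
# placeholder model so that the custom "access_admin" permission exists for the
# Wagtail admin; managed=False means no table is created, and
# default_permissions=[] suppresses the default add/change/delete permissions.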
class Migration(migrations.Migration):
initial = True
dependencies = [
("wagtailadmin", "0001_create_admin_access_permissions"),
]
operations = [
migrations.CreateModel(
name="Admin",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
],
options={
"permissions": [("access_admin", "Can access Wagtail admin")],
"managed": False,
"default_permissions": [],
},
),
]
|
normal
|
{
"blob_id": "52a4213a1729e25f96faebc5fd4f299017446c5a",
"index": 6370,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('wagtailadmin', '0001_create_admin_access_permissions')]\n operations = [migrations.CreateModel(name='Admin', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID'))], options={'permissions': [(\n 'access_admin', 'Can access Wagtail admin')], 'managed': False,\n 'default_permissions': []})]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('wagtailadmin', '0001_create_admin_access_permissions')]\n operations = [migrations.CreateModel(name='Admin', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID'))], options={'permissions': [(\n 'access_admin', 'Can access Wagtail admin')], 'managed': False,\n 'default_permissions': []})]\n",
"step-5": "# Generated by Django 3.0.10 on 2020-12-19 15:07\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n (\"wagtailadmin\", \"0001_create_admin_access_permissions\"),\n ]\n\n operations = [\n migrations.CreateModel(\n name=\"Admin\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n ],\n options={\n \"permissions\": [(\"access_admin\", \"Can access Wagtail admin\")],\n \"managed\": False,\n \"default_permissions\": [],\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.urls import reverse
from django.utils.translation import get_language
from drf_dynamic_fields import DynamicFieldsMixin
from geotrek.api.v2.serializers import AttachmentSerializer
from mapentity.serializers import MapentityGeojsonModelSerializer
from rest_framework import serializers as rest_serializers
from rest_framework_gis import fields as rest_gis_fields
from rest_framework_gis.serializers import GeoFeatureModelSerializer
from geotrek.common.serializers import PictogramSerializerMixin, TranslatedModelSerializer
from . import models as sensitivity_models
class RuleSerializer(PictogramSerializerMixin, rest_serializers.ModelSerializer):
class Meta:
model = sensitivity_models.Rule
fields = ('id', 'code', 'name', 'pictogram', 'description', 'url')
class SportPracticeSerializer(TranslatedModelSerializer):
class Meta:
model = sensitivity_models.SportPractice
fields = ('id', 'name')
class SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):
practices = SportPracticeSerializer(many=True)
period = rest_serializers.SerializerMethodField()
def get_period(self, obj):
return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]
class Meta:
model = sensitivity_models.Species
fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']
class SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.ModelSerializer):
category = rest_serializers.CharField(source='category_display')
structure = rest_serializers.SlugRelatedField('name', read_only=True)
species = rest_serializers.CharField(source='species_display')
class Meta:
model = sensitivity_models.SensitiveArea
fields = "__all__"
class SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):
radius = rest_serializers.IntegerField()
class Meta(MapentityGeojsonModelSerializer.Meta):
model = sensitivity_models.SensitiveArea
fields = ['id', 'species', 'radius', 'published']
class SensitiveAreaAPISerializer(TranslatedModelSerializer):
species = SpeciesSerializer()
kml_url = rest_serializers.SerializerMethodField()
attachments = AttachmentSerializer(many=True)
rules = RuleSerializer(many=True)
def get_kml_url(self, obj):
return reverse('sensitivity:sensitivearea_kml_detail', kwargs={'lang': get_language(), 'pk': obj.pk})
class Meta:
model = sensitivity_models.SensitiveArea
fields = ('id', 'species', 'description', 'contact', 'published', 'publication_date', 'kml_url', 'attachments', 'rules')
class SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer, SensitiveAreaAPISerializer):
# Annotated geom field with API_SRID
geom2d_transformed = rest_gis_fields.GeometryField(read_only=True, precision=7)
class Meta(SensitiveAreaAPISerializer.Meta):
geo_field = 'geom2d_transformed'
fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed', )
|
normal
|
{
"blob_id": "dfd5915428dc8f15fb61c5d81f22dfecfe29af15",
"index": 6409,
"step-1": "<mask token>\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.\n ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = '__all__'\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={\n 'lang': get_language(), 'pk': obj.pk})\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published',\n 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,\n SensitiveAreaAPISerializer):\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,\n precision=7)\n\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'\n ,)\n",
"step-2": "<mask token>\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n <mask token>\n <mask token>\n\n def get_period(self, obj):\n return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]\n\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.\n ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = '__all__'\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={\n 'lang': get_language(), 'pk': obj.pk})\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published',\n 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,\n SensitiveAreaAPISerializer):\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,\n precision=7)\n\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'\n ,)\n",
"step-3": "<mask token>\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n practices = SportPracticeSerializer(many=True)\n period = rest_serializers.SerializerMethodField()\n\n def get_period(self, obj):\n return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]\n\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.\n ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = '__all__'\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={\n 'lang': get_language(), 'pk': obj.pk})\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published',\n 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,\n SensitiveAreaAPISerializer):\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,\n precision=7)\n\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'\n ,)\n",
"step-4": "<mask token>\n\n\nclass SportPracticeSerializer(TranslatedModelSerializer):\n\n\n class Meta:\n model = sensitivity_models.SportPractice\n fields = 'id', 'name'\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n practices = SportPracticeSerializer(many=True)\n period = rest_serializers.SerializerMethodField()\n\n def get_period(self, obj):\n return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]\n\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.\n ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = '__all__'\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={\n 'lang': get_language(), 'pk': obj.pk})\n\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published',\n 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer,\n SensitiveAreaAPISerializer):\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True,\n precision=7)\n\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed'\n ,)\n",
"step-5": "from django.urls import reverse\nfrom django.utils.translation import get_language\nfrom drf_dynamic_fields import DynamicFieldsMixin\nfrom geotrek.api.v2.serializers import AttachmentSerializer\nfrom mapentity.serializers import MapentityGeojsonModelSerializer\nfrom rest_framework import serializers as rest_serializers\nfrom rest_framework_gis import fields as rest_gis_fields\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer\n\nfrom geotrek.common.serializers import PictogramSerializerMixin, TranslatedModelSerializer\nfrom . import models as sensitivity_models\n\n\nclass RuleSerializer(PictogramSerializerMixin, rest_serializers.ModelSerializer):\n\n class Meta:\n model = sensitivity_models.Rule\n fields = ('id', 'code', 'name', 'pictogram', 'description', 'url')\n\n\nclass SportPracticeSerializer(TranslatedModelSerializer):\n class Meta:\n model = sensitivity_models.SportPractice\n fields = ('id', 'name')\n\n\nclass SpeciesSerializer(TranslatedModelSerializer, PictogramSerializerMixin):\n practices = SportPracticeSerializer(many=True)\n period = rest_serializers.SerializerMethodField()\n\n def get_period(self, obj):\n return [getattr(obj, 'period{:02}'.format(p)) for p in range(1, 13)]\n\n class Meta:\n model = sensitivity_models.Species\n fields = ['id', 'name', 'practices', 'url', 'pictogram', 'period']\n\n\nclass SensitiveAreaSerializer(DynamicFieldsMixin, rest_serializers.ModelSerializer):\n category = rest_serializers.CharField(source='category_display')\n structure = rest_serializers.SlugRelatedField('name', read_only=True)\n species = rest_serializers.CharField(source='species_display')\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = \"__all__\"\n\n\nclass SensitiveAreaGeojsonSerializer(MapentityGeojsonModelSerializer):\n radius = rest_serializers.IntegerField()\n\n class Meta(MapentityGeojsonModelSerializer.Meta):\n model = sensitivity_models.SensitiveArea\n fields = ['id', 'species', 'radius', 'published']\n\n\nclass SensitiveAreaAPISerializer(TranslatedModelSerializer):\n species = SpeciesSerializer()\n kml_url = rest_serializers.SerializerMethodField()\n attachments = AttachmentSerializer(many=True)\n rules = RuleSerializer(many=True)\n\n def get_kml_url(self, obj):\n return reverse('sensitivity:sensitivearea_kml_detail', kwargs={'lang': get_language(), 'pk': obj.pk})\n\n class Meta:\n model = sensitivity_models.SensitiveArea\n fields = ('id', 'species', 'description', 'contact', 'published', 'publication_date', 'kml_url', 'attachments', 'rules')\n\n\nclass SensitiveAreaAPIGeojsonSerializer(GeoFeatureModelSerializer, SensitiveAreaAPISerializer):\n # Annotated geom field with API_SRID\n geom2d_transformed = rest_gis_fields.GeometryField(read_only=True, precision=7)\n\n class Meta(SensitiveAreaAPISerializer.Meta):\n geo_field = 'geom2d_transformed'\n fields = SensitiveAreaAPISerializer.Meta.fields + ('geom2d_transformed', )\n",
"step-ids": [
10,
11,
12,
13,
16
]
}
|
[
10,
11,
12,
13,
16
] |
import os , sys , time
print("""
███████████████████████████████
█ █
█═╬═════════════════════════╬═█
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░Wi-fi Fucker Tool░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░coded by arda6░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █
█═╬═════════════════════════╬═█
█ █
███████████████████████████████
""")
pla = sys.platform
if pla == "win32":
win = "Windows"
print(" [!] Your Platform is " +win+ "\n")
elif pla == "darwin":
mac = "MacOs"
print(" [+] Your Platform is " +mac+ "\n")
elif pla == "linux":
mac = "Linux"
print(" [+] Your Platform is " +mac+"\n")
if pla == "win32":
print(" [!] Not Suitable For Tool Windows \n")
time.sleep(3)
exit(" [#] https://www.github/arda6")
print("")
print("""
1) Wep Cracking
2) Wpa2 Cracking
3) Deauth Attack
""")
soru = input("root@eyll:~# ")
if soru == '1':
os.system("python3 main.py")
exit()
elif soru == '2':
os.system("python3 wpa2.py")
elif soru == '3':
os.system("python3 attack.py")
|
normal
|
{
"blob_id": "15eb205e6bd36844fdfc8c05efbc3a3d584c122d",
"index": 7238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n \"\"\"\n\n ███████████████████████████████\n █ █\n █═╬═════════════════════════╬═█\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░coded by arda6░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █═╬═════════════════════════╬═█\n █ █\n ███████████████████████████████\n\n\n\"\"\"\n )\n<mask token>\nif pla == 'win32':\n win = 'Windows'\n print(' [!] Your Platform is ' + win + '\\n')\nelif pla == 'darwin':\n mac = 'MacOs'\n print(' [+] Your Platform is ' + mac + '\\n')\nelif pla == 'linux':\n mac = 'Linux'\n print(' [+] Your Platform is ' + mac + '\\n')\nif pla == 'win32':\n print(' [!] Not Suitable For Tool Windows \\n')\n time.sleep(3)\n exit(' [#] https://www.github/arda6')\nprint('')\nprint(\"\"\"\n\n 1) Wep Cracking\n 2) Wpa2 Cracking\n 3) Deauth Attack\n \n\"\"\")\n<mask token>\nif soru == '1':\n os.system('python3 main.py')\n exit()\nelif soru == '2':\n os.system('python3 wpa2.py')\nelif soru == '3':\n os.system('python3 attack.py')\n",
"step-3": "<mask token>\nprint(\n \"\"\"\n\n ███████████████████████████████\n █ █\n █═╬═════════════════════════╬═█\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░coded by arda6░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █═╬═════════════════════════╬═█\n █ █\n ███████████████████████████████\n\n\n\"\"\"\n )\npla = sys.platform\nif pla == 'win32':\n win = 'Windows'\n print(' [!] Your Platform is ' + win + '\\n')\nelif pla == 'darwin':\n mac = 'MacOs'\n print(' [+] Your Platform is ' + mac + '\\n')\nelif pla == 'linux':\n mac = 'Linux'\n print(' [+] Your Platform is ' + mac + '\\n')\nif pla == 'win32':\n print(' [!] Not Suitable For Tool Windows \\n')\n time.sleep(3)\n exit(' [#] https://www.github/arda6')\nprint('')\nprint(\"\"\"\n\n 1) Wep Cracking\n 2) Wpa2 Cracking\n 3) Deauth Attack\n \n\"\"\")\nsoru = input('root@eyll:~# ')\nif soru == '1':\n os.system('python3 main.py')\n exit()\nelif soru == '2':\n os.system('python3 wpa2.py')\nelif soru == '3':\n os.system('python3 attack.py')\n",
"step-4": "import os, sys, time\nprint(\n \"\"\"\n\n ███████████████████████████████\n █ █\n █═╬═════════════════════════╬═█\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░coded by arda6░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\n █═╬═════════════════════════╬═█\n █ █\n ███████████████████████████████\n\n\n\"\"\"\n )\npla = sys.platform\nif pla == 'win32':\n win = 'Windows'\n print(' [!] Your Platform is ' + win + '\\n')\nelif pla == 'darwin':\n mac = 'MacOs'\n print(' [+] Your Platform is ' + mac + '\\n')\nelif pla == 'linux':\n mac = 'Linux'\n print(' [+] Your Platform is ' + mac + '\\n')\nif pla == 'win32':\n print(' [!] Not Suitable For Tool Windows \\n')\n time.sleep(3)\n exit(' [#] https://www.github/arda6')\nprint('')\nprint(\"\"\"\n\n 1) Wep Cracking\n 2) Wpa2 Cracking\n 3) Deauth Attack\n \n\"\"\")\nsoru = input('root@eyll:~# ')\nif soru == '1':\n os.system('python3 main.py')\n exit()\nelif soru == '2':\n os.system('python3 wpa2.py')\nelif soru == '3':\n os.system('python3 attack.py')\n",
"step-5": "import os , sys , time\r\nprint(\"\"\"\r\n\r\n ███████████████████████████████\r\n █ █\r\n █═╬═════════════════════════╬═█\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░Wi-fi Fucker Tool░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░coded by arda6░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █ ║░░░░░░░░░░░░░░░░░░░░░░░░░║ █\r\n █═╬═════════════════════════╬═█\r\n █ █\r\n ███████████████████████████████\r\n\r\n\r\n\"\"\")\r\npla = sys.platform\r\nif pla == \"win32\":\r\n win = \"Windows\"\r\n print(\" [!] Your Platform is \" +win+ \"\\n\")\r\nelif pla == \"darwin\":\r\n mac = \"MacOs\"\r\n print(\" [+] Your Platform is \" +mac+ \"\\n\")\r\nelif pla == \"linux\":\r\n mac = \"Linux\"\r\n print(\" [+] Your Platform is \" +mac+\"\\n\")\r\nif pla == \"win32\":\r\n print(\" [!] Not Suitable For Tool Windows \\n\")\r\n time.sleep(3)\r\n exit(\" [#] https://www.github/arda6\")\r\nprint(\"\")\r\nprint(\"\"\"\r\n\r\n 1) Wep Cracking\r\n 2) Wpa2 Cracking\r\n 3) Deauth Attack\r\n \r\n\"\"\")\r\n\r\nsoru = input(\"root@eyll:~# \")\r\nif soru == '1':\r\n os.system(\"python3 main.py\")\r\n exit()\r\nelif soru == '2':\r\n os.system(\"python3 wpa2.py\")\r\nelif soru == '3':\r\n os.system(\"python3 attack.py\")\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Data pre-processing
"""
import os
import corenlp
import numpy as np
import ujson as json
from tqdm import tqdm
from collections import Counter
from bilm import dump_token_embeddings
import sys
sys.path.append('../..')
from LIB.utils import save
def process(json_file, outpur_dir, exclude_titles=None, include_titles=None):
"""
:param json_file: original data in json format
:param outpur_dir: the output directory of pre-processed data
:param exclude_titles: article titles to exclude
:param include_titles: article titles to include
"""
para_file = "{}/paras".format(outpur_dir)
question_file = "{}/questions".format(outpur_dir)
sent_file = "{}/sents".format(outpur_dir)
answer_file = "{}/answers".format(outpur_dir)
print("Generating {} raw data...".format(json_file))
max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0
with open(json_file, "r") as fh, corenlp.CoreNLPClient(annotators="tokenize ssplit pos ner".split(),
endpoint="http://localhost:9099", timeout=50000) as client:
source = json.load(fh)
for article in tqdm(source["data"]):
title = article["title"]
if include_titles and title not in include_titles:
continue
if exclude_titles and title in exclude_titles:
continue
for para in article["paragraphs"]:
paragraphs, questions, answers, sents, ids = [], [], [], [], []
paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [], [], []
paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [], [], []
answers_index, sents_index = [], []
# paragraph
context = para["context"]
if not context.strip():
continue
ann_para = client.annotate(context)
max_sent = max(max_sent, len(ann_para.sentence))
max_sent_len = max(max_sent_len, max(map(lambda x: len(x.token), ann_para.sentence)))
ann_para_tokens, paragraph_tokens, paragraph_pos, paragraph_ner = [], [], [], []
for sent in ann_para.sentence:
for token in sent.token:
ann_para_tokens.append(token)
paragraph_tokens.append(token.word)
paragraph_pos.append(token.pos)
paragraph_ner.append(token.ner)
# questions
for qa in para["qas"]:
# question
ques = qa["question"]
id = qa["id"]
if not ques.strip():
continue
ann_que = client.annotate(ques)
max_que_len = max(max_que_len, len(ann_que.sentence[0].token))
question_tokens, question_pos, question_ner = [], [], []
for sent in ann_que.sentence:
for token in sent.token:
question_tokens.append(token.word)
question_pos.append(token.pos)
question_ner.append(token.ner)
# answer
all_answer_tokens, all_answer_pos, all_answer_ner, all_answer_index = [], [], [], []
all_sent_tokens, all_sent_pos, all_sent_ner, all_sent_index = [], [], [], []
for answer in qa["answers"]:
answer_text = answer["text"]
if not answer_text.strip():
continue
ann_ans = client.annotate(answer_text)
answer_tokens, answer_pos, answer_ner = [], [], []
for sent in ann_ans.sentence:
for token in sent.token:
answer_tokens.append(token.word)
answer_pos.append(token.pos)
answer_ner.append(token.ner)
all_answer_tokens.append(' '.join(answer_tokens))
all_answer_pos.append(' '.join(answer_pos))
all_answer_ner.append(' '.join(answer_ner))
answer_start = answer['answer_start']
answer_end = answer_start + len(answer_text)
# sentence
sentence = []
for sent in ann_para.sentence:
if sent.characterOffsetBegin <= answer_start <= sent.characterOffsetEnd or \
sent.characterOffsetBegin <= answer_end <= sent.characterOffsetEnd:
sentence.append(sent)
sentence = [token for sent in sentence for token in sent.token]
sentence_tokens = [token.word for token in sentence]
sentence_pos = [token.pos for token in sentence]
sentence_ner = [token.ner for token in sentence]
all_sent_tokens.append(' '.join(sentence_tokens))
all_sent_pos.append(' '.join(sentence_pos))
all_sent_ner.append(' '.join(sentence_ner))
# sentence index
y1_sent = sentence[0].tokenBeginIndex
y2_sent = sentence[-1].tokenBeginIndex
# answer index
y1_ans = None
for i, token in enumerate(sentence):
if token.beginChar - 1 <= answer_start <= token.endChar:
y1_ans = sentence[0].tokenBeginIndex + i
try:
assert y1_ans != None
except:
continue
y2_ans = y1_ans + len(answer_tokens) - 1
all_answer_index.append("{},{}".format(y1_ans, y2_ans))
all_sent_index.append("{},{}".format(y1_sent, y2_sent))
paragraphs.append(' '.join(paragraph_tokens))
paragraphs_pos.append(' '.join(paragraph_pos))
paragraphs_ner.append(' '.join(paragraph_ner))
questions.append(' '.join(question_tokens))
questions_pos.append(' '.join(question_pos))
questions_ner.append(' '.join(question_ner))
answers.append('\t'.join(all_answer_tokens))
answers_pos.append('\t'.join(all_answer_pos))
answers_ner.append('\t'.join(all_answer_ner))
answers_index.append('\t'.join(all_answer_index))
sents.append('\t'.join(all_sent_tokens))
sents_pos.append('\t'.join(all_sent_pos))
sents_ner.append('\t'.join(all_sent_ner))
sents_index.append('\t'.join(all_sent_index))
ids.append(id)
# save para
with open("{}.tok".format(para_file), 'a') as f:
f.write('\n'.join(paragraphs) + '\n')
with open("{}.pos".format(para_file), 'a') as f:
f.write('\n'.join(paragraphs_pos) + '\n')
with open("{}.ner".format(para_file), 'a') as f:
f.write('\n'.join(paragraphs_ner) + '\n')
with open("{}.id".format(para_file), 'a') as f:
f.write('\n'.join(ids) + '\n')
# save question
with open("{}.tok".format(question_file), 'a') as f:
f.write('\n'.join(questions) + '\n')
with open("{}.pos".format(question_file), 'a') as f:
f.write('\n'.join(questions_pos) + '\n')
with open("{}.ner".format(question_file), 'a') as f:
f.write('\n'.join(questions_ner) + '\n')
# save answer
with open("{}.tok".format(answer_file), 'a') as f:
f.write('\n'.join(answers) + '\n')
with open("{}.pos".format(answer_file), 'a') as f:
f.write('\n'.join(answers_pos) + '\n')
with open("{}.ner".format(answer_file), 'a') as f:
f.write('\n'.join(answers_ner) + '\n')
with open("{}.index".format(answer_file), 'a') as f:
f.write("\n".join(answers_index) + '\n')
# save sent
with open("{}.tok".format(sent_file), 'a') as f:
f.write('\n'.join(sents) + '\n')
with open("{}.pos".format(sent_file), 'a') as f:
f.write('\n'.join(sents_pos) + '\n')
with open("{}.ner".format(sent_file), 'a') as f:
f.write('\n'.join(sents_ner) + '\n')
with open("{}.index".format(sent_file), 'a') as f:
f.write("\n".join(sents_index) + '\n')
# get BIO labels
label(para_file, answer_file)
def label(para_file, answer_file):
# get the answer BIO label for paragraph
max_node = 0
with open("{}.tok".format(para_file), 'r') as fp, open("{}.label".format(para_file), 'a') as fl, \
open("{}.index".format(answer_file), 'r') as fa:
while True:
para = fp.readline()
if not para:
break
words = [p for p in para.strip().split(' ')]
max_node = max(len(words), max_node)
answer = fa.readline()
labels = []
try:
start, end = map(int, answer.split('\t')[0].split(','))
for i in range(len(words)):
if start <= i <= end:
# answer words
if i == start:
labels.append('B')
else:
labels.append('I')
else:
# non answer words
labels.append('O')
except:
pass
fl.write(' '.join(labels) + '\n')
return max_node
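# Hedged illustration (added editorially, not part of the original script):
# a tiny self-contained sketch of the BIO scheme that label() writes, using a
# made-up six-token paragraph whose answer covers token indices 2..3, the same
# start/end convention that process() stores in the answers index file.
def _bio_label_example():
    words = "the cat sat on the mat".split()
    start, end = 2, 3
    labels = ['B' if i == start else ('I' if start < i <= end else 'O')
              for i in range(len(words))]
    assert labels == ['O', 'O', 'B', 'I', 'O', 'O']
    return labels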
def get_data(train_json, dev_json, test_title_file, output_dir):
test_titles = open(test_title_file, 'r').readlines()
test_titles = set([line.strip() for line in test_titles])
process(train_json, "{}/train/".format(output_dir), exclude_titles=test_titles)
process(dev_json, "{}/dev/".format(output_dir))
process(train_json, "{}/test/".format(output_dir), include_titles=test_titles)
def get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size, vocab_file):
"""
get word embedding matrix from glove
"""
print("Generating word embedding...")
# load word embeddings
embedding_dict = {}
with open(emb_file, "r", encoding="utf-8") as fh:
for line in tqdm(fh, total=emb_size):
array = line.split()
word = "".join(array[0:-vec_size])
vector = list(map(float, array[-vec_size:]))
embedding_dict[word] = vector
TRANSLATE = {
"-lsb-": "[", "-rsb-": "]", "-lrb-": "(", "-rrb-": ")", "-lcb-": "{",
"-rcb-": "}", "-LSB-": "[", "-RSB-": "]", "-LRB-": "(", "-RRB-": ")",
"-LCB-": "{", "-RCB-": "}"
}
SPECIAL_TOKENS = ["<NULL>", "<UNK>", "<S>", "</S>"]
words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x[1], reverse=True)))
words = SPECIAL_TOKENS + words
if vocab_size > 0:
words = words[:vocab_size]
with open(vocab_file, 'w') as f:
f.write('\n'.join(words[1:]))
embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))
word2idx_dict = {}
unknown_count = 0
for i, word in enumerate(words):
word2idx_dict[word] = i
if word in TRANSLATE:
word = TRANSLATE[word]
done = False
for w in (word, word.lower(), word.upper(), word.capitalize()):
if w in embedding_dict:
embedding[i] = embedding_dict[w]
done = True
break
if not done:
unknown_count += 1
return embedding, word2idx_dict, unknown_count
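# Hedged illustration (added editorially): the casing fallback used above when
# matching a vocabulary word against GloVe; try the surface form first, then
# the lower/upper/capitalized variants, and fall back to the random
# initialisation when none of them has a pretrained vector.
def _glove_lookup_example(embedding_dict, word):
    for w in (word, word.lower(), word.upper(), word.capitalize()):
        if w in embedding_dict:
            return embedding_dict[w]
    return None  # caller keeps the randomly initialised embedding row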
def get_tag_embedding(counter, data_type, vec_size):
"""
get pos/ner/label tags' embedding matrix
"""
print("Generating {} tag embedding...".format(data_type))
SPECIAL_TOKENS = ["<NULL>", "<UNK>"]
tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x[1], reverse=True)))
tags = SPECIAL_TOKENS + tags
embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))
word2idx_dict = {w: i for i, w in enumerate(tags)}
return embedding, word2idx_dict
def get_vocab(config):
print("Get the vocabulary...")
word_counter, char_counter = Counter(), Counter()
pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()
files = [(config.train_para_file, config.train_question_file), (config.dev_para_file, config.dev_question_file)]
for para_file, que_file in files:
with open("{}.tok".format(para_file), 'r') as fp, open("{}.tok".format(que_file), 'r') as fq, \
open("{}.pos".format(para_file), 'r') as fpp, open("{}.pos".format(que_file), 'r') as fqp, \
open("{}.ner".format(para_file), 'r') as fpn, open("{}.ner".format(que_file), 'r') as fqn, \
open("{}.label".format(para_file), 'r') as fpl:
while True:
para, question = fp.readline(), fq.readline()
pos, que_pos = fpp.readline(), fqp.readline()
ner, que_ner = fpn.readline(), fqn.readline()
label = fpl.readline()
if not question or not para:
break
if config.lower_word:
para = para.lower()
question = question.lower()
para_tokens = para.strip().split(' ')
que_tokens = question.strip().split(' ')
pos_tags = pos.strip().split(' ')
ner_tags = ner.strip().split(' ')
que_pos_tags = que_pos.strip().split(' ')
que_ner_tags = que_ner.strip().split(' ')
labels = label.strip().split(' ')
for token in para_tokens + que_tokens:
word_counter[token] += 1
for char in list(token):
char_counter[char] += 1
for pos_tag in pos_tags + que_pos_tags:
pos_counter[pos_tag] += 1
for ner_tag in ner_tags + que_ner_tags:
ner_counter[ner_tag] += 1
for label in labels:
label_counter[label] += 1
word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter, emb_file=config.glove_word_file,
emb_size=config.glove_word_size,
vocab_size=config.vocab_size_limit,
vec_size=config.glove_dim, vocab_file=config.vocab_file)
char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, "char", vec_size=config.char_dim)
pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, "pos", vec_size=config.pos_dim)
ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, "ner", vec_size=config.ner_dim)
label_emb_mat, label2idx_dict = get_tag_embedding(label_counter, "label", vec_size=config.label_dim)
print("{} out of {} are not in glove".format(unk_num, len(word2idx_dict)))
print("{} chars".format(char_emb_mat.shape[0]))
print("{} pos tags, {} ner tags, {} answer labels, {} chars".format(
pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0], char_emb_mat.shape[0]))
save(config.word_emb_file, word_emb_mat, message="word embedding")
save(config.char_emb_file, char_emb_mat, message="char embedding")
save(config.pos_emb_file, pos_emb_mat, message="pos embedding")
save(config.ner_emb_file, ner_emb_mat, message="ner embedding")
save(config.label_emb_file, label_emb_mat, message="label embedding")
save(config.word_dictionary, word2idx_dict, message="word dictionary")
save(config.char_dictionary, char2idx_dict, message="char dictionary")
save(config.pos_dictionary, pos2idx_dict, message="pos dictionary")
save(config.ner_dictionary, ner2idx_dict, message="ner dictionary")
save(config.label_dictionary, label2idx_dict, message="label dictionary")
print("Dump elmo word embedding...")
token_embedding_file = config.embedding_file
dump_token_embeddings(
config.vocab_file, config.elmo_options_file, config.elmo_weight_file, token_embedding_file
)
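# Hedged note (added editorially): get_vocab() reads all of the attributes
# below from its `config` argument; a driver script (not shown in this file)
# is assumed to supply them, e.g. via argparse or a small namespace object.
_GET_VOCAB_CONFIG_ATTRS = (
    'train_para_file', 'train_question_file', 'dev_para_file',
    'dev_question_file', 'lower_word', 'glove_word_file', 'glove_word_size',
    'vocab_size_limit', 'glove_dim', 'vocab_file', 'char_dim', 'pos_dim',
    'ner_dim', 'label_dim', 'word_emb_file', 'char_emb_file', 'pos_emb_file',
    'ner_emb_file', 'label_emb_file', 'word_dictionary', 'char_dictionary',
    'pos_dictionary', 'ner_dictionary', 'label_dictionary', 'embedding_file',
    'elmo_options_file', 'elmo_weight_file',
)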
if __name__ == '__main__':
# process data
os.system("mkdir data; mkdir data/processed; mkdir data/processed/train; "
"mkdir data/processed/dev; mkdir data/processed/test")
get_data("../../LIB/squad/train-v1.1.json", "../../LIB/squad/dev-v1.1.json",
"../../LIB/squad/doclist-test.txt", "data/processed")
|
normal
|
{
"blob_id": "0c37806f0a7c0976711edd685fd64d2616147cb6",
"index": 4623,
"step-1": "<mask token>\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = '{}/paras'.format(outpur_dir)\n question_file = '{}/questions'.format(outpur_dir)\n sent_file = '{}/sents'.format(outpur_dir)\n answer_file = '{}/answers'.format(outpur_dir)\n print('Generating {} raw data...'.format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, 'r') as fh, corenlp.CoreNLPClient(annotators=\n 'tokenize ssplit pos ner'.split(), endpoint='http://localhost:9099',\n timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source['data']):\n title = article['title']\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article['paragraphs']:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [\n ], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [\n ], [], []\n answers_index, sents_index = [], []\n context = para['context']\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.\n token), ann_para.sentence)))\n (ann_para_tokens, paragraph_tokens, paragraph_pos,\n paragraph_ner) = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n ann_para_tokens.append(token)\n paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n for qa in para['qas']:\n ques = qa['question']\n id = qa['id']\n if not ques.strip():\n continue\n ann_que = client.annotate(ques)\n max_que_len = max(max_que_len, len(ann_que.sentence[0].\n token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n (all_answer_tokens, all_answer_pos, all_answer_ner,\n all_answer_index) = [], [], [], []\n (all_sent_tokens, all_sent_pos, all_sent_ner,\n all_sent_index) = [], [], [], []\n for answer in qa['answers']:\n answer_text = answer['text']\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n sentence = []\n for sent in ann_para.sentence:\n if (sent.characterOffsetBegin <= answer_start <=\n sent.characterOffsetEnd or sent.\n characterOffsetBegin <= answer_end <= sent.\n characterOffsetEnd):\n sentence.append(sent)\n sentence = [token for sent in sentence for token in\n sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' '.join(sentence_tokens))\n 
all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n y1_sent = sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n y1_ans = None\n for i, token in enumerate(sentence):\n if (token.beginChar - 1 <= answer_start <=\n token.endChar):\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + len(answer_tokens) - 1\n all_answer_index.append('{},{}'.format(y1_ans, y2_ans))\n all_sent_index.append('{},{}'.format(y1_sent, y2_sent))\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n with open('{}.tok'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open('{}.pos'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open('{}.ner'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open('{}.id'.format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n with open('{}.tok'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open('{}.pos'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open('{}.ner'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n with open('{}.tok'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with open('{}.pos'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open('{}.ner'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with open('{}.index'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_index) + '\\n')\n with open('{}.tok'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with open('{}.pos'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_pos) + '\\n')\n with open('{}.ner'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open('{}.index'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_index) + '\\n')\n label(para_file, answer_file)\n\n\n<mask token>\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for line in test_titles])\n process(train_json, '{}/train/'.format(output_dir), exclude_titles=\n test_titles)\n process(dev_json, '{}/dev/'.format(output_dir))\n process(train_json, '{}/test/'.format(output_dir), include_titles=\n test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size,\n vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print('Generating word embedding...')\n embedding_dict = {}\n with open(emb_file, 'r', encoding='utf-8') as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = ''.join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n 
TRANSLATE = {'-lsb-': '[', '-rsb-': ']', '-lrb-': '(', '-rrb-': ')',\n '-lcb-': '{', '-rcb-': '}', '-LSB-': '[', '-RSB-': ']', '-LRB-':\n '(', '-RRB-': ')', '-LCB-': '{', '-RCB-': '}'}\n SPECIAL_TOKENS = ['<NULL>', '<UNK>', '<S>', '</S>']\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x:\n x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print('Generating {} tag embedding...'.format(data_type))\n SPECIAL_TOKENS = ['<NULL>', '<UNK>']\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x\n [1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print('Get the vocabulary...')\n word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.\n dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.tok'.\n format(que_file), 'r') as fq, open('{}.pos'.format(para_file), 'r'\n ) as fpp, open('{}.pos'.format(que_file), 'r') as fqp, open(\n '{}.ner'.format(para_file), 'r') as fpn, open('{}.ner'.format(\n que_file), 'r') as fqn, open('{}.label'.format(para_file), 'r'\n ) as fpl:\n while True:\n para, question = fp.readline(), fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in (para_tokens + que_tokens):\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in (pos_tags + que_pos_tags):\n pos_counter[pos_tag] += 1\n for ner_tag in (ner_tags + que_ner_tags):\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter,\n emb_file=config.glove_word_file, emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit, vec_size=config.glove_dim,\n vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, 'char',\n vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, 'pos',\n vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, 'ner',\n 
vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter,\n 'label', vec_size=config.label_dim)\n print('{} out of {} are not in glove'.format(unk_num, len(word2idx_dict)))\n print('{} chars'.format(char_emb_mat.shape[0]))\n print('{} pos tags, {} ner tags, {} answer labels, {} chars'.format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0],\n char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message='word embedding')\n save(config.char_emb_file, char_emb_mat, message='char embedding')\n save(config.pos_emb_file, pos_emb_mat, message='pos embedding')\n save(config.ner_emb_file, ner_emb_mat, message='ner embedding')\n save(config.label_emb_file, label_emb_mat, message='label embedding')\n save(config.word_dictionary, word2idx_dict, message='word dictionary')\n save(config.char_dictionary, char2idx_dict, message='char dictionary')\n save(config.pos_dictionary, pos2idx_dict, message='pos dictionary')\n save(config.ner_dictionary, ner2idx_dict, message='ner dictionary')\n save(config.label_dictionary, label2idx_dict, message='label dictionary')\n print('Dump elmo word embedding...')\n token_embedding_file = config.embedding_file\n dump_token_embeddings(config.vocab_file, config.elmo_options_file,\n config.elmo_weight_file, token_embedding_file)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = '{}/paras'.format(outpur_dir)\n question_file = '{}/questions'.format(outpur_dir)\n sent_file = '{}/sents'.format(outpur_dir)\n answer_file = '{}/answers'.format(outpur_dir)\n print('Generating {} raw data...'.format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, 'r') as fh, corenlp.CoreNLPClient(annotators=\n 'tokenize ssplit pos ner'.split(), endpoint='http://localhost:9099',\n timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source['data']):\n title = article['title']\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article['paragraphs']:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [\n ], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [\n ], [], []\n answers_index, sents_index = [], []\n context = para['context']\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.\n token), ann_para.sentence)))\n (ann_para_tokens, paragraph_tokens, paragraph_pos,\n paragraph_ner) = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n ann_para_tokens.append(token)\n paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n for qa in para['qas']:\n ques = qa['question']\n id = qa['id']\n if not ques.strip():\n continue\n ann_que = client.annotate(ques)\n max_que_len = max(max_que_len, len(ann_que.sentence[0].\n token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n (all_answer_tokens, all_answer_pos, all_answer_ner,\n all_answer_index) = [], [], [], []\n (all_sent_tokens, all_sent_pos, all_sent_ner,\n all_sent_index) = [], [], [], []\n for answer in qa['answers']:\n answer_text = answer['text']\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n sentence = []\n for sent in ann_para.sentence:\n if (sent.characterOffsetBegin <= answer_start <=\n sent.characterOffsetEnd or sent.\n characterOffsetBegin <= answer_end <= sent.\n characterOffsetEnd):\n sentence.append(sent)\n sentence = [token for sent in sentence for token in\n sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' '.join(sentence_tokens))\n 
all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n y1_sent = sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n y1_ans = None\n for i, token in enumerate(sentence):\n if (token.beginChar - 1 <= answer_start <=\n token.endChar):\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + len(answer_tokens) - 1\n all_answer_index.append('{},{}'.format(y1_ans, y2_ans))\n all_sent_index.append('{},{}'.format(y1_sent, y2_sent))\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n with open('{}.tok'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open('{}.pos'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open('{}.ner'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open('{}.id'.format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n with open('{}.tok'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open('{}.pos'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open('{}.ner'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n with open('{}.tok'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with open('{}.pos'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open('{}.ner'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with open('{}.index'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_index) + '\\n')\n with open('{}.tok'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with open('{}.pos'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_pos) + '\\n')\n with open('{}.ner'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open('{}.index'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_index) + '\\n')\n label(para_file, answer_file)\n\n\ndef label(para_file, answer_file):\n max_node = 0\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.label'.\n format(para_file), 'a') as fl, open('{}.index'.format(answer_file), 'r'\n ) as fa:\n while True:\n para = fp.readline()\n if not para:\n break\n words = [p for p in para.strip().split(' ')]\n max_node = max(len(words), max_node)\n answer = fa.readline()\n labels = []\n try:\n start, end = map(int, answer.split('\\t')[0].split(','))\n for i in range(len(words)):\n if start <= i <= end:\n if i == start:\n labels.append('B')\n else:\n labels.append('I')\n else:\n labels.append('O')\n except:\n pass\n fl.write(' '.join(labels) + '\\n')\n return max_node\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for line in test_titles])\n 
process(train_json, '{}/train/'.format(output_dir), exclude_titles=\n test_titles)\n process(dev_json, '{}/dev/'.format(output_dir))\n process(train_json, '{}/test/'.format(output_dir), include_titles=\n test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size,\n vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print('Generating word embedding...')\n embedding_dict = {}\n with open(emb_file, 'r', encoding='utf-8') as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = ''.join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n TRANSLATE = {'-lsb-': '[', '-rsb-': ']', '-lrb-': '(', '-rrb-': ')',\n '-lcb-': '{', '-rcb-': '}', '-LSB-': '[', '-RSB-': ']', '-LRB-':\n '(', '-RRB-': ')', '-LCB-': '{', '-RCB-': '}'}\n SPECIAL_TOKENS = ['<NULL>', '<UNK>', '<S>', '</S>']\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x:\n x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print('Generating {} tag embedding...'.format(data_type))\n SPECIAL_TOKENS = ['<NULL>', '<UNK>']\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x\n [1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print('Get the vocabulary...')\n word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.\n dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.tok'.\n format(que_file), 'r') as fq, open('{}.pos'.format(para_file), 'r'\n ) as fpp, open('{}.pos'.format(que_file), 'r') as fqp, open(\n '{}.ner'.format(para_file), 'r') as fpn, open('{}.ner'.format(\n que_file), 'r') as fqn, open('{}.label'.format(para_file), 'r'\n ) as fpl:\n while True:\n para, question = fp.readline(), fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in (para_tokens + que_tokens):\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in (pos_tags + que_pos_tags):\n 
pos_counter[pos_tag] += 1\n for ner_tag in (ner_tags + que_ner_tags):\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter,\n emb_file=config.glove_word_file, emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit, vec_size=config.glove_dim,\n vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, 'char',\n vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, 'pos',\n vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, 'ner',\n vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter,\n 'label', vec_size=config.label_dim)\n print('{} out of {} are not in glove'.format(unk_num, len(word2idx_dict)))\n print('{} chars'.format(char_emb_mat.shape[0]))\n print('{} pos tags, {} ner tags, {} answer labels, {} chars'.format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0],\n char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message='word embedding')\n save(config.char_emb_file, char_emb_mat, message='char embedding')\n save(config.pos_emb_file, pos_emb_mat, message='pos embedding')\n save(config.ner_emb_file, ner_emb_mat, message='ner embedding')\n save(config.label_emb_file, label_emb_mat, message='label embedding')\n save(config.word_dictionary, word2idx_dict, message='word dictionary')\n save(config.char_dictionary, char2idx_dict, message='char dictionary')\n save(config.pos_dictionary, pos2idx_dict, message='pos dictionary')\n save(config.ner_dictionary, ner2idx_dict, message='ner dictionary')\n save(config.label_dictionary, label2idx_dict, message='label dictionary')\n print('Dump elmo word embedding...')\n token_embedding_file = config.embedding_file\n dump_token_embeddings(config.vocab_file, config.elmo_options_file,\n config.elmo_weight_file, token_embedding_file)\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../..')\n<mask token>\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = '{}/paras'.format(outpur_dir)\n question_file = '{}/questions'.format(outpur_dir)\n sent_file = '{}/sents'.format(outpur_dir)\n answer_file = '{}/answers'.format(outpur_dir)\n print('Generating {} raw data...'.format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, 'r') as fh, corenlp.CoreNLPClient(annotators=\n 'tokenize ssplit pos ner'.split(), endpoint='http://localhost:9099',\n timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source['data']):\n title = article['title']\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article['paragraphs']:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [\n ], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [\n ], [], []\n answers_index, sents_index = [], []\n context = para['context']\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.\n token), ann_para.sentence)))\n (ann_para_tokens, paragraph_tokens, paragraph_pos,\n paragraph_ner) = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n ann_para_tokens.append(token)\n paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n for qa in para['qas']:\n ques = qa['question']\n id = qa['id']\n if not ques.strip():\n continue\n ann_que = client.annotate(ques)\n max_que_len = max(max_que_len, len(ann_que.sentence[0].\n token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n (all_answer_tokens, all_answer_pos, all_answer_ner,\n all_answer_index) = [], [], [], []\n (all_sent_tokens, all_sent_pos, all_sent_ner,\n all_sent_index) = [], [], [], []\n for answer in qa['answers']:\n answer_text = answer['text']\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n sentence = []\n for sent in ann_para.sentence:\n if (sent.characterOffsetBegin <= answer_start <=\n sent.characterOffsetEnd or sent.\n characterOffsetBegin <= answer_end <= sent.\n characterOffsetEnd):\n sentence.append(sent)\n sentence = [token for sent in sentence for token in\n sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' 
'.join(sentence_tokens))\n all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n y1_sent = sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n y1_ans = None\n for i, token in enumerate(sentence):\n if (token.beginChar - 1 <= answer_start <=\n token.endChar):\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + len(answer_tokens) - 1\n all_answer_index.append('{},{}'.format(y1_ans, y2_ans))\n all_sent_index.append('{},{}'.format(y1_sent, y2_sent))\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n with open('{}.tok'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open('{}.pos'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open('{}.ner'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open('{}.id'.format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n with open('{}.tok'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open('{}.pos'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open('{}.ner'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n with open('{}.tok'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with open('{}.pos'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open('{}.ner'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with open('{}.index'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_index) + '\\n')\n with open('{}.tok'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with open('{}.pos'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_pos) + '\\n')\n with open('{}.ner'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open('{}.index'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_index) + '\\n')\n label(para_file, answer_file)\n\n\ndef label(para_file, answer_file):\n max_node = 0\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.label'.\n format(para_file), 'a') as fl, open('{}.index'.format(answer_file), 'r'\n ) as fa:\n while True:\n para = fp.readline()\n if not para:\n break\n words = [p for p in para.strip().split(' ')]\n max_node = max(len(words), max_node)\n answer = fa.readline()\n labels = []\n try:\n start, end = map(int, answer.split('\\t')[0].split(','))\n for i in range(len(words)):\n if start <= i <= end:\n if i == start:\n labels.append('B')\n else:\n labels.append('I')\n else:\n labels.append('O')\n except:\n pass\n fl.write(' '.join(labels) + '\\n')\n return max_node\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for 
line in test_titles])\n process(train_json, '{}/train/'.format(output_dir), exclude_titles=\n test_titles)\n process(dev_json, '{}/dev/'.format(output_dir))\n process(train_json, '{}/test/'.format(output_dir), include_titles=\n test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size,\n vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print('Generating word embedding...')\n embedding_dict = {}\n with open(emb_file, 'r', encoding='utf-8') as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = ''.join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n TRANSLATE = {'-lsb-': '[', '-rsb-': ']', '-lrb-': '(', '-rrb-': ')',\n '-lcb-': '{', '-rcb-': '}', '-LSB-': '[', '-RSB-': ']', '-LRB-':\n '(', '-RRB-': ')', '-LCB-': '{', '-RCB-': '}'}\n SPECIAL_TOKENS = ['<NULL>', '<UNK>', '<S>', '</S>']\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x:\n x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print('Generating {} tag embedding...'.format(data_type))\n SPECIAL_TOKENS = ['<NULL>', '<UNK>']\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x\n [1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print('Get the vocabulary...')\n word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.\n dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.tok'.\n format(que_file), 'r') as fq, open('{}.pos'.format(para_file), 'r'\n ) as fpp, open('{}.pos'.format(que_file), 'r') as fqp, open(\n '{}.ner'.format(para_file), 'r') as fpn, open('{}.ner'.format(\n que_file), 'r') as fqn, open('{}.label'.format(para_file), 'r'\n ) as fpl:\n while True:\n para, question = fp.readline(), fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in (para_tokens + que_tokens):\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in 
(pos_tags + que_pos_tags):\n pos_counter[pos_tag] += 1\n for ner_tag in (ner_tags + que_ner_tags):\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter,\n emb_file=config.glove_word_file, emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit, vec_size=config.glove_dim,\n vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, 'char',\n vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, 'pos',\n vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, 'ner',\n vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter,\n 'label', vec_size=config.label_dim)\n print('{} out of {} are not in glove'.format(unk_num, len(word2idx_dict)))\n print('{} chars'.format(char_emb_mat.shape[0]))\n print('{} pos tags, {} ner tags, {} answer labels, {} chars'.format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0],\n char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message='word embedding')\n save(config.char_emb_file, char_emb_mat, message='char embedding')\n save(config.pos_emb_file, pos_emb_mat, message='pos embedding')\n save(config.ner_emb_file, ner_emb_mat, message='ner embedding')\n save(config.label_emb_file, label_emb_mat, message='label embedding')\n save(config.word_dictionary, word2idx_dict, message='word dictionary')\n save(config.char_dictionary, char2idx_dict, message='char dictionary')\n save(config.pos_dictionary, pos2idx_dict, message='pos dictionary')\n save(config.ner_dictionary, ner2idx_dict, message='ner dictionary')\n save(config.label_dictionary, label2idx_dict, message='label dictionary')\n print('Dump elmo word embedding...')\n token_embedding_file = config.embedding_file\n dump_token_embeddings(config.vocab_file, config.elmo_options_file,\n config.elmo_weight_file, token_embedding_file)\n\n\nif __name__ == '__main__':\n os.system(\n 'mkdir data; mkdir data/processed; mkdir data/processed/train; mkdir data/processed/dev; mkdir data/processed/test'\n )\n get_data('../../LIB/squad/train-v1.1.json',\n '../../LIB/squad/dev-v1.1.json', '../../LIB/squad/doclist-test.txt',\n 'data/processed')\n",
"step-4": "<mask token>\nimport os\nimport corenlp\nimport numpy as np\nimport ujson as json\nfrom tqdm import tqdm\nfrom collections import Counter\nfrom bilm import dump_token_embeddings\nimport sys\nsys.path.append('../..')\nfrom LIB.utils import save\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = '{}/paras'.format(outpur_dir)\n question_file = '{}/questions'.format(outpur_dir)\n sent_file = '{}/sents'.format(outpur_dir)\n answer_file = '{}/answers'.format(outpur_dir)\n print('Generating {} raw data...'.format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, 'r') as fh, corenlp.CoreNLPClient(annotators=\n 'tokenize ssplit pos ner'.split(), endpoint='http://localhost:9099',\n timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source['data']):\n title = article['title']\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article['paragraphs']:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [\n ], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [\n ], [], []\n answers_index, sents_index = [], []\n context = para['context']\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.\n token), ann_para.sentence)))\n (ann_para_tokens, paragraph_tokens, paragraph_pos,\n paragraph_ner) = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n ann_para_tokens.append(token)\n paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n for qa in para['qas']:\n ques = qa['question']\n id = qa['id']\n if not ques.strip():\n continue\n ann_que = client.annotate(ques)\n max_que_len = max(max_que_len, len(ann_que.sentence[0].\n token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n (all_answer_tokens, all_answer_pos, all_answer_ner,\n all_answer_index) = [], [], [], []\n (all_sent_tokens, all_sent_pos, all_sent_ner,\n all_sent_index) = [], [], [], []\n for answer in qa['answers']:\n answer_text = answer['text']\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n sentence = []\n for sent in ann_para.sentence:\n if (sent.characterOffsetBegin <= answer_start <=\n sent.characterOffsetEnd or sent.\n characterOffsetBegin <= answer_end <= sent.\n characterOffsetEnd):\n sentence.append(sent)\n sentence = [token for sent in sentence for token in\n 
sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' '.join(sentence_tokens))\n all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n y1_sent = sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n y1_ans = None\n for i, token in enumerate(sentence):\n if (token.beginChar - 1 <= answer_start <=\n token.endChar):\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + len(answer_tokens) - 1\n all_answer_index.append('{},{}'.format(y1_ans, y2_ans))\n all_sent_index.append('{},{}'.format(y1_sent, y2_sent))\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n with open('{}.tok'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open('{}.pos'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open('{}.ner'.format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open('{}.id'.format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n with open('{}.tok'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open('{}.pos'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open('{}.ner'.format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n with open('{}.tok'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with open('{}.pos'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open('{}.ner'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with open('{}.index'.format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_index) + '\\n')\n with open('{}.tok'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with open('{}.pos'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_pos) + '\\n')\n with open('{}.ner'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open('{}.index'.format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_index) + '\\n')\n label(para_file, answer_file)\n\n\ndef label(para_file, answer_file):\n max_node = 0\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.label'.\n format(para_file), 'a') as fl, open('{}.index'.format(answer_file), 'r'\n ) as fa:\n while True:\n para = fp.readline()\n if not para:\n break\n words = [p for p in para.strip().split(' ')]\n max_node = max(len(words), max_node)\n answer = fa.readline()\n labels = []\n try:\n start, end = map(int, answer.split('\\t')[0].split(','))\n for i in range(len(words)):\n if start <= i <= end:\n if i == start:\n labels.append('B')\n else:\n labels.append('I')\n else:\n labels.append('O')\n except:\n pass\n fl.write(' 
'.join(labels) + '\\n')\n return max_node\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for line in test_titles])\n process(train_json, '{}/train/'.format(output_dir), exclude_titles=\n test_titles)\n process(dev_json, '{}/dev/'.format(output_dir))\n process(train_json, '{}/test/'.format(output_dir), include_titles=\n test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size,\n vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print('Generating word embedding...')\n embedding_dict = {}\n with open(emb_file, 'r', encoding='utf-8') as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = ''.join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n TRANSLATE = {'-lsb-': '[', '-rsb-': ']', '-lrb-': '(', '-rrb-': ')',\n '-lcb-': '{', '-rcb-': '}', '-LSB-': '[', '-RSB-': ']', '-LRB-':\n '(', '-RRB-': ')', '-LCB-': '{', '-RCB-': '}'}\n SPECIAL_TOKENS = ['<NULL>', '<UNK>', '<S>', '</S>']\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x:\n x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print('Generating {} tag embedding...'.format(data_type))\n SPECIAL_TOKENS = ['<NULL>', '<UNK>']\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x\n [1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print('Get the vocabulary...')\n word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.\n dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open('{}.tok'.format(para_file), 'r') as fp, open('{}.tok'.\n format(que_file), 'r') as fq, open('{}.pos'.format(para_file), 'r'\n ) as fpp, open('{}.pos'.format(que_file), 'r') as fqp, open(\n '{}.ner'.format(para_file), 'r') as fpn, open('{}.ner'.format(\n que_file), 'r') as fqn, open('{}.label'.format(para_file), 'r'\n ) as fpl:\n while True:\n para, question = fp.readline(), fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = 
que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in (para_tokens + que_tokens):\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in (pos_tags + que_pos_tags):\n pos_counter[pos_tag] += 1\n for ner_tag in (ner_tags + que_ner_tags):\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter,\n emb_file=config.glove_word_file, emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit, vec_size=config.glove_dim,\n vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, 'char',\n vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, 'pos',\n vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, 'ner',\n vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter,\n 'label', vec_size=config.label_dim)\n print('{} out of {} are not in glove'.format(unk_num, len(word2idx_dict)))\n print('{} chars'.format(char_emb_mat.shape[0]))\n print('{} pos tags, {} ner tags, {} answer labels, {} chars'.format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0],\n char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message='word embedding')\n save(config.char_emb_file, char_emb_mat, message='char embedding')\n save(config.pos_emb_file, pos_emb_mat, message='pos embedding')\n save(config.ner_emb_file, ner_emb_mat, message='ner embedding')\n save(config.label_emb_file, label_emb_mat, message='label embedding')\n save(config.word_dictionary, word2idx_dict, message='word dictionary')\n save(config.char_dictionary, char2idx_dict, message='char dictionary')\n save(config.pos_dictionary, pos2idx_dict, message='pos dictionary')\n save(config.ner_dictionary, ner2idx_dict, message='ner dictionary')\n save(config.label_dictionary, label2idx_dict, message='label dictionary')\n print('Dump elmo word embedding...')\n token_embedding_file = config.embedding_file\n dump_token_embeddings(config.vocab_file, config.elmo_options_file,\n config.elmo_weight_file, token_embedding_file)\n\n\nif __name__ == '__main__':\n os.system(\n 'mkdir data; mkdir data/processed; mkdir data/processed/train; mkdir data/processed/dev; mkdir data/processed/test'\n )\n get_data('../../LIB/squad/train-v1.1.json',\n '../../LIB/squad/dev-v1.1.json', '../../LIB/squad/doclist-test.txt',\n 'data/processed')\n",
"step-5": "\"\"\"\nData pre-processing\n\"\"\"\nimport os\nimport corenlp\nimport numpy as np\nimport ujson as json\nfrom tqdm import tqdm\nfrom collections import Counter\nfrom bilm import dump_token_embeddings\nimport sys\nsys.path.append('../..')\n\nfrom LIB.utils import save\n\n\ndef process(json_file, outpur_dir, exclude_titles=None, include_titles=None):\n \"\"\"\n :param json_file: original data in json format\n :param outpur_dir: the output directory of pre-processed data\n :param exclude_titles: article titles to exclude\n :param include_titles: article titles to include\n \"\"\"\n para_file = \"{}/paras\".format(outpur_dir)\n question_file = \"{}/questions\".format(outpur_dir)\n sent_file = \"{}/sents\".format(outpur_dir)\n answer_file = \"{}/answers\".format(outpur_dir)\n print(\"Generating {} raw data...\".format(json_file))\n max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0\n with open(json_file, \"r\") as fh, corenlp.CoreNLPClient(annotators=\"tokenize ssplit pos ner\".split(),\n endpoint=\"http://localhost:9099\", timeout=50000) as client:\n source = json.load(fh)\n for article in tqdm(source[\"data\"]):\n title = article[\"title\"]\n if include_titles and title not in include_titles:\n continue\n if exclude_titles and title in exclude_titles:\n continue\n for para in article[\"paragraphs\"]:\n paragraphs, questions, answers, sents, ids = [], [], [], [], []\n paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [], [], []\n paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [], [], []\n answers_index, sents_index = [], []\n # paragraph\n context = para[\"context\"]\n if not context.strip():\n continue\n ann_para = client.annotate(context)\n max_sent = max(max_sent, len(ann_para.sentence))\n max_sent_len = max(max_sent_len, max(map(lambda x: len(x.token), ann_para.sentence)))\n ann_para_tokens, paragraph_tokens, paragraph_pos, paragraph_ner = [], [], [], []\n for sent in ann_para.sentence:\n for token in sent.token:\n ann_para_tokens.append(token)\n paragraph_tokens.append(token.word)\n paragraph_pos.append(token.pos)\n paragraph_ner.append(token.ner)\n\n # questions\n for qa in para[\"qas\"]:\n # question\n ques = qa[\"question\"]\n id = qa[\"id\"]\n if not ques.strip():\n continue\n ann_que = client.annotate(ques)\n max_que_len = max(max_que_len, len(ann_que.sentence[0].token))\n question_tokens, question_pos, question_ner = [], [], []\n for sent in ann_que.sentence:\n for token in sent.token:\n question_tokens.append(token.word)\n question_pos.append(token.pos)\n question_ner.append(token.ner)\n\n # answer\n all_answer_tokens, all_answer_pos, all_answer_ner, all_answer_index = [], [], [], []\n all_sent_tokens, all_sent_pos, all_sent_ner, all_sent_index = [], [], [], []\n for answer in qa[\"answers\"]:\n answer_text = answer[\"text\"]\n if not answer_text.strip():\n continue\n ann_ans = client.annotate(answer_text)\n answer_tokens, answer_pos, answer_ner = [], [], []\n for sent in ann_ans.sentence:\n for token in sent.token:\n answer_tokens.append(token.word)\n answer_pos.append(token.pos)\n answer_ner.append(token.ner)\n all_answer_tokens.append(' '.join(answer_tokens))\n all_answer_pos.append(' '.join(answer_pos))\n all_answer_ner.append(' '.join(answer_ner))\n\n answer_start = answer['answer_start']\n answer_end = answer_start + len(answer_text)\n # sentence\n sentence = []\n for sent in ann_para.sentence:\n if sent.characterOffsetBegin <= answer_start <= sent.characterOffsetEnd or \\\n sent.characterOffsetBegin <= answer_end <= 
sent.characterOffsetEnd:\n sentence.append(sent)\n sentence = [token for sent in sentence for token in sent.token]\n sentence_tokens = [token.word for token in sentence]\n sentence_pos = [token.pos for token in sentence]\n sentence_ner = [token.ner for token in sentence]\n all_sent_tokens.append(' '.join(sentence_tokens))\n all_sent_pos.append(' '.join(sentence_pos))\n all_sent_ner.append(' '.join(sentence_ner))\n\n # sentence index\n y1_sent = sentence[0].tokenBeginIndex\n y2_sent = sentence[-1].tokenBeginIndex\n # answer index\n y1_ans = None\n for i, token in enumerate(sentence):\n if token.beginChar - 1 <= answer_start <= token.endChar:\n y1_ans = sentence[0].tokenBeginIndex + i\n try:\n assert y1_ans != None\n except:\n continue\n y2_ans = y1_ans + len(answer_tokens) - 1\n all_answer_index.append(\"{},{}\".format(y1_ans, y2_ans))\n all_sent_index.append(\"{},{}\".format(y1_sent, y2_sent))\n\n paragraphs.append(' '.join(paragraph_tokens))\n paragraphs_pos.append(' '.join(paragraph_pos))\n paragraphs_ner.append(' '.join(paragraph_ner))\n questions.append(' '.join(question_tokens))\n questions_pos.append(' '.join(question_pos))\n questions_ner.append(' '.join(question_ner))\n answers.append('\\t'.join(all_answer_tokens))\n answers_pos.append('\\t'.join(all_answer_pos))\n answers_ner.append('\\t'.join(all_answer_ner))\n answers_index.append('\\t'.join(all_answer_index))\n sents.append('\\t'.join(all_sent_tokens))\n sents_pos.append('\\t'.join(all_sent_pos))\n sents_ner.append('\\t'.join(all_sent_ner))\n sents_index.append('\\t'.join(all_sent_index))\n ids.append(id)\n\n # save para\n with open(\"{}.tok\".format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs) + '\\n')\n with open(\"{}.pos\".format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_pos) + '\\n')\n with open(\"{}.ner\".format(para_file), 'a') as f:\n f.write('\\n'.join(paragraphs_ner) + '\\n')\n with open(\"{}.id\".format(para_file), 'a') as f:\n f.write('\\n'.join(ids) + '\\n')\n # save question\n with open(\"{}.tok\".format(question_file), 'a') as f:\n f.write('\\n'.join(questions) + '\\n')\n with open(\"{}.pos\".format(question_file), 'a') as f:\n f.write('\\n'.join(questions_pos) + '\\n')\n with open(\"{}.ner\".format(question_file), 'a') as f:\n f.write('\\n'.join(questions_ner) + '\\n')\n\n # save answer\n with open(\"{}.tok\".format(answer_file), 'a') as f:\n f.write('\\n'.join(answers) + '\\n')\n with open(\"{}.pos\".format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_pos) + '\\n')\n with open(\"{}.ner\".format(answer_file), 'a') as f:\n f.write('\\n'.join(answers_ner) + '\\n')\n with open(\"{}.index\".format(answer_file), 'a') as f:\n f.write(\"\\n\".join(answers_index) + '\\n')\n\n # save sent\n with open(\"{}.tok\".format(sent_file), 'a') as f:\n f.write('\\n'.join(sents) + '\\n')\n with open(\"{}.pos\".format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_pos) + '\\n')\n with open(\"{}.ner\".format(sent_file), 'a') as f:\n f.write('\\n'.join(sents_ner) + '\\n')\n with open(\"{}.index\".format(sent_file), 'a') as f:\n f.write(\"\\n\".join(sents_index) + '\\n')\n # get BIO labels\n label(para_file, answer_file)\n\n\ndef label(para_file, answer_file):\n # get the answer BIO label for paragraph\n max_node = 0\n with open(\"{}.tok\".format(para_file), 'r') as fp, open(\"{}.label\".format(para_file), 'a') as fl, \\\n open(\"{}.index\".format(answer_file), 'r') as fa:\n while True:\n para = fp.readline()\n if not para:\n break\n words = [p for p in para.strip().split(' ')]\n max_node = 
max(len(words), max_node)\n answer = fa.readline()\n labels = []\n try:\n start, end = map(int, answer.split('\\t')[0].split(','))\n for i in range(len(words)):\n if start <= i <= end:\n # answer words\n if i == start:\n labels.append('B')\n else:\n labels.append('I')\n else:\n # non answer words\n labels.append('O')\n except:\n pass\n fl.write(' '.join(labels) + '\\n')\n return max_node\n\n\ndef get_data(train_json, dev_json, test_title_file, output_dir):\n test_titles = open(test_title_file, 'r').readlines()\n test_titles = set([line.strip() for line in test_titles])\n\n process(train_json, \"{}/train/\".format(output_dir), exclude_titles=test_titles)\n process(dev_json, \"{}/dev/\".format(output_dir))\n process(train_json, \"{}/test/\".format(output_dir), include_titles=test_titles)\n\n\ndef get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size, vocab_file):\n \"\"\"\n get word embedding matrix from glove\n \"\"\"\n print(\"Generating word embedding...\")\n # load word embeddings\n embedding_dict = {}\n with open(emb_file, \"r\", encoding=\"utf-8\") as fh:\n for line in tqdm(fh, total=emb_size):\n array = line.split()\n word = \"\".join(array[0:-vec_size])\n vector = list(map(float, array[-vec_size:]))\n embedding_dict[word] = vector\n\n TRANSLATE = {\n \"-lsb-\": \"[\", \"-rsb-\": \"]\", \"-lrb-\": \"(\", \"-rrb-\": \")\", \"-lcb-\": \"{\",\n \"-rcb-\": \"}\", \"-LSB-\": \"[\", \"-RSB-\": \"]\", \"-LRB-\": \"(\", \"-RRB-\": \")\",\n \"-LCB-\": \"{\", \"-RCB-\": \"}\"\n }\n SPECIAL_TOKENS = [\"<NULL>\", \"<UNK>\", \"<S>\", \"</S>\"]\n words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x[1], reverse=True)))\n words = SPECIAL_TOKENS + words\n if vocab_size > 0:\n words = words[:vocab_size]\n with open(vocab_file, 'w') as f:\n f.write('\\n'.join(words[1:]))\n embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))\n word2idx_dict = {}\n unknown_count = 0\n for i, word in enumerate(words):\n word2idx_dict[word] = i\n if word in TRANSLATE:\n word = TRANSLATE[word]\n done = False\n for w in (word, word.lower(), word.upper(), word.capitalize()):\n if w in embedding_dict:\n embedding[i] = embedding_dict[w]\n done = True\n break\n if not done:\n unknown_count += 1\n return embedding, word2idx_dict, unknown_count\n\n\ndef get_tag_embedding(counter, data_type, vec_size):\n \"\"\"\n get pos/ner/label tags' embedding matrix\n \"\"\"\n print(\"Generating {} tag embedding...\".format(data_type))\n SPECIAL_TOKENS = [\"<NULL>\", \"<UNK>\"]\n tags = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x[1], reverse=True)))\n tags = SPECIAL_TOKENS + tags\n embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))\n word2idx_dict = {w: i for i, w in enumerate(tags)}\n return embedding, word2idx_dict\n\n\ndef get_vocab(config):\n print(\"Get the vocabulary...\")\n word_counter, char_counter = Counter(), Counter()\n pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()\n files = [(config.train_para_file, config.train_question_file), (config.dev_para_file, config.dev_question_file)]\n for para_file, que_file in files:\n with open(\"{}.tok\".format(para_file), 'r') as fp, open(\"{}.tok\".format(que_file), 'r') as fq, \\\n open(\"{}.pos\".format(para_file), 'r') as fpp, open(\"{}.pos\".format(que_file), 'r') as fqp, \\\n open(\"{}.ner\".format(para_file), 'r') as fpn, open(\"{}.ner\".format(que_file), 'r') as fqn, \\\n open(\"{}.label\".format(para_file), 'r') as fpl:\n while True:\n para, question = fp.readline(), 
fq.readline()\n pos, que_pos = fpp.readline(), fqp.readline()\n ner, que_ner = fpn.readline(), fqn.readline()\n label = fpl.readline()\n if not question or not para:\n break\n if config.lower_word:\n para = para.lower()\n question = question.lower()\n para_tokens = para.strip().split(' ')\n que_tokens = question.strip().split(' ')\n pos_tags = pos.strip().split(' ')\n ner_tags = ner.strip().split(' ')\n que_pos_tags = que_pos.strip().split(' ')\n que_ner_tags = que_ner.strip().split(' ')\n labels = label.strip().split(' ')\n for token in para_tokens + que_tokens:\n word_counter[token] += 1\n for char in list(token):\n char_counter[char] += 1\n for pos_tag in pos_tags + que_pos_tags:\n pos_counter[pos_tag] += 1\n for ner_tag in ner_tags + que_ner_tags:\n ner_counter[ner_tag] += 1\n for label in labels:\n label_counter[label] += 1\n word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter, emb_file=config.glove_word_file,\n emb_size=config.glove_word_size,\n vocab_size=config.vocab_size_limit,\n vec_size=config.glove_dim, vocab_file=config.vocab_file)\n char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, \"char\", vec_size=config.char_dim)\n pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, \"pos\", vec_size=config.pos_dim)\n ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, \"ner\", vec_size=config.ner_dim)\n label_emb_mat, label2idx_dict = get_tag_embedding(label_counter, \"label\", vec_size=config.label_dim)\n print(\"{} out of {} are not in glove\".format(unk_num, len(word2idx_dict)))\n print(\"{} chars\".format(char_emb_mat.shape[0]))\n print(\"{} pos tags, {} ner tags, {} answer labels, {} chars\".format(\n pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0], char_emb_mat.shape[0]))\n save(config.word_emb_file, word_emb_mat, message=\"word embedding\")\n save(config.char_emb_file, char_emb_mat, message=\"char embedding\")\n save(config.pos_emb_file, pos_emb_mat, message=\"pos embedding\")\n save(config.ner_emb_file, ner_emb_mat, message=\"ner embedding\")\n save(config.label_emb_file, label_emb_mat, message=\"label embedding\")\n save(config.word_dictionary, word2idx_dict, message=\"word dictionary\")\n save(config.char_dictionary, char2idx_dict, message=\"char dictionary\")\n save(config.pos_dictionary, pos2idx_dict, message=\"pos dictionary\")\n save(config.ner_dictionary, ner2idx_dict, message=\"ner dictionary\")\n save(config.label_dictionary, label2idx_dict, message=\"label dictionary\")\n print(\"Dump elmo word embedding...\")\n token_embedding_file = config.embedding_file\n dump_token_embeddings(\n config.vocab_file, config.elmo_options_file, config.elmo_weight_file, token_embedding_file\n )\n\n\nif __name__ == '__main__':\n # process data\n os.system(\"mkdir data; mkdir data/processed; mkdir data/processed/train; \"\n \"mkdir data/processed/dev; mkdir data/processed/test\")\n get_data(\"../../LIB/squad/train-v1.1.json\", \"../../LIB/squad/dev-v1.1.json\",\n \"../../LIB/squad/doclist-test.txt\", \"data/processed\")",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import seaborn as sns
tips = sns.load_dataset('iris')
sns.violinplot(x='species', y='sepal_length', data=tips, palette='rainbow')
|
normal
|
{
"blob_id": "274af2a0b758472ca4116f1dfa47069647babf57",
"index": 8543,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsns.violinplot(x='species', y='sepal_length', data=tips, palette='rainbow')\n",
"step-3": "<mask token>\ntips = sns.load_dataset('iris')\nsns.violinplot(x='species', y='sepal_length', data=tips, palette='rainbow')\n",
"step-4": "import seaborn as sns\ntips = sns.load_dataset('iris')\nsns.violinplot(x='species', y='sepal_length', data=tips, palette='rainbow')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import torch
import torch.nn.functional as F
import csv
class Net(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden)
self.predict = torch.nn.Linear(n_hidden, n_output)
def forward(self, x):
h1 = F.relu(self.hidden(x))
y = self.predict(h1)
return y
net = Net(n_feature=40, n_hidden=10, n_output=20)
net.load_state_dict(torch.load('net_data_multi.pkl'))
file_test = open('dataset/test_data.csv','r')
line = file_test.readline()
file_out = open('result_multi.csv','w')
file_out.write('caseid,midprice\n')
case = 1
while case < 143:
line = file_test.readline().split(',')
if len(line) < 9:
case += 1
while case <= 153:
x = torch.FloatTensor(40).zero_()
y = torch.FloatTensor(20).zero_()
for ct in range(10):
line = file_test.readline()
if line == '':
break
line = line.split(',')
x[ct*4] = float(line[6])
x[ct*4+1] = float(line[7])/10000
x[ct*4+2] = float(line[8])
x[ct*4+3] = float(line[9])/10000
prediction = net(x)
average = 0
for k in range(10):
average += prediction.data.numpy()[k]
average = 1.0*average/10
file_out.write(str(case)+','+str(average)+'\n')
#print(str(case)+','+str(average)+'\n')
line = file_test.readline()
case += 1
file_test.close()
file_out.close()
print('test complete')
|
normal
|
{
"blob_id": "e221553f866de8b3e175197a40982506bf8c1ef9",
"index": 205,
"step-1": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1)\n return y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1)\n return y\n\n\n<mask token>\nnet.load_state_dict(torch.load('net_data_multi.pkl'))\n<mask token>\nfile_out.write('caseid,midprice\\n')\n<mask token>\nwhile case < 143:\n line = file_test.readline().split(',')\n if len(line) < 9:\n case += 1\nwhile case <= 153:\n x = torch.FloatTensor(40).zero_()\n y = torch.FloatTensor(20).zero_()\n for ct in range(10):\n line = file_test.readline()\n if line == '':\n break\n line = line.split(',')\n x[ct * 4] = float(line[6])\n x[ct * 4 + 1] = float(line[7]) / 10000\n x[ct * 4 + 2] = float(line[8])\n x[ct * 4 + 3] = float(line[9]) / 10000\n prediction = net(x)\n average = 0\n for k in range(10):\n average += prediction.data.numpy()[k]\n average = 1.0 * average / 10\n file_out.write(str(case) + ',' + str(average) + '\\n')\n line = file_test.readline()\n case += 1\nfile_test.close()\nfile_out.close()\nprint('test complete')\n",
"step-3": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1)\n return y\n\n\nnet = Net(n_feature=40, n_hidden=10, n_output=20)\nnet.load_state_dict(torch.load('net_data_multi.pkl'))\nfile_test = open('dataset/test_data.csv', 'r')\nline = file_test.readline()\nfile_out = open('result_multi.csv', 'w')\nfile_out.write('caseid,midprice\\n')\ncase = 1\nwhile case < 143:\n line = file_test.readline().split(',')\n if len(line) < 9:\n case += 1\nwhile case <= 153:\n x = torch.FloatTensor(40).zero_()\n y = torch.FloatTensor(20).zero_()\n for ct in range(10):\n line = file_test.readline()\n if line == '':\n break\n line = line.split(',')\n x[ct * 4] = float(line[6])\n x[ct * 4 + 1] = float(line[7]) / 10000\n x[ct * 4 + 2] = float(line[8])\n x[ct * 4 + 3] = float(line[9]) / 10000\n prediction = net(x)\n average = 0\n for k in range(10):\n average += prediction.data.numpy()[k]\n average = 1.0 * average / 10\n file_out.write(str(case) + ',' + str(average) + '\\n')\n line = file_test.readline()\n case += 1\nfile_test.close()\nfile_out.close()\nprint('test complete')\n",
"step-4": "import torch\nimport torch.nn.functional as F\nimport csv\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1)\n return y\n\n\nnet = Net(n_feature=40, n_hidden=10, n_output=20)\nnet.load_state_dict(torch.load('net_data_multi.pkl'))\nfile_test = open('dataset/test_data.csv', 'r')\nline = file_test.readline()\nfile_out = open('result_multi.csv', 'w')\nfile_out.write('caseid,midprice\\n')\ncase = 1\nwhile case < 143:\n line = file_test.readline().split(',')\n if len(line) < 9:\n case += 1\nwhile case <= 153:\n x = torch.FloatTensor(40).zero_()\n y = torch.FloatTensor(20).zero_()\n for ct in range(10):\n line = file_test.readline()\n if line == '':\n break\n line = line.split(',')\n x[ct * 4] = float(line[6])\n x[ct * 4 + 1] = float(line[7]) / 10000\n x[ct * 4 + 2] = float(line[8])\n x[ct * 4 + 3] = float(line[9]) / 10000\n prediction = net(x)\n average = 0\n for k in range(10):\n average += prediction.data.numpy()[k]\n average = 1.0 * average / 10\n file_out.write(str(case) + ',' + str(average) + '\\n')\n line = file_test.readline()\n case += 1\nfile_test.close()\nfile_out.close()\nprint('test complete')\n",
"step-5": "import torch\nimport torch.nn.functional as F\nimport csv\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = torch.nn.Linear(n_feature, n_hidden)\n self.predict = torch.nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n h1 = F.relu(self.hidden(x))\n y = self.predict(h1) \n return y\n\n\nnet = Net(n_feature=40, n_hidden=10, n_output=20)\n\nnet.load_state_dict(torch.load('net_data_multi.pkl'))\n\nfile_test = open('dataset/test_data.csv','r')\nline = file_test.readline()\n\nfile_out = open('result_multi.csv','w')\nfile_out.write('caseid,midprice\\n')\n\ncase = 1\n\nwhile case < 143:\n line = file_test.readline().split(',')\n if len(line) < 9:\n case += 1\n \nwhile case <= 153:\n x = torch.FloatTensor(40).zero_()\n y = torch.FloatTensor(20).zero_()\n\n for ct in range(10):\n line = file_test.readline()\n if line == '':\n break\n line = line.split(',')\n\n x[ct*4] = float(line[6])\n x[ct*4+1] = float(line[7])/10000\n x[ct*4+2] = float(line[8])\n x[ct*4+3] = float(line[9])/10000\n\n prediction = net(x)\n\n average = 0\n for k in range(10):\n average += prediction.data.numpy()[k]\n average = 1.0*average/10\n\n file_out.write(str(case)+','+str(average)+'\\n')\n #print(str(case)+','+str(average)+'\\n')\n\n line = file_test.readline()\n case += 1\n\nfile_test.close()\nfile_out.close()\nprint('test complete')\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python
from setuptools import setup, find_packages
#if sys.argv[-1] == 'publish':
# os.system('python setup.py sdist upload')
# sys.exit()
with open('bace/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
VERSION = line.strip().split()[-1][1:-1]
break
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
readme = open('README.md').read()
doclink = """
Documentation
-------------
The full documentation is at http://bace.rtfd.org."""
VERSION = '1.0.0'
setup(
name='bace',
version=VERSION,
description='bace',
long_description=readme + '\n\n' + doclink + '\n\n',
author='Krzysztof Joachimiak',
url='https://github.com/krzjoa/bace',
packages=find_packages(where='.', exclude=('tests')),
package_dir={'bace': 'bace'},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
license='MIT',
zip_safe=False,
keywords='bayes',
classifiers=[
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Natural Language :: English',
'Topic :: Scientific/Engineering',
],
)
|
normal
|
{
"blob_id": "d28571214805df766c2cc2f45a6b5bea88d7ac18",
"index": 9371,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('bace/__init__.py') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\nwith open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\n<mask token>\nsetup(name='bace', version=VERSION, description='bace', long_description=\n readme + '\\n\\n' + doclink + '\\n\\n', author='Krzysztof Joachimiak', url=\n 'https://github.com/krzjoa/bace', packages=find_packages(where='.',\n exclude='tests'), package_dir={'bace': 'bace'}, include_package_data=\n True, install_requires=INSTALL_REQUIRES, license='MIT', zip_safe=False,\n keywords='bayes', classifiers=['Programming Language :: Python',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers', 'Natural Language :: English',\n 'Topic :: Scientific/Engineering'])\n",
"step-3": "<mask token>\nwith open('bace/__init__.py') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\nwith open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\nreadme = open('README.md').read()\ndoclink = \"\"\"\nDocumentation\n-------------\nThe full documentation is at http://bace.rtfd.org.\"\"\"\nVERSION = '1.0.0'\nsetup(name='bace', version=VERSION, description='bace', long_description=\n readme + '\\n\\n' + doclink + '\\n\\n', author='Krzysztof Joachimiak', url=\n 'https://github.com/krzjoa/bace', packages=find_packages(where='.',\n exclude='tests'), package_dir={'bace': 'bace'}, include_package_data=\n True, install_requires=INSTALL_REQUIRES, license='MIT', zip_safe=False,\n keywords='bayes', classifiers=['Programming Language :: Python',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers', 'Natural Language :: English',\n 'Topic :: Scientific/Engineering'])\n",
"step-4": "from setuptools import setup, find_packages\nwith open('bace/__init__.py') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\nwith open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\nreadme = open('README.md').read()\ndoclink = \"\"\"\nDocumentation\n-------------\nThe full documentation is at http://bace.rtfd.org.\"\"\"\nVERSION = '1.0.0'\nsetup(name='bace', version=VERSION, description='bace', long_description=\n readme + '\\n\\n' + doclink + '\\n\\n', author='Krzysztof Joachimiak', url=\n 'https://github.com/krzjoa/bace', packages=find_packages(where='.',\n exclude='tests'), package_dir={'bace': 'bace'}, include_package_data=\n True, install_requires=INSTALL_REQUIRES, license='MIT', zip_safe=False,\n keywords='bayes', classifiers=['Programming Language :: Python',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers', 'Natural Language :: English',\n 'Topic :: Scientific/Engineering'])\n",
"step-5": "#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n\n#if sys.argv[-1] == 'publish':\n# os.system('python setup.py sdist upload')\n# sys.exit()\n\nwith open('bace/__init__.py') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n \nwith open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\n\nreadme = open('README.md').read()\ndoclink = \"\"\"\nDocumentation\n-------------\nThe full documentation is at http://bace.rtfd.org.\"\"\"\n\nVERSION = '1.0.0'\n\nsetup(\n name='bace',\n version=VERSION,\n description='bace',\n long_description=readme + '\\n\\n' + doclink + '\\n\\n',\n author='Krzysztof Joachimiak',\n url='https://github.com/krzjoa/bace',\n packages=find_packages(where='.', exclude=('tests')),\n package_dir={'bace': 'bace'},\n include_package_data=True,\n install_requires=INSTALL_REQUIRES,\n license='MIT',\n zip_safe=False,\n keywords='bayes',\n classifiers=[\n 'Programming Language :: Python',\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Topic :: Scientific/Engineering',\n ],\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from multiprocessing import Process, Queue
def f(q):
for i in range(0,100):
print("come on baby")
q.put([42, None, 'hello'])
if __name__ == '__main__':
q = Queue()
p = Process(target=f, args=(q,))
p.start()
for j in range(0, 2000):
if j == 1800:
print(q.get())
print(j)
# Key point: the main process and the subprocess run concurrently; at j == 1800 the main process blocks on q.get() until the subprocess has put its item (a short illustrative sketch follows the sample output below).
# 0
# 1
# 2
# 3
# 4
# 5
# 6
# 7
# 8
# 9
# 10
# 11
# 12
# 13
# 14
# 15
# 16
# 17
# ...
# ...
# 1276
# 1277
# 1278
# 1279
# 1280
# 1281
# 1282
# 1283
# 1284
# 1285
# 1286
# 1287
# 1288
# 1289
# 1290
# 1291
# 1292
# 1293
# 1294
# 1295
# come on baby
# 1296
# come on baby
# 1297
# come on baby
# 1298
# come on baby
# 1299
# come on baby
# 1300
# come on baby
# 1301
# come on baby
# 1302
# come on baby
# 1303
# 1304
# come on baby
# 1305
# come on baby
# 1306
# come on baby
# 1307
# come on baby
# 1308
# come on baby
# 1309
# come on baby
# 1310
# come on baby
# 1311
# come on baby
# 1312
# come on baby
# 1313
# come on baby
# 1314
# come on baby
# 1315
# come on baby
# 1316
# come on baby
# 1317
# come on baby
# 1318
# come on baby
# 1319
# come on baby
# 1320
# come on baby
# 1321
# come on baby
# 1322
# come on baby
# 1323
# come on baby
# 1324
# come on baby
# 1325
# come on baby
# 1326
# come on baby
# 1327
# come on baby
# 1328
# come on baby
# 1329
# come on baby
# 1330
# come on baby
# 1331
# come on baby
# 1332
# come on baby
# 1333
# come on baby
# 1334
# come on baby
# 1335
# come on baby
# 1336
# come on baby
# 1337
# come on baby
# 1338
# come on baby
# 1339
# come on baby
# 1340
# come on baby
# 1341
# come on baby
# 1342
# come on baby
# 1343
# come on baby
# 1344
# come on baby
# 1345
# come on baby
# 1346
# come on baby
# 1347
# come on baby
# 1348
# come on baby
# 1349
# come on baby
# 1350
# come on baby
# 1351
# come on baby
# 1352
# come on baby
# 1353
# come on baby
# 1354
# come on baby
# 1355
# come on baby
# 1356
# come on baby
# 1357
# come on baby
# 1358
# come on baby
# 1359
# come on baby
# 1360
# come on baby
# 1361
# come on baby
# 1362
# come on baby
# 1363
# come on baby
# 1364
# come on baby
# 1365
# come on baby
# 1366
# come on baby
# 1367
# come on baby
# 1368
# come on baby
# 1369
# come on baby
# 1370
# come on baby
# 1371
# come on baby
# 1372
# come on baby
# 1373
# come on baby
# 1374
# come on baby
# 1375
# come on baby
# 1376
# come on baby
# 1377
# come on baby
# 1378
# come on baby
# 1379
# come on baby
# 1380
# come on baby
# 1381
# come on baby
# 1382
# come on baby
# 1383
# come on baby
# 1384
# come on baby
# 1385
# come on baby
# 1386
# come on baby
# 1387
# come on baby
# 1388
# come on baby
# 1389
# come on baby
# 1390
# come on baby
# 1391
# come on baby
# 1392
# come on baby
# 1393
# come on baby
# 1394
# come on baby
# 1395
# come on baby
# 1396
# 1397
# 1398
# 1399
# 1400
# 1401
# 1402
# 1403
# 1404
# 1405
# ...
# ...
# 1786
# 1787
# 1788
# 1789
# 1790
# 1791
# 1792
# 1793
# 1794
# 1795
# 1796
# 1797
# 1798
# 1799
# [42, None, 'hello']
# 1800
# 1801
# 1802
# 1803
# 1804
# 1805
# 1806
# 1807
# 1808
# 1809
# ...
# ...
# 1989
# 1990
# 1991
# 1992
# 1993
# 1994
# 1995
# 1996
# 1997
# 1998
# 1999
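# --- Minimal illustrative sketch (not part of the original sample; names are made up) ---
# It demonstrates the point noted above in isolation: Queue.get() blocks until the
# other process has put an item, while Queue.get(timeout=...) raises queue.Empty
# instead of waiting forever. It reuses Process and Queue imported at the top of
# this file; nothing runs unless the commented-out call at the end is enabled.
import queue  # only needed for the queue.Empty exception


def _slow_producer(q):
    import time
    time.sleep(0.5)         # simulate work in the subprocess
    q.put('ready')


def _blocking_demo():
    q = Queue()
    p = Process(target=_slow_producer, args=(q,))
    p.start()
    try:
        q.get(timeout=0.1)  # too early: raises queue.Empty, main keeps going
    except queue.Empty:
        print('nothing in the queue yet')
    print(q.get())          # blocks here until _slow_producer puts 'ready'
    p.join()

# _blocking_demo()  # uncomment to run the sketch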
|
normal
|
{
"blob_id": "c7258d77db2fe6e1470c972ddd94b2ed02f48003",
"index": 3390,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(q):\n for i in range(0, 100):\n print('come on baby')\n q.put([42, None, 'hello'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef f(q):\n for i in range(0, 100):\n print('come on baby')\n q.put([42, None, 'hello'])\n\n\nif __name__ == '__main__':\n q = Queue()\n p = Process(target=f, args=(q,))\n p.start()\n for j in range(0, 2000):\n if j == 1800:\n print(q.get())\n print(j)\n",
"step-4": "from multiprocessing import Process, Queue\n\n\ndef f(q):\n for i in range(0, 100):\n print('come on baby')\n q.put([42, None, 'hello'])\n\n\nif __name__ == '__main__':\n q = Queue()\n p = Process(target=f, args=(q,))\n p.start()\n for j in range(0, 2000):\n if j == 1800:\n print(q.get())\n print(j)\n",
"step-5": "from multiprocessing import Process, Queue\n\ndef f(q):\n for i in range(0,100):\n print(\"come on baby\")\n q.put([42, None, 'hello'])\n\n\nif __name__ == '__main__':\n q = Queue()\n p = Process(target=f, args=(q,))\n p.start()\n for j in range(0, 2000):\n if j == 1800:\n print(q.get())\n print(j)\n\n\n# 특징 main process 와 subprocess 가 각각 실행되다가 1800 에서 subprocess 가 실행될때까지 기다려줌\n\n# 0\n# 1\n# 2\n# 3\n# 4\n# 5\n# 6\n# 7\n# 8\n# 9\n# 10\n# 11\n# 12\n# 13\n# 14\n# 15\n# 16\n# 17\n\n\n# ...\n# ...\n\n\n# 1276\n# 1277\n# 1278\n# 1279\n# 1280\n# 1281\n# 1282\n# 1283\n# 1284\n# 1285\n# 1286\n# 1287\n# 1288\n# 1289\n# 1290\n# 1291\n# 1292\n# 1293\n# 1294\n# 1295\n# come on baby\n# 1296\n# come on baby\n# 1297\n# come on baby\n# 1298\n# come on baby\n# 1299\n# come on baby\n# 1300\n# come on baby\n# 1301\n# come on baby\n# 1302\n# come on baby\n# 1303\n# 1304\n# come on baby\n# 1305\n# come on baby\n# 1306\n# come on baby\n# 1307\n# come on baby\n# 1308\n# come on baby\n# 1309\n# come on baby\n# 1310\n# come on baby\n# 1311\n# come on baby\n# 1312\n# come on baby\n# 1313\n# come on baby\n# 1314\n# come on baby\n# 1315\n# come on baby\n# 1316\n# come on baby\n# 1317\n# come on baby\n# 1318\n# come on baby\n# 1319\n# come on baby\n# 1320\n# come on baby\n# 1321\n# come on baby\n# 1322\n# come on baby\n# 1323\n# come on baby\n# 1324\n# come on baby\n# 1325\n# come on baby\n# 1326\n# come on baby\n# 1327\n# come on baby\n# 1328\n# come on baby\n# 1329\n# come on baby\n# 1330\n# come on baby\n# 1331\n# come on baby\n# 1332\n# come on baby\n# 1333\n# come on baby\n# 1334\n# come on baby\n# 1335\n# come on baby\n# 1336\n# come on baby\n# 1337\n# come on baby\n# 1338\n# come on baby\n# 1339\n# come on baby\n# 1340\n# come on baby\n# 1341\n# come on baby\n# 1342\n# come on baby\n# 1343\n# come on baby\n# 1344\n# come on baby\n# 1345\n# come on baby\n# 1346\n# come on baby\n# 1347\n# come on baby\n# 1348\n# come on baby\n# 1349\n# come on baby\n# 1350\n# come on baby\n# 1351\n# come on baby\n# 1352\n# come on baby\n# 1353\n# come on baby\n# 1354\n# come on baby\n# 1355\n# come on baby\n# 1356\n# come on baby\n# 1357\n# come on baby\n# 1358\n# come on baby\n# 1359\n# come on baby\n# 1360\n# come on baby\n# 1361\n# come on baby\n# 1362\n# come on baby\n# 1363\n# come on baby\n# 1364\n# come on baby\n# 1365\n# come on baby\n# 1366\n# come on baby\n# 1367\n# come on baby\n# 1368\n# come on baby\n# 1369\n# come on baby\n# 1370\n# come on baby\n# 1371\n# come on baby\n# 1372\n# come on baby\n# 1373\n# come on baby\n# 1374\n# come on baby\n# 1375\n# come on baby\n# 1376\n# come on baby\n# 1377\n# come on baby\n# 1378\n# come on baby\n# 1379\n# come on baby\n# 1380\n# come on baby\n# 1381\n# come on baby\n# 1382\n# come on baby\n# 1383\n# come on baby\n# 1384\n# come on baby\n# 1385\n# come on baby\n# 1386\n# come on baby\n# 1387\n# come on baby\n# 1388\n# come on baby\n# 1389\n# come on baby\n# 1390\n# come on baby\n# 1391\n# come on baby\n# 1392\n# come on baby\n# 1393\n# come on baby\n# 1394\n# come on baby\n# 1395\n# come on baby\n# 1396\n# 1397\n# 1398\n# 1399\n# 1400\n# 1401\n# 1402\n# 1403\n# 1404\n# 1405\n\n\n# ...\n# ...\n\n\n# 1786\n# 1787\n# 1788\n# 1789\n# 1790\n# 1791\n# 1792\n# 1793\n# 1794\n# 1795\n# 1796\n# 1797\n# 1798\n# 1799\n# [42, None, 'hello']\n# 1800\n# 1801\n# 1802\n# 1803\n# 1804\n# 1805\n# 1806\n# 1807\n# 1808\n# 1809\n\n\n# ...\n# ...\n\n\n# 1989\n# 1990\n# 1991\n# 1992\n# 1993\n# 1994\n# 1995\n# 1996\n# 1997\n# 1998\n# 1999\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import pprint
import math
import sys
import datetime as dt
from pathlib import Path
import RotateCipher
import ShiftCipher
import TranspositionCipher
def process_textfile(
string_path: str,
encryption_algorithm: str,
algorithm_key: float,
output_folderpath: str = str(
Path(os.path.expandvars("$HOME")).anchor
) + r"/EncryptDecrypt/",
output_filename: str = r"EncryptDecrypt.txt",
to_decrypt=False,
**kwargs
):
encryption_algorithm = encryption_algorithm.lower()
available_algorithms = ["rotate", "transposition"]
if encryption_algorithm not in available_algorithms:
pprint.pprint(
["Enter an algorithm from the list. Not case-sensitive.",
available_algorithms]
)
return None
# A single dictionary may be passed as a **kwarg if it is the
# ONLY KEY-WORD ARGUMENT. Else, error is thrown.
lst_kwargs = list(kwargs.values())
if len(lst_kwargs) == 1 and (isinstance(lst_kwargs[0], dict)):
kwargs = lst_kwargs[0]
# Key in **kwargs overwrites `algorithm_key` function parameter.
if "algorithm_key" in kwargs:
algorithm_key = float(kwargs["algorithm_key"])
# Convert strings saying "True" or "False" to booleans.
for key, value in kwargs.items():
str_value = str(value)
if str_value.lower() == "False":
kwargs[key] = False
elif str_value.lower() == "True":
kwargs[key] = True
output_filename = ('/' + output_filename)
if not (output_filename.endswith(".txt")):
output_filename += ".txt"
full_outputpath = output_folderpath + output_filename
path_input = Path(string_path)
# fileobj_target = open(path_input, 'r') # Only for Python 3.6 and later.
fileobj_target = open(str(path_input), 'r')
lst_input = fileobj_target.readlines()
# str_input = '\n'.join(lst_input)
str_input = "".join(lst_input)
output_string = "None"
print(
"""Started processing.
Key-word arguments for %s algorithm:""" % encryption_algorithm
)
pprint.pprint(kwargs)
if (encryption_algorithm == "transposition") and to_decrypt is True:
output_string = ''.join(
TranspositionCipher.decrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "transposition" and not to_decrypt:
output_string = ''.join(
TranspositionCipher.encrypt_transposition(
str_input, int(algorithm_key)
)
)
elif encryption_algorithm == "rotate":
warning = """
When the algorithm is set to rotate, the "to_decrypt" parameter
is ignored. To decrypt, set the key-word argument shift left
so that it reverses the shift direction during encryption.
Ex: If the text was shifted left, i.e. values were swapped
with those "higher" up on the list read from left to right, pass
the key-word argument shift_left=False to decrypt.
RotateCipher's methods can return a list. However, it is
forced to always return a string. Passing return_list=True as
a key-word argument will have no effect. The argument is not
passed to RotateCipher.
"""
        # pprint.pprint(warning) # Included literal \n and single quotes.
print(warning)
to_shiftleft = True
if "shift_left" in kwargs:
to_shiftleft = kwargs["shift_left"]
process_numbers = False
if "shift_numbers" in kwargs:
process_numbers = kwargs["shift_numbers"]
output_string = RotateCipher.rot13_e(
string=str_input,
shift_left=to_shiftleft,
rotations=int(algorithm_key),
# return_list=kwargs["return_list"], # Removed for safety.
shift_numbers=process_numbers
)
if not (os.path.exists(output_folderpath)):
os.mkdir(output_folderpath)
fileobj_output = open(
full_outputpath,
'a' # Create a file and open it for writing. Append if exists.
)
fileobj_output.write(
"\n=====\nEncryptDecrypt Output on\n%s\n=====\n" %
dt.datetime.now()
)
fileobj_output.write(output_string)
fileobj_output.close()
print("Done processing. Output folder:\n{}".format(
Path(full_outputpath)
)
)
return {
"output_file": Path(full_outputpath).resolve(),
"output_text": output_string
}
def manual_test():
dict_processedtext = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 1.txt",
encryption_algorithm="rotate",
algorithm_key=1,
shift_left=True
)
print("Encrypt ROT1 with default values.")
# pprint.pprint(
# dict_processedtext
# )
print(dict_processedtext["output_file"])
dict_processedtext2 = process_textfile(
string_path=dict_processedtext["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 1 Decrypted",
shift_left=False
)
print("Decrypt ROT1 with all values user-supplied.")
print(dict_processedtext["output_file"])
for i in range(2):
dict_processedtext3a = process_textfile(
string_path=r"C:\Users\Rives\Downloads\Quizzes\Quiz 0 Overwrite Number 2.txt",
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Encryptions"
)
print(dict_processedtext3a["output_file"])
dict_processedtext3b = process_textfile(
string_path=dict_processedtext3a["output_file"],
encryption_algorithm="rotate",
algorithm_key=1,
output_folderpath=r"C:\Users\Rives\Downloads\Decryptions",
output_filename="Quiz 0 Overwrite Number 2 Decrypted",
shift_left=False
)
print(dict_processedtext3b["output_file"])
return None
def main():
while True:
print("Press Enter or New Line to skip entering any input.\t")
task = input("Encrypt or decrypt? Encrypts by default. Press E/D.\t")
algo = input("Algorithm? Uses Rotate by default.\t")
algorithm_key = float(input("Key? Uses 1 by default.\t"))
input_filepath = input(
"""Mandatory / Required.
Full path of target file. Includes file name and extension.\n""")
output_folder = input(
"Optional. Give the path of the output folder.\n"
)
output_file = input(
"Optional. Default output file name is EncryptDecrypt.txt.\n")
keyword_arguments = input(
"""Last question. Depends on algorithm.
Format: "key=value,key2,value2,...".
Use comma with no space as separator for two or more items.\n"""
)
while len(input_filepath) == 0:
input_filepath = input(
"""Mandatory / Required.
Full path of target file.
Includes file name and extension.\n"""
)
dict_kwargs = dict()
for pair in keyword_arguments.split(','):
try:
key, pair = tuple(pair.split('='))
dict_kwargs[key] = pair
except ValueError:
break
to_decrypt = False
if task.lower().startswith('d'):
to_decrypt = True
if len(output_folder) == 0:
output_folder = str(Path.cwd().parent / r"/EncryptDecrypt/")
if len(output_file) == 0:
output_file = "EncryptDecrypt.txt"
if len(algo) == 0:
algo = "rotate"
pprint.pprint(
process_textfile(
string_path=input_filepath,
encryption_algorithm=algo,
algorithm_key=algorithm_key,
output_folderpath=output_folder,
output_filename=output_file,
to_decrypt=to_decrypt,
kwargs_dict=dict_kwargs
)
)
print(
"""Done Running.
Press Q to quit, any other key to process another file.""")
to_quit = input()
if to_quit.lower().startswith("q"):
sys.exit()
else:
continue
# manual_test()
return None
if __name__ == "__main__":
main()
"""
Notes:
*
The declared parameter data types in python functions are not enforced as of
version 3.4.
*
For some reason, even though the name "key" was a parameter of process_textfile,
it was being passed to rot13_e as a string. Inside process_textfile,
VS Code also listed "key" as a string when it was passed to rot13_e, even though
the function definition declared its data type as a float and the user input
for "key" was also converted to a float in the main function. This was caused
by a for-loop: when VS Code followed the definition of key (F12) at the point
where it was passed to rot13_e, it pointed to the temporary loop variable "key"
in a for-loop rather than to the parameter. The parameter name was changed as a
quick fix.
- Adding an else clause to the for-loop did not fix it.
- The for-loop declaration was function-level code while the call to rot13_e
that bugged was inside an else-clause. The else-clause holding the call to
rot13_e was also function-level, same as the for-loop declaration. The call
to RotateCipher.rot13_e was assigned to output_string.
"""
|
normal
|
{
"blob_id": "5dccd015a90927e8d2a9c0ea4b11b24bfd4bb65e",
"index": 5690,
"step-1": "<mask token>\n\n\ndef manual_test():\n dict_processedtext = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 1.txt',\n encryption_algorithm='rotate', algorithm_key=1, shift_left=True)\n print('Encrypt ROT1 with default values.')\n print(dict_processedtext['output_file'])\n dict_processedtext2 = process_textfile(string_path=dict_processedtext[\n 'output_file'], encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions',\n output_filename='Quiz 0 Overwrite Number 1 Decrypted', shift_left=False\n )\n print('Decrypt ROT1 with all values user-supplied.')\n print(dict_processedtext['output_file'])\n for i in range(2):\n dict_processedtext3a = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 2.txt'\n , encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Encryptions')\n print(dict_processedtext3a['output_file'])\n dict_processedtext3b = process_textfile(string_path=\n dict_processedtext3a['output_file'], encryption_algorithm=\n 'rotate', algorithm_key=1, output_folderpath=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions', output_filename=\n 'Quiz 0 Overwrite Number 2 Decrypted', shift_left=False)\n print(dict_processedtext3b['output_file'])\n return None\n\n\ndef main():\n while True:\n print('Press Enter or New Line to skip entering any input.\\t')\n task = input('Encrypt or decrypt? Encrypts by default. Press E/D.\\t')\n algo = input('Algorithm? Uses Rotate by default.\\t')\n algorithm_key = float(input('Key? Uses 1 by default.\\t'))\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file. Includes file name and extension.\n\"\"\"\n )\n output_folder = input('Optional. Give the path of the output folder.\\n'\n )\n output_file = input(\n 'Optional. Default output file name is EncryptDecrypt.txt.\\n')\n keyword_arguments = input(\n \"\"\"Last question. Depends on algorithm.\n Format: \"key=value,key2,value2,...\".\n Use comma with no space as separator for two or more items.\n\"\"\"\n )\n while len(input_filepath) == 0:\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file.\n Includes file name and extension.\n\"\"\"\n )\n dict_kwargs = dict()\n for pair in keyword_arguments.split(','):\n try:\n key, pair = tuple(pair.split('='))\n dict_kwargs[key] = pair\n except ValueError:\n break\n to_decrypt = False\n if task.lower().startswith('d'):\n to_decrypt = True\n if len(output_folder) == 0:\n output_folder = str(Path.cwd().parent / '/EncryptDecrypt/')\n if len(output_file) == 0:\n output_file = 'EncryptDecrypt.txt'\n if len(algo) == 0:\n algo = 'rotate'\n pprint.pprint(process_textfile(string_path=input_filepath,\n encryption_algorithm=algo, algorithm_key=algorithm_key,\n output_folderpath=output_folder, output_filename=output_file,\n to_decrypt=to_decrypt, kwargs_dict=dict_kwargs))\n print(\n 'Done Running.\\n Press Q to quit, any other key to process another file.'\n )\n to_quit = input()\n if to_quit.lower().startswith('q'):\n sys.exit()\n else:\n continue\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_textfile(string_path: str, encryption_algorithm: str,\n algorithm_key: float, output_folderpath: str=str(Path(os.path.\n expandvars('$HOME')).anchor) + '/EncryptDecrypt/', output_filename: str\n ='EncryptDecrypt.txt', to_decrypt=False, **kwargs):\n encryption_algorithm = encryption_algorithm.lower()\n available_algorithms = ['rotate', 'transposition']\n if encryption_algorithm not in available_algorithms:\n pprint.pprint([\n 'Enter an algorithm from the list. Not case-sensitive.',\n available_algorithms])\n return None\n lst_kwargs = list(kwargs.values())\n if len(lst_kwargs) == 1 and isinstance(lst_kwargs[0], dict):\n kwargs = lst_kwargs[0]\n if 'algorithm_key' in kwargs:\n algorithm_key = float(kwargs['algorithm_key'])\n for key, value in kwargs.items():\n str_value = str(value)\n if str_value.lower() == 'False':\n kwargs[key] = False\n elif str_value.lower() == 'True':\n kwargs[key] = True\n output_filename = '/' + output_filename\n if not output_filename.endswith('.txt'):\n output_filename += '.txt'\n full_outputpath = output_folderpath + output_filename\n path_input = Path(string_path)\n fileobj_target = open(str(path_input), 'r')\n lst_input = fileobj_target.readlines()\n str_input = ''.join(lst_input)\n output_string = 'None'\n print(\n \"\"\"Started processing.\n Key-word arguments for %s algorithm:\"\"\" %\n encryption_algorithm)\n pprint.pprint(kwargs)\n if encryption_algorithm == 'transposition' and to_decrypt is True:\n output_string = ''.join(TranspositionCipher.decrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'transposition' and not to_decrypt:\n output_string = ''.join(TranspositionCipher.encrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'rotate':\n warning = \"\"\"\n When the algorithm is set to rotate, the \"to_decrypt\" parameter\n is ignored. To decrypt, set the key-word argument shift left\n so that it reverses the shift direction during encryption.\n Ex: If the text was shifted left, i.e. values were swapped\n with those \"higher\" up on the list read from left to right, pass\n the key-word argument shift_left=False to decrypt.\n\n RotateCipher's methods can return a list. However, it is\n forced to always return a string. Passing return_list=True as\n a key-word argument will have no effect. The argument is not\n passed to RotateCipher.\n \"\"\"\n print(warning)\n to_shiftleft = True\n if 'shift_left' in kwargs:\n to_shiftleft = kwargs['shift_left']\n process_numbers = False\n if 'shift_numbers' in kwargs:\n process_numbers = kwargs['shift_numbers']\n output_string = RotateCipher.rot13_e(string=str_input, shift_left=\n to_shiftleft, rotations=int(algorithm_key), shift_numbers=\n process_numbers)\n if not os.path.exists(output_folderpath):\n os.mkdir(output_folderpath)\n fileobj_output = open(full_outputpath, 'a')\n fileobj_output.write('\\n=====\\nEncryptDecrypt Output on\\n%s\\n=====\\n' %\n dt.datetime.now())\n fileobj_output.write(output_string)\n fileobj_output.close()\n print('Done processing. 
Output folder:\\n{}'.format(Path(full_outputpath)))\n return {'output_file': Path(full_outputpath).resolve(), 'output_text':\n output_string}\n\n\ndef manual_test():\n dict_processedtext = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 1.txt',\n encryption_algorithm='rotate', algorithm_key=1, shift_left=True)\n print('Encrypt ROT1 with default values.')\n print(dict_processedtext['output_file'])\n dict_processedtext2 = process_textfile(string_path=dict_processedtext[\n 'output_file'], encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions',\n output_filename='Quiz 0 Overwrite Number 1 Decrypted', shift_left=False\n )\n print('Decrypt ROT1 with all values user-supplied.')\n print(dict_processedtext['output_file'])\n for i in range(2):\n dict_processedtext3a = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 2.txt'\n , encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Encryptions')\n print(dict_processedtext3a['output_file'])\n dict_processedtext3b = process_textfile(string_path=\n dict_processedtext3a['output_file'], encryption_algorithm=\n 'rotate', algorithm_key=1, output_folderpath=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions', output_filename=\n 'Quiz 0 Overwrite Number 2 Decrypted', shift_left=False)\n print(dict_processedtext3b['output_file'])\n return None\n\n\ndef main():\n while True:\n print('Press Enter or New Line to skip entering any input.\\t')\n task = input('Encrypt or decrypt? Encrypts by default. Press E/D.\\t')\n algo = input('Algorithm? Uses Rotate by default.\\t')\n algorithm_key = float(input('Key? Uses 1 by default.\\t'))\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file. Includes file name and extension.\n\"\"\"\n )\n output_folder = input('Optional. Give the path of the output folder.\\n'\n )\n output_file = input(\n 'Optional. Default output file name is EncryptDecrypt.txt.\\n')\n keyword_arguments = input(\n \"\"\"Last question. Depends on algorithm.\n Format: \"key=value,key2,value2,...\".\n Use comma with no space as separator for two or more items.\n\"\"\"\n )\n while len(input_filepath) == 0:\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file.\n Includes file name and extension.\n\"\"\"\n )\n dict_kwargs = dict()\n for pair in keyword_arguments.split(','):\n try:\n key, pair = tuple(pair.split('='))\n dict_kwargs[key] = pair\n except ValueError:\n break\n to_decrypt = False\n if task.lower().startswith('d'):\n to_decrypt = True\n if len(output_folder) == 0:\n output_folder = str(Path.cwd().parent / '/EncryptDecrypt/')\n if len(output_file) == 0:\n output_file = 'EncryptDecrypt.txt'\n if len(algo) == 0:\n algo = 'rotate'\n pprint.pprint(process_textfile(string_path=input_filepath,\n encryption_algorithm=algo, algorithm_key=algorithm_key,\n output_folderpath=output_folder, output_filename=output_file,\n to_decrypt=to_decrypt, kwargs_dict=dict_kwargs))\n print(\n 'Done Running.\\n Press Q to quit, any other key to process another file.'\n )\n to_quit = input()\n if to_quit.lower().startswith('q'):\n sys.exit()\n else:\n continue\n return None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef process_textfile(string_path: str, encryption_algorithm: str,\n algorithm_key: float, output_folderpath: str=str(Path(os.path.\n expandvars('$HOME')).anchor) + '/EncryptDecrypt/', output_filename: str\n ='EncryptDecrypt.txt', to_decrypt=False, **kwargs):\n encryption_algorithm = encryption_algorithm.lower()\n available_algorithms = ['rotate', 'transposition']\n if encryption_algorithm not in available_algorithms:\n pprint.pprint([\n 'Enter an algorithm from the list. Not case-sensitive.',\n available_algorithms])\n return None\n lst_kwargs = list(kwargs.values())\n if len(lst_kwargs) == 1 and isinstance(lst_kwargs[0], dict):\n kwargs = lst_kwargs[0]\n if 'algorithm_key' in kwargs:\n algorithm_key = float(kwargs['algorithm_key'])\n for key, value in kwargs.items():\n str_value = str(value)\n if str_value.lower() == 'False':\n kwargs[key] = False\n elif str_value.lower() == 'True':\n kwargs[key] = True\n output_filename = '/' + output_filename\n if not output_filename.endswith('.txt'):\n output_filename += '.txt'\n full_outputpath = output_folderpath + output_filename\n path_input = Path(string_path)\n fileobj_target = open(str(path_input), 'r')\n lst_input = fileobj_target.readlines()\n str_input = ''.join(lst_input)\n output_string = 'None'\n print(\n \"\"\"Started processing.\n Key-word arguments for %s algorithm:\"\"\" %\n encryption_algorithm)\n pprint.pprint(kwargs)\n if encryption_algorithm == 'transposition' and to_decrypt is True:\n output_string = ''.join(TranspositionCipher.decrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'transposition' and not to_decrypt:\n output_string = ''.join(TranspositionCipher.encrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'rotate':\n warning = \"\"\"\n When the algorithm is set to rotate, the \"to_decrypt\" parameter\n is ignored. To decrypt, set the key-word argument shift left\n so that it reverses the shift direction during encryption.\n Ex: If the text was shifted left, i.e. values were swapped\n with those \"higher\" up on the list read from left to right, pass\n the key-word argument shift_left=False to decrypt.\n\n RotateCipher's methods can return a list. However, it is\n forced to always return a string. Passing return_list=True as\n a key-word argument will have no effect. The argument is not\n passed to RotateCipher.\n \"\"\"\n print(warning)\n to_shiftleft = True\n if 'shift_left' in kwargs:\n to_shiftleft = kwargs['shift_left']\n process_numbers = False\n if 'shift_numbers' in kwargs:\n process_numbers = kwargs['shift_numbers']\n output_string = RotateCipher.rot13_e(string=str_input, shift_left=\n to_shiftleft, rotations=int(algorithm_key), shift_numbers=\n process_numbers)\n if not os.path.exists(output_folderpath):\n os.mkdir(output_folderpath)\n fileobj_output = open(full_outputpath, 'a')\n fileobj_output.write('\\n=====\\nEncryptDecrypt Output on\\n%s\\n=====\\n' %\n dt.datetime.now())\n fileobj_output.write(output_string)\n fileobj_output.close()\n print('Done processing. 
Output folder:\\n{}'.format(Path(full_outputpath)))\n return {'output_file': Path(full_outputpath).resolve(), 'output_text':\n output_string}\n\n\ndef manual_test():\n dict_processedtext = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 1.txt',\n encryption_algorithm='rotate', algorithm_key=1, shift_left=True)\n print('Encrypt ROT1 with default values.')\n print(dict_processedtext['output_file'])\n dict_processedtext2 = process_textfile(string_path=dict_processedtext[\n 'output_file'], encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions',\n output_filename='Quiz 0 Overwrite Number 1 Decrypted', shift_left=False\n )\n print('Decrypt ROT1 with all values user-supplied.')\n print(dict_processedtext['output_file'])\n for i in range(2):\n dict_processedtext3a = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 2.txt'\n , encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Encryptions')\n print(dict_processedtext3a['output_file'])\n dict_processedtext3b = process_textfile(string_path=\n dict_processedtext3a['output_file'], encryption_algorithm=\n 'rotate', algorithm_key=1, output_folderpath=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions', output_filename=\n 'Quiz 0 Overwrite Number 2 Decrypted', shift_left=False)\n print(dict_processedtext3b['output_file'])\n return None\n\n\ndef main():\n while True:\n print('Press Enter or New Line to skip entering any input.\\t')\n task = input('Encrypt or decrypt? Encrypts by default. Press E/D.\\t')\n algo = input('Algorithm? Uses Rotate by default.\\t')\n algorithm_key = float(input('Key? Uses 1 by default.\\t'))\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file. Includes file name and extension.\n\"\"\"\n )\n output_folder = input('Optional. Give the path of the output folder.\\n'\n )\n output_file = input(\n 'Optional. Default output file name is EncryptDecrypt.txt.\\n')\n keyword_arguments = input(\n \"\"\"Last question. Depends on algorithm.\n Format: \"key=value,key2,value2,...\".\n Use comma with no space as separator for two or more items.\n\"\"\"\n )\n while len(input_filepath) == 0:\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file.\n Includes file name and extension.\n\"\"\"\n )\n dict_kwargs = dict()\n for pair in keyword_arguments.split(','):\n try:\n key, pair = tuple(pair.split('='))\n dict_kwargs[key] = pair\n except ValueError:\n break\n to_decrypt = False\n if task.lower().startswith('d'):\n to_decrypt = True\n if len(output_folder) == 0:\n output_folder = str(Path.cwd().parent / '/EncryptDecrypt/')\n if len(output_file) == 0:\n output_file = 'EncryptDecrypt.txt'\n if len(algo) == 0:\n algo = 'rotate'\n pprint.pprint(process_textfile(string_path=input_filepath,\n encryption_algorithm=algo, algorithm_key=algorithm_key,\n output_folderpath=output_folder, output_filename=output_file,\n to_decrypt=to_decrypt, kwargs_dict=dict_kwargs))\n print(\n 'Done Running.\\n Press Q to quit, any other key to process another file.'\n )\n to_quit = input()\n if to_quit.lower().startswith('q'):\n sys.exit()\n else:\n continue\n return None\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-4": "import os\nimport pprint\nimport math\nimport sys\nimport datetime as dt\nfrom pathlib import Path\nimport RotateCipher\nimport ShiftCipher\nimport TranspositionCipher\n\n\ndef process_textfile(string_path: str, encryption_algorithm: str,\n algorithm_key: float, output_folderpath: str=str(Path(os.path.\n expandvars('$HOME')).anchor) + '/EncryptDecrypt/', output_filename: str\n ='EncryptDecrypt.txt', to_decrypt=False, **kwargs):\n encryption_algorithm = encryption_algorithm.lower()\n available_algorithms = ['rotate', 'transposition']\n if encryption_algorithm not in available_algorithms:\n pprint.pprint([\n 'Enter an algorithm from the list. Not case-sensitive.',\n available_algorithms])\n return None\n lst_kwargs = list(kwargs.values())\n if len(lst_kwargs) == 1 and isinstance(lst_kwargs[0], dict):\n kwargs = lst_kwargs[0]\n if 'algorithm_key' in kwargs:\n algorithm_key = float(kwargs['algorithm_key'])\n for key, value in kwargs.items():\n str_value = str(value)\n if str_value.lower() == 'False':\n kwargs[key] = False\n elif str_value.lower() == 'True':\n kwargs[key] = True\n output_filename = '/' + output_filename\n if not output_filename.endswith('.txt'):\n output_filename += '.txt'\n full_outputpath = output_folderpath + output_filename\n path_input = Path(string_path)\n fileobj_target = open(str(path_input), 'r')\n lst_input = fileobj_target.readlines()\n str_input = ''.join(lst_input)\n output_string = 'None'\n print(\n \"\"\"Started processing.\n Key-word arguments for %s algorithm:\"\"\" %\n encryption_algorithm)\n pprint.pprint(kwargs)\n if encryption_algorithm == 'transposition' and to_decrypt is True:\n output_string = ''.join(TranspositionCipher.decrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'transposition' and not to_decrypt:\n output_string = ''.join(TranspositionCipher.encrypt_transposition(\n str_input, int(algorithm_key)))\n elif encryption_algorithm == 'rotate':\n warning = \"\"\"\n When the algorithm is set to rotate, the \"to_decrypt\" parameter\n is ignored. To decrypt, set the key-word argument shift left\n so that it reverses the shift direction during encryption.\n Ex: If the text was shifted left, i.e. values were swapped\n with those \"higher\" up on the list read from left to right, pass\n the key-word argument shift_left=False to decrypt.\n\n RotateCipher's methods can return a list. However, it is\n forced to always return a string. Passing return_list=True as\n a key-word argument will have no effect. The argument is not\n passed to RotateCipher.\n \"\"\"\n print(warning)\n to_shiftleft = True\n if 'shift_left' in kwargs:\n to_shiftleft = kwargs['shift_left']\n process_numbers = False\n if 'shift_numbers' in kwargs:\n process_numbers = kwargs['shift_numbers']\n output_string = RotateCipher.rot13_e(string=str_input, shift_left=\n to_shiftleft, rotations=int(algorithm_key), shift_numbers=\n process_numbers)\n if not os.path.exists(output_folderpath):\n os.mkdir(output_folderpath)\n fileobj_output = open(full_outputpath, 'a')\n fileobj_output.write('\\n=====\\nEncryptDecrypt Output on\\n%s\\n=====\\n' %\n dt.datetime.now())\n fileobj_output.write(output_string)\n fileobj_output.close()\n print('Done processing. 
Output folder:\\n{}'.format(Path(full_outputpath)))\n return {'output_file': Path(full_outputpath).resolve(), 'output_text':\n output_string}\n\n\ndef manual_test():\n dict_processedtext = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 1.txt',\n encryption_algorithm='rotate', algorithm_key=1, shift_left=True)\n print('Encrypt ROT1 with default values.')\n print(dict_processedtext['output_file'])\n dict_processedtext2 = process_textfile(string_path=dict_processedtext[\n 'output_file'], encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions',\n output_filename='Quiz 0 Overwrite Number 1 Decrypted', shift_left=False\n )\n print('Decrypt ROT1 with all values user-supplied.')\n print(dict_processedtext['output_file'])\n for i in range(2):\n dict_processedtext3a = process_textfile(string_path=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Quizzes\\\\Quiz 0 Overwrite Number 2.txt'\n , encryption_algorithm='rotate', algorithm_key=1,\n output_folderpath='C:\\\\Users\\\\Rives\\\\Downloads\\\\Encryptions')\n print(dict_processedtext3a['output_file'])\n dict_processedtext3b = process_textfile(string_path=\n dict_processedtext3a['output_file'], encryption_algorithm=\n 'rotate', algorithm_key=1, output_folderpath=\n 'C:\\\\Users\\\\Rives\\\\Downloads\\\\Decryptions', output_filename=\n 'Quiz 0 Overwrite Number 2 Decrypted', shift_left=False)\n print(dict_processedtext3b['output_file'])\n return None\n\n\ndef main():\n while True:\n print('Press Enter or New Line to skip entering any input.\\t')\n task = input('Encrypt or decrypt? Encrypts by default. Press E/D.\\t')\n algo = input('Algorithm? Uses Rotate by default.\\t')\n algorithm_key = float(input('Key? Uses 1 by default.\\t'))\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file. Includes file name and extension.\n\"\"\"\n )\n output_folder = input('Optional. Give the path of the output folder.\\n'\n )\n output_file = input(\n 'Optional. Default output file name is EncryptDecrypt.txt.\\n')\n keyword_arguments = input(\n \"\"\"Last question. Depends on algorithm.\n Format: \"key=value,key2,value2,...\".\n Use comma with no space as separator for two or more items.\n\"\"\"\n )\n while len(input_filepath) == 0:\n input_filepath = input(\n \"\"\"Mandatory / Required.\n Full path of target file.\n Includes file name and extension.\n\"\"\"\n )\n dict_kwargs = dict()\n for pair in keyword_arguments.split(','):\n try:\n key, pair = tuple(pair.split('='))\n dict_kwargs[key] = pair\n except ValueError:\n break\n to_decrypt = False\n if task.lower().startswith('d'):\n to_decrypt = True\n if len(output_folder) == 0:\n output_folder = str(Path.cwd().parent / '/EncryptDecrypt/')\n if len(output_file) == 0:\n output_file = 'EncryptDecrypt.txt'\n if len(algo) == 0:\n algo = 'rotate'\n pprint.pprint(process_textfile(string_path=input_filepath,\n encryption_algorithm=algo, algorithm_key=algorithm_key,\n output_folderpath=output_folder, output_filename=output_file,\n to_decrypt=to_decrypt, kwargs_dict=dict_kwargs))\n print(\n 'Done Running.\\n Press Q to quit, any other key to process another file.'\n )\n to_quit = input()\n if to_quit.lower().startswith('q'):\n sys.exit()\n else:\n continue\n return None\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-5": "import os\r\nimport pprint\r\nimport math\r\nimport sys\r\nimport datetime as dt\r\nfrom pathlib import Path\r\n\r\nimport RotateCipher\r\nimport ShiftCipher\r\nimport TranspositionCipher\r\n\r\n\r\ndef process_textfile(\r\n string_path: str,\r\n encryption_algorithm: str,\r\n algorithm_key: float,\r\n output_folderpath: str = str(\r\n Path(os.path.expandvars(\"$HOME\")).anchor\r\n ) + r\"/EncryptDecrypt/\",\r\n output_filename: str = r\"EncryptDecrypt.txt\",\r\n to_decrypt=False,\r\n **kwargs\r\n ):\r\n\r\n encryption_algorithm = encryption_algorithm.lower()\r\n available_algorithms = [\"rotate\", \"transposition\"]\r\n if encryption_algorithm not in available_algorithms:\r\n pprint.pprint(\r\n [\"Enter an algorithm from the list. Not case-sensitive.\",\r\n available_algorithms]\r\n )\r\n return None\r\n\r\n # A single dictionary may be passed as a **kwarg if it is the\r\n # ONLY KEY-WORD ARGUMENT. Else, error is thrown.\r\n lst_kwargs = list(kwargs.values())\r\n if len(lst_kwargs) == 1 and (isinstance(lst_kwargs[0], dict)):\r\n kwargs = lst_kwargs[0]\r\n\r\n # Key in **kwargs overwrites `algorithm_key` function parameter.\r\n if \"algorithm_key\" in kwargs:\r\n algorithm_key = float(kwargs[\"algorithm_key\"])\r\n\r\n # Convert strings saying \"True\" or \"False\" to booleans.\r\n for key, value in kwargs.items():\r\n str_value = str(value)\r\n if str_value.lower() == \"False\":\r\n kwargs[key] = False\r\n elif str_value.lower() == \"True\":\r\n kwargs[key] = True\r\n\r\n output_filename = ('/' + output_filename)\r\n if not (output_filename.endswith(\".txt\")):\r\n output_filename += \".txt\"\r\n\r\n full_outputpath = output_folderpath + output_filename\r\n path_input = Path(string_path)\r\n\r\n # fileobj_target = open(path_input, 'r') # Only for Python 3.6 and later.\r\n fileobj_target = open(str(path_input), 'r')\r\n lst_input = fileobj_target.readlines()\r\n # str_input = '\\n'.join(lst_input)\r\n str_input = \"\".join(lst_input)\r\n output_string = \"None\"\r\n\r\n print(\r\n \"\"\"Started processing.\r\n Key-word arguments for %s algorithm:\"\"\" % encryption_algorithm\r\n )\r\n pprint.pprint(kwargs)\r\n\r\n if (encryption_algorithm == \"transposition\") and to_decrypt is True:\r\n output_string = ''.join(\r\n TranspositionCipher.decrypt_transposition(\r\n str_input, int(algorithm_key)\r\n )\r\n )\r\n elif encryption_algorithm == \"transposition\" and not to_decrypt:\r\n output_string = ''.join(\r\n TranspositionCipher.encrypt_transposition(\r\n str_input, int(algorithm_key)\r\n )\r\n )\r\n elif encryption_algorithm == \"rotate\":\r\n warning = \"\"\"\r\n When the algorithm is set to rotate, the \"to_decrypt\" parameter\r\n is ignored. To decrypt, set the key-word argument shift left\r\n so that it reverses the shift direction during encryption.\r\n Ex: If the text was shifted left, i.e. values were swapped\r\n with those \"higher\" up on the list read from left to right, pass\r\n the key-word argument shift_left=False to decrypt.\r\n\r\n RotateCipher's methods can return a list. However, it is\r\n forced to always return a string. Passing return_list=True as\r\n a key-word argument will have no effect. 
The argument is not\r\n passed to RotateCipher.\r\n \"\"\"\r\n # pprint.pprint(warning) # Included literl \\n and single quotes.\r\n print(warning)\r\n\r\n to_shiftleft = True\r\n if \"shift_left\" in kwargs:\r\n to_shiftleft = kwargs[\"shift_left\"]\r\n\r\n process_numbers = False\r\n if \"shift_numbers\" in kwargs:\r\n process_numbers = kwargs[\"shift_numbers\"]\r\n\r\n output_string = RotateCipher.rot13_e(\r\n string=str_input,\r\n shift_left=to_shiftleft,\r\n rotations=int(algorithm_key),\r\n # return_list=kwargs[\"return_list\"], # Removed for safety.\r\n shift_numbers=process_numbers\r\n )\r\n\r\n if not (os.path.exists(output_folderpath)):\r\n os.mkdir(output_folderpath)\r\n\r\n fileobj_output = open(\r\n full_outputpath,\r\n 'a' # Create a file and open it for writing. Append if exists.\r\n )\r\n fileobj_output.write(\r\n \"\\n=====\\nEncryptDecrypt Output on\\n%s\\n=====\\n\" %\r\n dt.datetime.now()\r\n )\r\n fileobj_output.write(output_string)\r\n fileobj_output.close()\r\n print(\"Done processing. Output folder:\\n{}\".format(\r\n Path(full_outputpath)\r\n )\r\n )\r\n\r\n return {\r\n \"output_file\": Path(full_outputpath).resolve(),\r\n \"output_text\": output_string\r\n }\r\n\r\n\r\ndef manual_test():\r\n dict_processedtext = process_textfile(\r\n string_path=r\"C:\\Users\\Rives\\Downloads\\Quizzes\\Quiz 0 Overwrite Number 1.txt\",\r\n encryption_algorithm=\"rotate\",\r\n algorithm_key=1,\r\n shift_left=True\r\n )\r\n print(\"Encrypt ROT1 with default values.\")\r\n # pprint.pprint(\r\n # dict_processedtext\r\n # )\r\n print(dict_processedtext[\"output_file\"])\r\n\r\n dict_processedtext2 = process_textfile(\r\n string_path=dict_processedtext[\"output_file\"],\r\n encryption_algorithm=\"rotate\",\r\n algorithm_key=1,\r\n output_folderpath=r\"C:\\Users\\Rives\\Downloads\\Decryptions\",\r\n output_filename=\"Quiz 0 Overwrite Number 1 Decrypted\",\r\n shift_left=False\r\n )\r\n print(\"Decrypt ROT1 with all values user-supplied.\")\r\n print(dict_processedtext[\"output_file\"])\r\n\r\n for i in range(2):\r\n dict_processedtext3a = process_textfile(\r\n string_path=r\"C:\\Users\\Rives\\Downloads\\Quizzes\\Quiz 0 Overwrite Number 2.txt\",\r\n encryption_algorithm=\"rotate\",\r\n algorithm_key=1,\r\n output_folderpath=r\"C:\\Users\\Rives\\Downloads\\Encryptions\"\r\n )\r\n print(dict_processedtext3a[\"output_file\"])\r\n\r\n dict_processedtext3b = process_textfile(\r\n string_path=dict_processedtext3a[\"output_file\"],\r\n encryption_algorithm=\"rotate\",\r\n algorithm_key=1,\r\n output_folderpath=r\"C:\\Users\\Rives\\Downloads\\Decryptions\",\r\n output_filename=\"Quiz 0 Overwrite Number 2 Decrypted\",\r\n shift_left=False\r\n )\r\n print(dict_processedtext3b[\"output_file\"])\r\n\r\n return None\r\n\r\n\r\ndef main():\r\n\r\n while True:\r\n print(\"Press Enter or New Line to skip entering any input.\\t\")\r\n task = input(\"Encrypt or decrypt? Encrypts by default. Press E/D.\\t\")\r\n algo = input(\"Algorithm? Uses Rotate by default.\\t\")\r\n algorithm_key = float(input(\"Key? Uses 1 by default.\\t\"))\r\n input_filepath = input(\r\n \"\"\"Mandatory / Required.\r\n Full path of target file. Includes file name and extension.\\n\"\"\")\r\n output_folder = input(\r\n \"Optional. Give the path of the output folder.\\n\"\r\n )\r\n output_file = input(\r\n \"Optional. Default output file name is EncryptDecrypt.txt.\\n\")\r\n keyword_arguments = input(\r\n \"\"\"Last question. 
Depends on algorithm.\r\n Format: \"key=value,key2,value2,...\".\r\n Use comma with no space as separator for two or more items.\\n\"\"\"\r\n )\r\n\r\n while len(input_filepath) == 0:\r\n input_filepath = input(\r\n \"\"\"Mandatory / Required.\r\n Full path of target file.\r\n Includes file name and extension.\\n\"\"\"\r\n )\r\n\r\n dict_kwargs = dict()\r\n for pair in keyword_arguments.split(','):\r\n try:\r\n key, pair = tuple(pair.split('='))\r\n dict_kwargs[key] = pair\r\n except ValueError:\r\n break\r\n\r\n to_decrypt = False\r\n if task.lower().startswith('d'):\r\n to_decrypt = True\r\n\r\n if len(output_folder) == 0:\r\n output_folder = str(Path.cwd().parent / r\"/EncryptDecrypt/\")\r\n\r\n if len(output_file) == 0:\r\n output_file = \"EncryptDecrypt.txt\"\r\n\r\n if len(algo) == 0:\r\n algo = \"rotate\"\r\n\r\n pprint.pprint(\r\n process_textfile(\r\n string_path=input_filepath,\r\n encryption_algorithm=algo,\r\n algorithm_key=algorithm_key,\r\n output_folderpath=output_folder,\r\n output_filename=output_file,\r\n to_decrypt=to_decrypt,\r\n kwargs_dict=dict_kwargs\r\n )\r\n )\r\n print(\r\n \"\"\"Done Running.\r\n Press Q to quit, any other key to process another file.\"\"\")\r\n\r\n to_quit = input()\r\n if to_quit.lower().startswith(\"q\"):\r\n sys.exit()\r\n else:\r\n continue\r\n # manual_test()\r\n\r\n return None\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\"\"\"\r\nNotes:\r\n\r\n*\r\nThe declared parameter data types in python functions are not enforced as of\r\nversion 3.4.\r\n\r\n*\r\nFor some reason, even if the name \"key\" was a parameter for process_textfile,\r\nit was being passed to rot13_e as a string. In the function process_textfile,\r\nVisual Basic also listed \"key\" as a string when passed to rot13_e even though\r\nthe function definition specified its data type as a float and the user input\r\nfor \"key\" was also converted to a float in the main function. This was caused\r\nby a for-loop. When VS Code followed the definition of key (F12) when it\r\nwas passed to rot13_e, VS Code pointed to the temporary variable \"key\" in a\r\nfor-loop. The parameter name was changed as a quick fix.\r\n\r\n- Adding an else clause to the for-loop did not fix it.\r\n- The for-loop declaration was funciton-level code while the call to rot13_e\r\nthat bugged was inside an else-clause. The else-clause holding the call to\r\nrot13_e was also function-level, same as the for-loop declaration. The call\r\nto RotateCipher.rot13_e was assigned to output_string.\r\n\"\"\"\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/python
# ~~~~~============== HOW TO RUN ==============~~~~~
# 1) Configure things in CONFIGURATION section
# 2) Change permissions: chmod +x bot.py
# 3) Run in loop: while true; do ./bot.py; sleep 1; done
from __future__ import print_function
import sys
import socket
import json
import time
# ~~~~~============== CONFIGURATION ==============~~~~~
# replace REPLACEME with your team name!
team_name="BULBASAUR"
# This variable dictates whether or not the bot is connecting to the prod
# or test exchange. Be careful with this switch!
test_mode = True
# This setting changes which test exchange is connected to.
# 0 is prod-like
# 1 is slower
# 2 is empty
test_exchange_index=0
prod_exchange_hostname="production"
port=25000 + (test_exchange_index if test_mode else 0)
exchange_hostname = "test-exch-" + team_name if test_mode else prod_exchange_hostname
# ~~~~~============== NETWORKING CODE ==============~~~~~
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((exchange_hostname, port))
return s.makefile('rw', 1)
def write_to_exchange(exchange, obj):
json.dump(obj, exchange)
exchange.write("\n")
def read_from_exchange(exchange):
return json.loads(exchange.readline())
# ~~~~~============== MAIN LOOP ==============~~~~~
exchange = None
orders_placed = 0
pending_orders = []
pending_buy_orders = {"BOND": 0, "VALBZ": 0, "VALE": 0, "XLF": 0}
pending_sell_orders = {"BOND": 0, "VALBZ": 0, "VALE": 0, "XLF": 0}
positions = {"BOND": 0, "VALBZ": 0, "VALE": 0, "XLF": 0}
vale_buy_pending_id = None
vale_sell_pending_id = None
vale_sell = 0
vale_buy = 0
xlf_buy_pending_id = None
xlf_sell_pending_id = None
xlf_sell = 0
xlf_buy = 0
def main():
global exchange
exchange = connect()
hello()
hello_from_exchange = read_from_exchange(exchange)
# A common mistake people make is to call write_to_exchange() > 1
# time for every read_from_exchange() response.
# Since many write messages generate marketdata, this will cause an
# exponential explosion in pending messages. Please, don't do that!
print("The exchange replied:", hello_from_exchange, file=sys.stderr)
global positions
positions["BOND"] = hello_from_exchange["symbols"][0]["position"]
positions["VALE"] = hello_from_exchange["symbols"][5]["position"]
positions["VALBZ"] = hello_from_exchange["symbols"][4]["position"]
positions["XLF"] = hello_from_exchange["symbols"][7]["position"]
add("BOND", "BUY", 999, 100 - positions["BOND"])
add("BOND", "SELL", 1001, 100 + positions["BOND"])
while (True):
server_msg = read_from_exchange(exchange)
buy_sell_vale()
buy_sell_xlf()
listen_for_fills(server_msg)
listen_for_book(server_msg)
listen_for_errors(server_msg)
def hello():
write_to_exchange(exchange, {"type": "hello", "team": team_name.upper()})
def add(symbol, direction, price, size):
# Update order id to be order placed number
global orders_placed
orders_placed += 1
# Add to pending orders list
global pending_orders
pending_orders.append(orders_placed)
#if symbol == "VALE":
print("Order Placed: " + str(orders_placed) + " Position: " + str(positions[symbol])+ " Size: " + str(size) + " Dir: " + direction + " Symbol: " + symbol + " Price: " + str(price) + "")
# Increment Buy Orders If Necessary
if (direction == "BUY"):
global pending_buy_orders
pending_buy_orders[symbol] += size
elif (direction == "SELL"):
global pending_sell_orders
pending_sell_orders[symbol] += size
# Add order to exchange
write_to_exchange(exchange, {"type": "add", "order_id": orders_placed, "symbol": symbol,
"dir":direction, "price":price, "size": size })
#
read_from_exchange(exchange)
def cancel(order_id):
write_to_exchange(exchange, {"type": "cancel", "order_id": order_id})
def listen_for_fills(server_msg):
if (server_msg["type"] == "fill"):
# Get info of filled order
order_num = server_msg["order_id"]
symbol = server_msg["symbol"]
size = server_msg["size"]
direction = server_msg["dir"]
global positions
# Update bond order fill and buy/sell as necessary
if (symbol == "BOND"):
# print("Bond Order Partially Filled: " + str(order_num))
if (direction == "BUY"):
pending_buy_orders[symbol] -= size
add("BOND", "SELL", 1001, size)
elif (direction == "SELL"):
pending_sell_orders[symbol] -= size
add("BOND", "BUY", 999, size)
# Update Vale Order fill and hedge as necessary
if (symbol == "VALE"):
print("Vale Order Filled: " + str(order_num) + " " + direction + " Size: " + str(size))
if (direction == "BUY"):
pending_buy_orders[symbol] -= size
positions["VALE"] += size
elif (direction == "SELL"):
positions["VALE"] -= size
pending_sell_orders[symbol] -= size
if (symbol == "XLF"):
print("XLF Order Filled: " + str(order_num) + " " + direction + " Size: " + str(size))
if (direction == "BUY"):
pending_buy_orders[symbol] -= size
positions["XLF"] += size
elif (direction == "SELL"):
positions["XLF"] -= size
pending_sell_orders[symbol] -= size
def listen_for_book(server_msg):
if (server_msg["type"] == "book"):
global vale_sell
global vale_buy
global xlf_sell
global xlf_buy
if (server_msg["symbol"] == "VALE"):
if len(server_msg["sell"]) > 0:
vale_sell = server_msg["sell"][0][0]
if len(server_msg["buy"]) > 0:
vale_buy = server_msg["buy"][0][0]
if (server_msg["symbol"] == "XLF"):
if len(server_msg["sell"]) > 0:
xlf_sell = server_msg["sell"][0][0]
if len(server_msg["buy"]) > 0:
xlf_buy = server_msg["buy"][0][0]
def buy_sell_vale():
if vale_buy > 0 and vale_sell > 0:
global pending_sell_orders
global pending_buy_orders
if ( pending_buy_orders["VALE"] + positions["VALE"] < 10):
global vale_buy_pending_id
if vale_buy_pending_id:
cancel(vale_buy_pending_id)
pending_buy_orders["VALE"] = 0
vale_buy_pending_id = None
print("Cancel VALE BUY Order: " + str(orders_placed))
time.sleep(1)
num_stock = 10 - positions["VALE"]
add("VALE", "BUY", vale_buy + 1, 10 - positions["VALE"])
vale_buy_pending_id = orders_placed
elif (positions["VALE"] - pending_sell_orders["VALE"] > -10):
global vale_sell_pending_id
if vale_sell_pending_id:
print("Cancel VALE Sell Order: " + str(orders_placed))
cancel(vale_sell_pending_id)
pending_sell_orders["VALE"] = 0
vale_sell_pending_id = None
time.sleep(1)
num_stock = 10 - positions["VALE"]
add("VALE", "SELL", vale_sell - 1, num_stock)
vale_sell_pending_id = orders_placed
def buy_sell_xlf():
if xlf_buy > 0 and xlf_sell > 0:
global pending_sell_orders
global pending_buy_orders
if ( pending_buy_orders["XLF"] + positions["XLF"] < 100):
global xlf_buy_pending_id
if xlf_buy_pending_id:
cancel(xlf_buy_pending_id)
pending_buy_orders["XLF"] = 0
xlf_buy_pending_id = None
print("Cancel XLF Order: " + str(orders_placed))
time.sleep(1)
add("XLF", "BUY", xlf_buy + 1, 100 - positions["XLF"])
xlf_buy_pending_id = orders_placed
elif (positions["XLF"] - pending_sell_orders["XLF"] > -100):
global xlf_sell_pending_id
if xlf_sell_pending_id:
print("Cancel XLF Order: " + str(orders_placed))
cancel(xlf_sell_pending_id)
pending_sell_orders["XLF"] = 0
xlf_sell_pending_id = None
time.sleep(1)
add("XLF", "SELL", xlf_sell - 1, 100 + positions["XLF"])
xlf_sell_pending_id = orders_placed
def listen_for_errors(server_msg):
if (server_msg["type"] == "reject"):
print("ERROR: ORDER FAILED, id: " + str(server_msg["order_id"]) + " " + server_msg["error"])
if (server_msg["type"] == "error"):
print("ERROR: ORDER FAILED, id: " + str(id) + " " + server_msg["error"])
if (server_msg["type"] == "ack"):
print("Order Completed: " + str(server_msg["order_id"]))
if (server_msg["type"] == "out"):
print("Order Successfully Canceled: " + str(server_msg["order_id"]))
#add("BOND", "BUY", 999, 100 - positions["BOND"])
#add("BOND", "SELL", 1001, 100 + positions["BOND"])
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "56c5c515de8490f2e3516563e037c375aba03667",
"index": 3221,
"step-1": "<mask token>\n\n\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write('\\n')\n\n\n<mask token>\n\n\ndef hello():\n write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})\n\n\ndef add(symbol, direction, price, size):\n global orders_placed\n orders_placed += 1\n global pending_orders\n pending_orders.append(orders_placed)\n print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(\n positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +\n ' Symbol: ' + symbol + ' Price: ' + str(price) + '')\n if direction == 'BUY':\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif direction == 'SELL':\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,\n 'symbol': symbol, 'dir': direction, 'price': price, 'size': size})\n read_from_exchange(exchange)\n\n\n<mask token>\n\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if pending_buy_orders['XLF'] + positions['XLF'] < 100:\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders['XLF'] = 0\n xlf_buy_pending_id = None\n print('Cancel XLF Order: ' + str(orders_placed))\n time.sleep(1)\n add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])\n xlf_buy_pending_id = orders_placed\n elif positions['XLF'] - pending_sell_orders['XLF'] > -100:\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print('Cancel XLF Order: ' + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders['XLF'] = 0\n xlf_sell_pending_id = None\n time.sleep(1)\n add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])\n xlf_sell_pending_id = orders_placed\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write('\\n')\n\n\ndef read_from_exchange(exchange):\n return json.loads(exchange.readline())\n\n\n<mask token>\n\n\ndef hello():\n write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})\n\n\ndef add(symbol, direction, price, size):\n global orders_placed\n orders_placed += 1\n global pending_orders\n pending_orders.append(orders_placed)\n print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(\n positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +\n ' Symbol: ' + symbol + ' Price: ' + str(price) + '')\n if direction == 'BUY':\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif direction == 'SELL':\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,\n 'symbol': symbol, 'dir': direction, 'price': price, 'size': size})\n read_from_exchange(exchange)\n\n\ndef cancel(order_id):\n write_to_exchange(exchange, {'type': 'cancel', 'order_id': order_id})\n\n\ndef listen_for_fills(server_msg):\n if server_msg['type'] == 'fill':\n order_num = server_msg['order_id']\n symbol = server_msg['symbol']\n size = server_msg['size']\n direction = server_msg['dir']\n global positions\n if symbol == 'BOND':\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n add('BOND', 'SELL', 1001, size)\n elif direction == 'SELL':\n pending_sell_orders[symbol] -= size\n add('BOND', 'BUY', 999, size)\n if symbol == 'VALE':\n print('Vale Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['VALE'] += size\n elif direction == 'SELL':\n positions['VALE'] -= size\n pending_sell_orders[symbol] -= size\n if symbol == 'XLF':\n print('XLF Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['XLF'] += size\n elif direction == 'SELL':\n positions['XLF'] -= size\n pending_sell_orders[symbol] -= size\n\n\n<mask token>\n\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if pending_buy_orders['XLF'] + positions['XLF'] < 100:\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders['XLF'] = 0\n xlf_buy_pending_id = None\n print('Cancel XLF Order: ' + str(orders_placed))\n time.sleep(1)\n add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])\n xlf_buy_pending_id = orders_placed\n elif positions['XLF'] - pending_sell_orders['XLF'] > -100:\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print('Cancel XLF Order: ' + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders['XLF'] = 0\n xlf_sell_pending_id = None\n time.sleep(1)\n add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])\n xlf_sell_pending_id = orders_placed\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write('\\n')\n\n\ndef read_from_exchange(exchange):\n return json.loads(exchange.readline())\n\n\n<mask token>\n\n\ndef hello():\n write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})\n\n\ndef add(symbol, direction, price, size):\n global orders_placed\n orders_placed += 1\n global pending_orders\n pending_orders.append(orders_placed)\n print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(\n positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +\n ' Symbol: ' + symbol + ' Price: ' + str(price) + '')\n if direction == 'BUY':\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif direction == 'SELL':\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,\n 'symbol': symbol, 'dir': direction, 'price': price, 'size': size})\n read_from_exchange(exchange)\n\n\ndef cancel(order_id):\n write_to_exchange(exchange, {'type': 'cancel', 'order_id': order_id})\n\n\ndef listen_for_fills(server_msg):\n if server_msg['type'] == 'fill':\n order_num = server_msg['order_id']\n symbol = server_msg['symbol']\n size = server_msg['size']\n direction = server_msg['dir']\n global positions\n if symbol == 'BOND':\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n add('BOND', 'SELL', 1001, size)\n elif direction == 'SELL':\n pending_sell_orders[symbol] -= size\n add('BOND', 'BUY', 999, size)\n if symbol == 'VALE':\n print('Vale Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['VALE'] += size\n elif direction == 'SELL':\n positions['VALE'] -= size\n pending_sell_orders[symbol] -= size\n if symbol == 'XLF':\n print('XLF Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['XLF'] += size\n elif direction == 'SELL':\n positions['XLF'] -= size\n pending_sell_orders[symbol] -= size\n\n\ndef listen_for_book(server_msg):\n if server_msg['type'] == 'book':\n global vale_sell\n global vale_buy\n global xlf_sell\n global xlf_buy\n if server_msg['symbol'] == 'VALE':\n if len(server_msg['sell']) > 0:\n vale_sell = server_msg['sell'][0][0]\n if len(server_msg['buy']) > 0:\n vale_buy = server_msg['buy'][0][0]\n if server_msg['symbol'] == 'XLF':\n if len(server_msg['sell']) > 0:\n xlf_sell = server_msg['sell'][0][0]\n if len(server_msg['buy']) > 0:\n xlf_buy = server_msg['buy'][0][0]\n\n\n<mask token>\n\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if pending_buy_orders['XLF'] + positions['XLF'] < 100:\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders['XLF'] = 0\n xlf_buy_pending_id = None\n print('Cancel XLF Order: ' + str(orders_placed))\n time.sleep(1)\n add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])\n xlf_buy_pending_id = orders_placed\n elif positions['XLF'] - pending_sell_orders['XLF'] > -100:\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print('Cancel XLF Order: ' + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders['XLF'] = 0\n xlf_sell_pending_id = None\n 
time.sleep(1)\n add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])\n xlf_sell_pending_id = orders_placed\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write('\\n')\n\n\ndef read_from_exchange(exchange):\n return json.loads(exchange.readline())\n\n\n<mask token>\n\n\ndef hello():\n write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})\n\n\ndef add(symbol, direction, price, size):\n global orders_placed\n orders_placed += 1\n global pending_orders\n pending_orders.append(orders_placed)\n print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(\n positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +\n ' Symbol: ' + symbol + ' Price: ' + str(price) + '')\n if direction == 'BUY':\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif direction == 'SELL':\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,\n 'symbol': symbol, 'dir': direction, 'price': price, 'size': size})\n read_from_exchange(exchange)\n\n\ndef cancel(order_id):\n write_to_exchange(exchange, {'type': 'cancel', 'order_id': order_id})\n\n\ndef listen_for_fills(server_msg):\n if server_msg['type'] == 'fill':\n order_num = server_msg['order_id']\n symbol = server_msg['symbol']\n size = server_msg['size']\n direction = server_msg['dir']\n global positions\n if symbol == 'BOND':\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n add('BOND', 'SELL', 1001, size)\n elif direction == 'SELL':\n pending_sell_orders[symbol] -= size\n add('BOND', 'BUY', 999, size)\n if symbol == 'VALE':\n print('Vale Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['VALE'] += size\n elif direction == 'SELL':\n positions['VALE'] -= size\n pending_sell_orders[symbol] -= size\n if symbol == 'XLF':\n print('XLF Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['XLF'] += size\n elif direction == 'SELL':\n positions['XLF'] -= size\n pending_sell_orders[symbol] -= size\n\n\ndef listen_for_book(server_msg):\n if server_msg['type'] == 'book':\n global vale_sell\n global vale_buy\n global xlf_sell\n global xlf_buy\n if server_msg['symbol'] == 'VALE':\n if len(server_msg['sell']) > 0:\n vale_sell = server_msg['sell'][0][0]\n if len(server_msg['buy']) > 0:\n vale_buy = server_msg['buy'][0][0]\n if server_msg['symbol'] == 'XLF':\n if len(server_msg['sell']) > 0:\n xlf_sell = server_msg['sell'][0][0]\n if len(server_msg['buy']) > 0:\n xlf_buy = server_msg['buy'][0][0]\n\n\n<mask token>\n\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if pending_buy_orders['XLF'] + positions['XLF'] < 100:\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders['XLF'] = 0\n xlf_buy_pending_id = None\n print('Cancel XLF Order: ' + str(orders_placed))\n time.sleep(1)\n add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])\n xlf_buy_pending_id = orders_placed\n elif positions['XLF'] - pending_sell_orders['XLF'] > -100:\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print('Cancel XLF Order: ' + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders['XLF'] = 0\n xlf_sell_pending_id = None\n 
time.sleep(1)\n add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])\n xlf_sell_pending_id = orders_placed\n\n\ndef listen_for_errors(server_msg):\n if server_msg['type'] == 'reject':\n print('ERROR: ORDER FAILED, id: ' + str(server_msg['order_id']) +\n ' ' + server_msg['error'])\n if server_msg['type'] == 'error':\n print('ERROR: ORDER FAILED, id: ' + str(id) + ' ' + server_msg['error']\n )\n if server_msg['type'] == 'ack':\n print('Order Completed: ' + str(server_msg['order_id']))\n if server_msg['type'] == 'out':\n print('Order Successfully Canceled: ' + str(server_msg['order_id']))\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/python\n\n# ~~~~~============== HOW TO RUN ==============~~~~~\n# 1) Configure things in CONFIGURATION section\n# 2) Change permissions: chmod +x bot.py\n# 3) Run in loop: while true; do ./bot.py; sleep 1; done\n\nfrom __future__ import print_function\n\nimport sys\nimport socket\nimport json\nimport time\n\n# ~~~~~============== CONFIGURATION ==============~~~~~\n# replace REPLACEME with your team name!\nteam_name=\"BULBASAUR\"\n# This variable dictates whether or not the bot is connecting to the prod\n# or test exchange. Be careful with this switch!\ntest_mode = True\n\n# This setting changes which test exchange is connected to.\n# 0 is prod-like\n# 1 is slower\n# 2 is empty\ntest_exchange_index=0\nprod_exchange_hostname=\"production\"\n\nport=25000 + (test_exchange_index if test_mode else 0)\nexchange_hostname = \"test-exch-\" + team_name if test_mode else prod_exchange_hostname\n\n# ~~~~~============== NETWORKING CODE ==============~~~~~\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write(\"\\n\")\n\ndef read_from_exchange(exchange):\n return json.loads(exchange.readline())\n\n\n# ~~~~~============== MAIN LOOP ==============~~~~~\nexchange = None\norders_placed = 0\npending_orders = []\npending_buy_orders = {\"BOND\": 0, \"VALBZ\": 0, \"VALE\": 0, \"XLF\": 0}\npending_sell_orders = {\"BOND\": 0, \"VALBZ\": 0, \"VALE\": 0, \"XLF\": 0}\npositions = {\"BOND\": 0, \"VALBZ\": 0, \"VALE\": 0, \"XLF\": 0}\nvale_buy_pending_id = None\nvale_sell_pending_id = None\nvale_sell = 0\nvale_buy = 0\n\nxlf_buy_pending_id = None\nxlf_sell_pending_id = None\nxlf_sell = 0\nxlf_buy = 0\n\ndef main():\n global exchange\n exchange = connect()\n hello()\n hello_from_exchange = read_from_exchange(exchange)\n # A common mistake people make is to call write_to_exchange() > 1\n # time for every read_from_exchange() response.\n # Since many write messages generate marketdata, this will cause an\n # exponential explosion in pending messages. 
Please, don't do that!\n print(\"The exchange replied:\", hello_from_exchange, file=sys.stderr)\n global positions\n positions[\"BOND\"] = hello_from_exchange[\"symbols\"][0][\"position\"]\n positions[\"VALE\"] = hello_from_exchange[\"symbols\"][5][\"position\"]\n positions[\"VALBZ\"] = hello_from_exchange[\"symbols\"][4][\"position\"]\n positions[\"XLF\"] = hello_from_exchange[\"symbols\"][7][\"position\"]\n\n add(\"BOND\", \"BUY\", 999, 100 - positions[\"BOND\"])\n add(\"BOND\", \"SELL\", 1001, 100 + positions[\"BOND\"])\n\n while (True):\n server_msg = read_from_exchange(exchange)\n buy_sell_vale()\n buy_sell_xlf()\n listen_for_fills(server_msg)\n listen_for_book(server_msg)\n listen_for_errors(server_msg)\n \ndef hello():\n write_to_exchange(exchange, {\"type\": \"hello\", \"team\": team_name.upper()})\n\ndef add(symbol, direction, price, size):\n # Update order id to be order placed number\n global orders_placed\n orders_placed += 1\n # Add to pending orders list\n global pending_orders\n pending_orders.append(orders_placed)\n #if symbol == \"VALE\":\n print(\"Order Placed: \" + str(orders_placed) + \" Position: \" + str(positions[symbol])+ \" Size: \" + str(size) + \" Dir: \" + direction + \" Symbol: \" + symbol + \" Price: \" + str(price) + \"\")\n\n # Increment Buy Orders If Necessary\n if (direction == \"BUY\"):\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif (direction == \"SELL\"):\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n # Add order to exchange\n write_to_exchange(exchange, {\"type\": \"add\", \"order_id\": orders_placed, \"symbol\": symbol,\n \"dir\":direction, \"price\":price, \"size\": size })\n # \n read_from_exchange(exchange)\n\ndef cancel(order_id):\n write_to_exchange(exchange, {\"type\": \"cancel\", \"order_id\": order_id}) \n\ndef listen_for_fills(server_msg):\n if (server_msg[\"type\"] == \"fill\"):\n # Get info of filled order\n order_num = server_msg[\"order_id\"]\n symbol = server_msg[\"symbol\"]\n size = server_msg[\"size\"]\n direction = server_msg[\"dir\"]\n global positions\n # Update bond order fill and buy/sell as necessary\n if (symbol == \"BOND\"):\n # print(\"Bond Order Partially Filled: \" + str(order_num))\n if (direction == \"BUY\"):\n pending_buy_orders[symbol] -= size\n add(\"BOND\", \"SELL\", 1001, size)\n elif (direction == \"SELL\"):\n pending_sell_orders[symbol] -= size\n add(\"BOND\", \"BUY\", 999, size)\n # Update Vale Order fill and hedge as necessary\n if (symbol == \"VALE\"):\n print(\"Vale Order Filled: \" + str(order_num) + \" \" + direction + \" Size: \" + str(size))\n if (direction == \"BUY\"):\n pending_buy_orders[symbol] -= size\n positions[\"VALE\"] += size\n elif (direction == \"SELL\"):\n positions[\"VALE\"] -= size\n pending_sell_orders[symbol] -= size\n if (symbol == \"XLF\"):\n print(\"XLF Order Filled: \" + str(order_num) + \" \" + direction + \" Size: \" + str(size))\n if (direction == \"BUY\"):\n pending_buy_orders[symbol] -= size\n positions[\"XLF\"] += size\n elif (direction == \"SELL\"):\n positions[\"XLF\"] -= size\n pending_sell_orders[symbol] -= size\n\ndef listen_for_book(server_msg):\n if (server_msg[\"type\"] == \"book\"):\n global vale_sell\n global vale_buy\n global xlf_sell\n global xlf_buy\n if (server_msg[\"symbol\"] == \"VALE\"):\n if len(server_msg[\"sell\"]) > 0:\n vale_sell = server_msg[\"sell\"][0][0]\n if len(server_msg[\"buy\"]) > 0:\n vale_buy = server_msg[\"buy\"][0][0]\n if (server_msg[\"symbol\"] == \"XLF\"):\n if len(server_msg[\"sell\"]) > 0:\n 
xlf_sell = server_msg[\"sell\"][0][0]\n if len(server_msg[\"buy\"]) > 0:\n xlf_buy = server_msg[\"buy\"][0][0]\n\ndef buy_sell_vale():\n if vale_buy > 0 and vale_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if ( pending_buy_orders[\"VALE\"] + positions[\"VALE\"] < 10):\n global vale_buy_pending_id\n if vale_buy_pending_id:\n cancel(vale_buy_pending_id)\n pending_buy_orders[\"VALE\"] = 0\n vale_buy_pending_id = None\n print(\"Cancel VALE BUY Order: \" + str(orders_placed))\n time.sleep(1)\n num_stock = 10 - positions[\"VALE\"]\n add(\"VALE\", \"BUY\", vale_buy + 1, 10 - positions[\"VALE\"])\n\n vale_buy_pending_id = orders_placed\n elif (positions[\"VALE\"] - pending_sell_orders[\"VALE\"] > -10):\n global vale_sell_pending_id\n if vale_sell_pending_id:\n print(\"Cancel VALE Sell Order: \" + str(orders_placed))\n cancel(vale_sell_pending_id)\n pending_sell_orders[\"VALE\"] = 0\n vale_sell_pending_id = None\n time.sleep(1)\n num_stock = 10 - positions[\"VALE\"]\n add(\"VALE\", \"SELL\", vale_sell - 1, num_stock)\n vale_sell_pending_id = orders_placed\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if ( pending_buy_orders[\"XLF\"] + positions[\"XLF\"] < 100):\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders[\"XLF\"] = 0\n xlf_buy_pending_id = None\n print(\"Cancel XLF Order: \" + str(orders_placed))\n time.sleep(1)\n add(\"XLF\", \"BUY\", xlf_buy + 1, 100 - positions[\"XLF\"])\n xlf_buy_pending_id = orders_placed\n elif (positions[\"XLF\"] - pending_sell_orders[\"XLF\"] > -100):\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print(\"Cancel XLF Order: \" + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders[\"XLF\"] = 0\n xlf_sell_pending_id = None\n time.sleep(1)\n add(\"XLF\", \"SELL\", xlf_sell - 1, 100 + positions[\"XLF\"])\n xlf_sell_pending_id = orders_placed\n\ndef listen_for_errors(server_msg):\n if (server_msg[\"type\"] == \"reject\"):\n print(\"ERROR: ORDER FAILED, id: \" + str(server_msg[\"order_id\"]) + \" \" + server_msg[\"error\"])\n if (server_msg[\"type\"] == \"error\"):\n print(\"ERROR: ORDER FAILED, id: \" + str(id) + \" \" + server_msg[\"error\"])\n if (server_msg[\"type\"] == \"ack\"):\n print(\"Order Completed: \" + str(server_msg[\"order_id\"]))\n if (server_msg[\"type\"] == \"out\"):\n print(\"Order Successfully Canceled: \" + str(server_msg[\"order_id\"]))\n\n #add(\"BOND\", \"BUY\", 999, 100 - positions[\"BOND\"])\n #add(\"BOND\", \"SELL\", 1001, 100 + positions[\"BOND\"])\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
8,
9,
10,
16
]
}
|
[
5,
8,
9,
10,
16
] |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from kneed import KneeLocator
#Create a panda data frame from the csv file
df = pd.read_csv('ClusterPlot.csv', usecols=['V1','V2'])
#Convert the panda data frame to a NumPy array
arr = df.to_numpy()
#Code used to visualise the data and check if the import worked correctly
#Now commented out but retained for debugging.
#plt.scatter(arr[:,0],arr[:,1], label='True Position')
#plt.show()
# Create an array to store the Sum of Squared Errors or the cluster inertia
# for the k-clusters in multiple runs of the K-Means algo with different
# number of clusters assumed
distortions = []
for i in range(1,11):
km = KMeans(n_clusters=i, init='random',
n_init=10, max_iter=300,
tol=1e-04, random_state=0)
km.fit(arr)
distortions.append(km.inertia_)
# Find the elbow or knee from the plot of no. of clusters vs distortion for that
# number. This algorithm locates the knee and that is used to provide the Number
# of clusters to the main run of K-means algo.
kn = KneeLocator(range(1,11), distortions, curve='convex', direction='decreasing')
print('The number of clusters are: ' + str(kn.knee))
#plot the no. of clusters vs distortion graph and annotate the elbow point
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()
#From the sciKitLearn clustering algorithms, the K-means clustering
#algorithm is used.
km = KMeans(
n_clusters=kn.knee, init='random',
n_init=10, max_iter=300,
tol=1e-04, random_state=0
)
#Obtain the cluster labels by running the K-means algorithm with
# the parameters defined above.
y_km = km.fit_predict(arr)
#Color Array
colors = ['lightgreen','orange','lightblue','azure', 'crimson','lightpink','black','gold', 'coral', 'navy']
#Marker Array
markers = ['s','o','v', '^', '<', '>', 'h', 'H', 'D', 'd']
#Plot each detected cluster with its own colour and marker.
for i in range(0, kn.knee):
plt.scatter(
arr[y_km == i, 0], arr[y_km == i, 1],
s=50, c=colors[i],
marker=markers[i], edgecolor='black',
label='cluster ' + str(i+1)
)
# Plotting the centroids for all the clusters.
plt.scatter(
km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
s=250, marker='*',
c='red', edgecolor='black',
label='centroids'
)
plt.legend(scatterpoints=1)
plt.grid()
plt.show()
|
normal
|
{
"blob_id": "09417014963172fc71b4268aafdec1405c04f34d",
"index": 3472,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, 11):\n km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=\n 0.0001, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\n<mask token>\nprint('The number of clusters are: ' + str(kn.knee))\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\n<mask token>\nfor i in range(0, 3):\n plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],\n marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,\n marker='*', c='red', edgecolor='black', label='centroids')\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-3": "<mask token>\ndf = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])\narr = df.to_numpy()\ndistortions = []\nfor i in range(1, 11):\n km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=\n 0.0001, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\nkn = KneeLocator(range(1, 11), distortions, curve='convex', direction=\n 'decreasing')\nprint('The number of clusters are: ' + str(kn.knee))\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\nkm = KMeans(n_clusters=kn.knee, init='random', n_init=10, max_iter=300, tol\n =0.0001, random_state=0)\ny_km = km.fit_predict(arr)\ncolors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',\n 'lightpink', 'black', 'gold', 'coral', 'navy']\nmarkers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']\nfor i in range(0, 3):\n plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],\n marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,\n marker='*', c='red', edgecolor='black', label='centroids')\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom kneed import KneeLocator\ndf = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])\narr = df.to_numpy()\ndistortions = []\nfor i in range(1, 11):\n km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=\n 0.0001, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\nkn = KneeLocator(range(1, 11), distortions, curve='convex', direction=\n 'decreasing')\nprint('The number of clusters are: ' + str(kn.knee))\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\nkm = KMeans(n_clusters=kn.knee, init='random', n_init=10, max_iter=300, tol\n =0.0001, random_state=0)\ny_km = km.fit_predict(arr)\ncolors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',\n 'lightpink', 'black', 'gold', 'coral', 'navy']\nmarkers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']\nfor i in range(0, 3):\n plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],\n marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,\n marker='*', c='red', edgecolor='black', label='centroids')\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom kneed import KneeLocator\n\n#Create a panda data frame from the csv file\ndf = pd.read_csv('ClusterPlot.csv', usecols=['V1','V2'])\n\n#Convert the panda data frame to a NumPy array\narr = df.to_numpy()\n\n#Code used to visualise the data and check if the import worked correctly\n#Now commented out but retained for debugging.\n#plt.scatter(arr[:,0],arr[:,1], label='True Position')\n#plt.show()\n\n# Create an array to store the Sum of Squared Errors or the cluster inertia\n# for the k-clusters in multiple runs of the K-Means algo with different\n# number of clusters assumed\n\ndistortions = []\n\nfor i in range(1,11):\n km = KMeans(n_clusters=i, init='random',\n n_init=10, max_iter=300,\n tol=1e-04, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\n\n# Find the elbow or knee from the plot of no. of clusters vs distortion for that\n# number. This algorithm locates the knee and that is used to provide the Number\n# of clusters to the main run of K-means algo.\n\nkn = KneeLocator(range(1,11), distortions, curve='convex', direction='decreasing')\nprint('The number of clusters are: ' + str(kn.knee))\n\n#plot the no. of clusters vs distortion graph and annotate the elbow point\n\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\n\n\n\n#From the sciKitLearn clustering algorithms, the K-means clustering\n#algorithm is used.\nkm = KMeans(\n n_clusters=kn.knee, init='random',\n n_init=10, max_iter=300,\n tol=1e-04, random_state=0\n)\n\n#Obtain the cluster labels by running the K-means algorithm with\n# the parameters defined above.\ny_km = km.fit_predict(arr)\n\n#Color Array\ncolors = ['lightgreen','orange','lightblue','azure', 'crimson','lightpink','black','gold', 'coral', 'navy']\n\n#Marker Array\nmarkers = ['s','o','v', '^', '<', '>', 'h', 'H', 'D', 'd']\n\n#Plot the clusters.\nfor i in range(0, 3):\n plt.scatter(\n arr[y_km == i, 0], arr[y_km == i, 1],\n s=50, c=colors[i],\n marker=markers[i], edgecolor='black',\n label='cluster ' + str(i+1)\n)\n\n# Plotting the centroids for all the clusters.\nplt.scatter(\n km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],\n s=250, marker='*',\n c='red', edgecolor='black',\n label='centroids'\n)\n\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
a, b = map(int, input().split())
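# Print a * b only when both factors are single digits (1-9); otherwise print -1.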
def mult(a, b):
if a > 9 or b > 9 or a < 1 or b < 1:
print(-1)
else:
print(a * b)
mult(a, b)
|
normal
|
{
"blob_id": "991fa5f9c83a1821e62f7baacbc56a4d31982312",
"index": 3681,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef mult(a, b):\n if a > 9 or b > 9 or a < 1 or b < 1:\n print(-1)\n else:\n print(a * b)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mult(a, b):\n if a > 9 or b > 9 or a < 1 or b < 1:\n print(-1)\n else:\n print(a * b)\n\n\nmult(a, b)\n",
"step-4": "a, b = map(int, input().split())\n\n\ndef mult(a, b):\n if a > 9 or b > 9 or a < 1 or b < 1:\n print(-1)\n else:\n print(a * b)\n\n\nmult(a, b)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os, datetime
import urllib.request
from flask import (Flask, flash, json, jsonify, redirect, render_template,
request, session, url_for)
import util.database as db
template_path=os.path.dirname(__file__)+"/templates"
file=""
if template_path!="/templates":
app = Flask("__main__",template_folder=os.path.dirname(__file__)+"/templates",static_folder=os.path.dirname(__file__)+"/static")
file = open(os.path.dirname(__file__)+'/data/keys.json')
else:
app = Flask("__main__")
file = open('./data/keys.json')
app.secret_key = os.urandom(32)
content = file.read()
keys = json.loads(content)
# has a 5000 calls/day limit
PIXABAY_KEY = keys['Pixabay']
PIXABAY_STUB = "https://pixabay.com/api/?key=" + PIXABAY_KEY + "&q=" #separate words with "+"
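# Home page: once the user is logged in and has saved finances and a goal,
# recompute goal progress from the months elapsed and the chosen savings percentage.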
@app.route('/')
def home():
if "username" in session:
id_num=db.search_user_list(session["username"], is_usrname=True)[0][2]
finavail=db.search_finance_list(id_num)
goalavail=db.search_goal_list(id_num)
if finavail:
session["finances"]=session["username"]
if goalavail:
session["goals"]=session["username"]
set_goal = db.search_goal_list(id_num)
print(set_goal)
if set_goal != []:
user_id = db.search_user_list(session['username'], is_usrname=True)[0][2]
g = db.search_goal_list(user_id)
b = db.search_finance_list(user_id)
t = db.search_time_list(user_id)
date_now = datetime.date.today()
price = g
perc = g
delta_months = 0
if g != []:
g = g[0][0]
if price != []:
price = price[0][1]
if perc != []:
perc = perc[0][2]
##function to get difference in months between 2 dates
def months_between(date1,date2):
if date1>date2:
date1,date2=date2,date1
m1=date1.year*12+date1.month
m2=date2.year*12+date2.month
months=m2-m1
if date1.day>date2.day:
months-=1
elif date1.day==date2.day:
seconds1=date1.hour*3600+date1.minute+date1.second
seconds2=date2.hour*3600+date2.minute+date2.second
if seconds1>seconds2:
months-=1
return months
if t != []:
t = t[0][0]
delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))
print(delta_months)
img = db.search_image_list(user_id)
if img != []:
img = img[0][0]
if b != []:
bal = b[0][0]
inc = b[0][1]
print(b)
print(g)
print(price)
print(perc)
print(img)
if g or price:
if b:
print("Used the first one")
perc_complete = (delta_months * (perc / 100.0) * inc)/price
print(perc_complete)
if perc_complete > 1:
perc_complete = 1
return render_template('home.html',fin=finavail,goal=goalavail, set_goal= set_goal, goal_name =g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )
return render_template('home.html',fin=finavail,goal=goalavail)
return render_template('home.html',fin=finavail,goal=goalavail)
return render_template('home.html')
@app.route('/register')
def register():
return render_template('register.html')
@app.route('/login')
def login():
return render_template('login.html')
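# Single endpoint for both the registration and login forms; the submit button
# value decides which branch runs.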
@app.route('/auth', methods=['POST'])
def auth():
user = request.form.get("user")
paswrd = request.form.get('pass')
if request.form.get("submit")=="Register":
paswrd2 = request.form.get("pass2")
print(paswrd)
print(paswrd2)
if paswrd != paswrd2:
flash("Passwords Do Not Match")
return redirect(url_for('register'))
if db.register(user, paswrd):
flash("Registered successfully")
session['username'] = request.form['user']
else:
flash("Unable to register the user")
            print("Username has been registered previously!")
            return redirect(url_for('register'))
else:
match=db.search_user_list(user, is_usrname=True)
if len(match)>0:
if match[0][1]==paswrd:
session["username"]=request.form["user"]
else:
flash("wrong Password")
return redirect(url_for('login'))
else:
flash("User not found")
return redirect(url_for('login'))
return redirect(url_for('home'))
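# Finances page: show the stored balance/income plus daily and monthly expenses,
# their importance ratings, and the daily/monthly totals.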
@app.route('/finances')
def finance():
if 'username' not in session:
flash("You must be logged in to access this page")
return redirect(url_for('login'))
user_id = db.search_user_list(session['username'])[0][2]
items = db.search_finance_list(user_id)
daily = db.search_expense_list(user_id, is_id=True)
monthly = db.search_monthly_list(user_id, is_id=True)
ratings = db.search_rating_list(user_id, is_id=True)
print(ratings)
print(f"Unlike month, this is daily: {daily}\n")
w = dict([ (x[0], x[1]) for x in daily ])
s = dict([ (x[0], x[1]) for x in monthly ])
r = dict([ (x[0], x[1]) for x in ratings ])
print(f"THIS is monthly: {monthly}")
print(f"THIS is s: {s}")
print(f"These are the ratings: {r}")
total = 0
m_total = 0
for x in w.values():
total += float(x)
for x in s.values():
m_total += float(x)
if items != []:
bal,income,i = items[0]
diction = {"Balance":bal, "Income":income}
return render_template('findata.html',
diction=diction,
daily=w,
months = s,
total=total,
mtotal = m_total,completed=True, ratings=r)
return render_template('findata.html')
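# Persist the submitted finance form: balance, income, daily and monthly expenses,
# and the importance rating attached to each expense.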
@app.route('/fincalc', methods=['POST'])
def calc():
if 'username' not in session:
flash("You must be logged in to access this page")
return redirect(url_for('login'))
# print(request.form)
session["finances"]=session["username"]
bal = request.form['balance'][1:]
monthly = request.form['monthly-inputs']
income = request.form['income'][1:]
# print(request.form)
s = request.form
d_rates = request.form['daily-importance']
m_rates = request.form['monthly-importance']
print(d_rates)
user_id = db.search_user_list(session['username'])[0][2]
daily_dict = json.loads(d_rates)
monthly_dict = json.loads(m_rates)
print(daily_dict)
print(monthly_dict)
dai_im = dict([x for x in daily_dict.values()]) # {expenseName: rating, expenseName2: rating, ...}
mon_im = dict([x for x in monthly_dict.values()])
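    # Keep a snapshot of the ratings in static/ratings.csv, one row per user id.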
file=os.path.dirname(__file__)+f'/static/ratings.csv'
stringg = "{"
try:
with open(file) as f: # if readable, file already exists
print("File found, not creating...")
f.close()
except Exception as e:
print(e)
with open(file, 'a+') as f: # creates the file
print("File not found, creating...")
f.write(f"ratings,id\n")
f.close()
for item in mon_im:
db.add_rating(item, mon_im[item], user_id)
stringg += "'" + item + "'" + " : " + "'" + str(mon_im[item]) + "'" + " "
for item in dai_im:
db.add_rating(item, dai_im[item], user_id)
stringg += "'" + item + "'" + " : " + "'" + str(dai_im[item]) + "'" + " "
stringg += "}," + str(user_id) + "\n"
with open(file, "r") as f:
lines = f.readlines()
with open(file, "w") as f:
for line in lines:
if str(user_id) != line.strip("\n").split(",")[1]:
f.write(line)
f.write(stringg)
f.close()
daily = request.form['all-inputs']
print(f"This is daily: {monthly}")
daily = json.loads(daily) # dictionary
monthly = json.loads(monthly)
print(f"This is daily now {monthly}")
w = dict([x for x in daily.values()]) # {expense1: $$$, expense2: $$$, ...}
m = dict([x for x in monthly.values()])
print(f"\nThis is calculated m:{m}\n")
db.add_finances(bal, m, income, w, user_id)
flash("Finances updated")
return redirect(url_for('home'))
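# Goal page: recompute progress toward the saved goal from the months elapsed
# since it was set and the chosen percentage of income.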
@app.route('/goals')
def goals():
if 'username' not in session:
flash("You must be logged in to access this page")
return redirect(url_for('login'))
user_id = db.search_user_list(session['username'])[0][2]
g = db.search_goal_list(user_id)
b = db.search_finance_list(user_id)
t = db.search_time_list(user_id)
date_now = datetime.date.today()
price = g
perc = g
delta_months = 0
if g != []:
g = g[0][0]
if price != []:
price = price[0][1]
if perc != []:
perc = perc[0][2]
##function to get difference in months between 2 dates
def months_between(date1,date2):
if date1>date2:
date1,date2=date2,date1
m1=date1.year*12+date1.month
m2=date2.year*12+date2.month
months=m2-m1
if date1.day>date2.day:
months-=1
elif date1.day==date2.day:
seconds1=date1.hour*3600+date1.minute+date1.second
seconds2=date2.hour*3600+date2.minute+date2.second
if seconds1>seconds2:
months-=1
return months
if t != []:
t = t[0][0]
delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))
print(delta_months)
img = db.search_image_list(user_id)
if img != []:
img = img[0][0]
if b != []:
bal = b[0][0]
inc = b[0][1]
print(b)
print(g)
print(price)
print(perc)
print(img)
if g or price:
if b:
print("Used the first one")
perc_complete = (delta_months * (perc / 100.0) * inc)/price
print(perc_complete)
if perc_complete > 1:
perc_complete = 1
return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )
else:
print("Used the second")
return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img)
else:
if b:
return render_template('goals.html', bal=bal, income=inc)
else:
return render_template('goals.html')
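# Save a new goal and fetch an illustrative image for it from Pixabay.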
@app.route('/gcalc', methods=['POST'])
def gcalc():
if 'username' not in session:
flash("You must be logged in to access this page")
return redirect(url_for('login'))
goal_name = request.form['goal']
goal_price = request.form['goal_price'][1:]
percentage = request.form['slide']
print("This is percentage:")
print(percentage)
print("gcalc")
print(goal_name)
print(goal_price)
user_id = db.search_user_list(session['username'])[0][2]
db.add_goals(goal_name, goal_price, percentage, user_id)
a = db.search_image_list(user_id)
print(a)
# optimization to save on api calls
    # cached rows are stored as (url, goal_name, user_id) by add_images, so index 1 holds the goal name
    if a == [] or a[0][1] != goal_name:
try:
l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ', '+') + "&image_type=photo")
p = json.loads(l.read())
img = p['hits'][0]['webformatURL']
except:
return render_template('error.html', err="Cannot connect to API", fix="Try refreshing or contacting the site owner")
else:
        img = a[0][0]
db.add_images(img, goal_name, user_id)
flash(f"Goal for {goal_name} at ${goal_price} has been added!")
return redirect(url_for('home'))
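# Chart pages: each template renders its chart for the user id passed in.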
@app.route('/sankey')
def sankey():
if 'username' not in session:
flash("You must be logged in to access this page")
return redirect(url_for('login'))
user_id = db.search_user_list(session['username'])[0][2]
return render_template('sankey.html',idnum=user_id)
@app.route('/pie')
def pie():
if 'username' not in session:
flash("You must be logged in to access this page")
return redirect(url_for('login'))
user_id = db.search_user_list(session['username'])[0][2]
return render_template('pie.html',idnum=user_id)
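# Area chart data: flag expenses rated 6 or below whose yearly cost is at least 5% of the goal price.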
@app.route('/area')
def area():
if 'username' not in session:
flash("You must be logged in to access this page")
return redirect(url_for('login'))
user_id = db.search_user_list(session['username'])[0][2]
goal=db.search_goal_list(user_id)
if goal == []:
return redirect(url_for('goals'))
daily=db.search_expense_list(user_id)
monthly=db.search_monthly_list(user_id)
dadict={}
modict={}
print(goal)
ratings={}
for names in daily:
dadict[names[0]]=names[1]
for names in monthly:
modict[names[0]]=names[1]
print(dadict,modict)
percent=0
for names in db.search_rating_list(user_id):
print(names)
if names[0] in modict:
percent=(modict[names[0]]*12)/goal[0][1]
if names[0] in dadict:
percent=(dadict[names[0]]*30*12)/goal[0][1]
if names[1]<=6 and percent >=0.05:
ratings[names[0]]=(names[1],percent)
print(ratings)
return render_template('area.html',idnum=user_id,ratings=ratings)
@app.route('/logout')
def logout():
if 'username' in session:
session.pop('username')
return redirect(url_for('home'))
@app.route('/account')
def account():
if 'username' not in session:
flash("You must be logged in to access this page")
return redirect(url_for('login'))
# print(db.search_user_list(session['username']))
user_list = json.dumps(db.search_user_list(ret_all=True))
print(json.dumps(db.search_user_list(ret_all=True)))
return render_template('accounts.html', user_list=user_list)
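# Apply account changes (username/password) and optionally reset the user's saved statistics.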
@app.route('/update', methods=["POST"])
def update():
print('this is the updates')
update_dict = request.form['all-options']
update_dict = json.loads(update_dict)
print(request.form)
user_ids = db.search_user_list(session['username'])
user = user_ids[0][-1]
print(user)
db.update_user_list(update_dict['username'] or user_ids[0][0], update_dict['password'] or user_ids[0][1], user)
db.reset_statistics(user, update_dict['reset'])
session.pop('username')
session['username'] = update_dict['username'] or user_ids[0][0] # change username in session
flash("Account information updated successfully")
return redirect(url_for('home'))
@app.route('/del')
def delete():
if 'username' not in session:
flash("Woops. You can't be here")
return redirect(url_for('login'))
user = db.search_user_list(session['username'])[0][-1]
print(user)
db.update_user_list(None, None, user, rem=True)
flash("User successfully removed")
session.pop('username')
return redirect(url_for('home'))
if __name__ == "__main__":
app.debug = True
app.run()
|
normal
|
{
"blob_id": "5c20eefe8111d44a36e69b873a71377ee7bfa23d",
"index": 6768,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef home():\n if 'username' in session:\n id_num = db.search_user_list(session['username'], is_usrname=True)[0][2\n ]\n finavail = db.search_finance_list(id_num)\n goalavail = db.search_goal_list(id_num)\n if finavail:\n session['finances'] = session['username']\n if goalavail:\n session['goals'] = session['username']\n set_goal = db.search_goal_list(id_num)\n print(set_goal)\n if set_goal != []:\n user_id = db.search_user_list(session['username'], is_usrname=True\n )[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now),\n '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('home.html', fin=finavail, goal=\n goalavail, set_goal=set_goal, goal_name=g,\n goal_price=price, perc_inc=perc, image=img, bal=bal,\n income=inc, months=delta_months, perc_comp=\n perc_complete * 100)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html')\n\n\n<mask token>\n\n\[email protected]('/login')\ndef login():\n return render_template('login.html')\n\n\[email protected]('/auth', methods=['POST'])\ndef auth():\n user = request.form.get('user')\n paswrd = request.form.get('pass')\n if request.form.get('submit') == 'Register':\n paswrd2 = request.form.get('pass2')\n print(paswrd)\n print(paswrd2)\n if paswrd != paswrd2:\n flash('Passwords Do Not Match')\n return redirect(url_for('register'))\n if db.register(user, paswrd):\n flash('Registered successfully')\n session['username'] = request.form['user']\n else:\n flash('Unable to register the user')\n return redirect(url_for('register'))\n print('Username has been registered previously!')\n else:\n match = db.search_user_list(user, is_usrname=True)\n if len(match) > 0:\n if match[0][1] == paswrd:\n session['username'] = request.form['user']\n else:\n flash('wrong Password')\n return redirect(url_for('login'))\n else:\n flash('User not found')\n return redirect(url_for('login'))\n return redirect(url_for('home'))\n\n\[email protected]('/finances')\ndef finance():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n items = db.search_finance_list(user_id)\n daily = db.search_expense_list(user_id, 
is_id=True)\n monthly = db.search_monthly_list(user_id, is_id=True)\n ratings = db.search_rating_list(user_id, is_id=True)\n print(ratings)\n print(f'Unlike month, this is daily: {daily}\\n')\n w = dict([(x[0], x[1]) for x in daily])\n s = dict([(x[0], x[1]) for x in monthly])\n r = dict([(x[0], x[1]) for x in ratings])\n print(f'THIS is monthly: {monthly}')\n print(f'THIS is s: {s}')\n print(f'These are the ratings: {r}')\n total = 0\n m_total = 0\n for x in w.values():\n total += float(x)\n for x in s.values():\n m_total += float(x)\n if items != []:\n bal, income, i = items[0]\n diction = {'Balance': bal, 'Income': income}\n return render_template('findata.html', diction=diction, daily=w,\n months=s, total=total, mtotal=m_total, completed=True, ratings=r)\n return render_template('findata.html')\n\n\[email protected]('/fincalc', methods=['POST'])\ndef calc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n session['finances'] = session['username']\n bal = request.form['balance'][1:]\n monthly = request.form['monthly-inputs']\n income = request.form['income'][1:]\n s = request.form\n d_rates = request.form['daily-importance']\n m_rates = request.form['monthly-importance']\n print(d_rates)\n user_id = db.search_user_list(session['username'])[0][2]\n daily_dict = json.loads(d_rates)\n monthly_dict = json.loads(m_rates)\n print(daily_dict)\n print(monthly_dict)\n dai_im = dict([x for x in daily_dict.values()])\n mon_im = dict([x for x in monthly_dict.values()])\n file = os.path.dirname(__file__) + f'/static/ratings.csv'\n stringg = '{'\n try:\n with open(file) as f:\n print('File found, not creating...')\n f.close()\n except Exception as e:\n print(e)\n with open(file, 'a+') as f:\n print('File not found, creating...')\n f.write(f'ratings,id\\n')\n f.close()\n for item in mon_im:\n db.add_rating(item, mon_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(mon_im[item]\n ) + \"'\" + ' '\n for item in dai_im:\n db.add_rating(item, dai_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(dai_im[item]\n ) + \"'\" + ' '\n stringg += '},' + str(user_id) + '\\n'\n with open(file, 'r') as f:\n lines = f.readlines()\n with open(file, 'w') as f:\n for line in lines:\n if str(user_id) != line.strip('\\n').split(',')[1]:\n f.write(line)\n f.write(stringg)\n f.close()\n daily = request.form['all-inputs']\n print(f'This is daily: {monthly}')\n daily = json.loads(daily)\n monthly = json.loads(monthly)\n print(f'This is daily now {monthly}')\n w = dict([x for x in daily.values()])\n m = dict([x for x in monthly.values()])\n print(f'\\nThis is calculated m:{m}\\n')\n db.add_finances(bal, m, income, w, user_id)\n flash('Finances updated')\n return redirect(url_for('home'))\n\n\[email protected]('/goals')\ndef goals():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n 
months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now), '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img, bal=bal, income=inc, months=\n delta_months, perc_comp=perc_complete * 100)\n else:\n print('Used the second')\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img)\n elif b:\n return render_template('goals.html', bal=bal, income=inc)\n else:\n return render_template('goals.html')\n\n\[email protected]('/gcalc', methods=['POST'])\ndef gcalc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n goal_name = request.form['goal']\n goal_price = request.form['goal_price'][1:]\n percentage = request.form['slide']\n print('This is percentage:')\n print(percentage)\n print('gcalc')\n print(goal_name)\n print(goal_price)\n user_id = db.search_user_list(session['username'])[0][2]\n db.add_goals(goal_name, goal_price, percentage, user_id)\n a = db.search_image_list(user_id)\n print(a)\n if a == [] or a[0][2] != goal_name:\n try:\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ',\n '+') + '&image_type=photo')\n p = json.loads(l.read())\n img = p['hits'][0]['webformatURL']\n except:\n return render_template('error.html', err=\n 'Cannot connect to API', fix=\n 'Try refreshing or contacting the site owner')\n else:\n img = a[0][1]\n db.add_images(img, goal_name, user_id)\n flash(f'Goal for {goal_name} at ${goal_price} has been added!')\n return redirect(url_for('home'))\n\n\[email protected]('/sankey')\ndef sankey():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('sankey.html', idnum=user_id)\n\n\[email protected]('/pie')\ndef pie():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('pie.html', idnum=user_id)\n\n\[email protected]('/area')\ndef area():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n goal = db.search_goal_list(user_id)\n if goal == []:\n return redirect(url_for('goals'))\n daily = db.search_expense_list(user_id)\n monthly = db.search_monthly_list(user_id)\n dadict = {}\n modict = {}\n print(goal)\n ratings = {}\n for names in daily:\n dadict[names[0]] = names[1]\n for names in monthly:\n modict[names[0]] = names[1]\n print(dadict, modict)\n percent = 0\n for names in db.search_rating_list(user_id):\n print(names)\n if names[0] in modict:\n percent = modict[names[0]] * 12 / 
goal[0][1]\n if names[0] in dadict:\n percent = dadict[names[0]] * 30 * 12 / goal[0][1]\n if names[1] <= 6 and percent >= 0.05:\n ratings[names[0]] = names[1], percent\n print(ratings)\n return render_template('area.html', idnum=user_id, ratings=ratings)\n\n\[email protected]('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('home'))\n\n\[email protected]('/account')\ndef account():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_list = json.dumps(db.search_user_list(ret_all=True))\n print(json.dumps(db.search_user_list(ret_all=True)))\n return render_template('accounts.html', user_list=user_list)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n print('this is the updates')\n update_dict = request.form['all-options']\n update_dict = json.loads(update_dict)\n print(request.form)\n user_ids = db.search_user_list(session['username'])\n user = user_ids[0][-1]\n print(user)\n db.update_user_list(update_dict['username'] or user_ids[0][0], \n update_dict['password'] or user_ids[0][1], user)\n db.reset_statistics(user, update_dict['reset'])\n session.pop('username')\n session['username'] = update_dict['username'] or user_ids[0][0]\n flash('Account information updated successfully')\n return redirect(url_for('home'))\n\n\[email protected]('/del')\ndef delete():\n if 'username' not in session:\n flash(\"Woops. You can't be here\")\n return redirect(url_for('login'))\n user = db.search_user_list(session['username'])[0][-1]\n print(user)\n db.update_user_list(None, None, user, rem=True)\n flash('User successfully removed')\n session.pop('username')\n return redirect(url_for('home'))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef home():\n if 'username' in session:\n id_num = db.search_user_list(session['username'], is_usrname=True)[0][2\n ]\n finavail = db.search_finance_list(id_num)\n goalavail = db.search_goal_list(id_num)\n if finavail:\n session['finances'] = session['username']\n if goalavail:\n session['goals'] = session['username']\n set_goal = db.search_goal_list(id_num)\n print(set_goal)\n if set_goal != []:\n user_id = db.search_user_list(session['username'], is_usrname=True\n )[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now),\n '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('home.html', fin=finavail, goal=\n goalavail, set_goal=set_goal, goal_name=g,\n goal_price=price, perc_inc=perc, image=img, bal=bal,\n income=inc, months=delta_months, perc_comp=\n perc_complete * 100)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html')\n\n\[email protected]('/register')\ndef register():\n return render_template('register.html')\n\n\[email protected]('/login')\ndef login():\n return render_template('login.html')\n\n\[email protected]('/auth', methods=['POST'])\ndef auth():\n user = request.form.get('user')\n paswrd = request.form.get('pass')\n if request.form.get('submit') == 'Register':\n paswrd2 = request.form.get('pass2')\n print(paswrd)\n print(paswrd2)\n if paswrd != paswrd2:\n flash('Passwords Do Not Match')\n return redirect(url_for('register'))\n if db.register(user, paswrd):\n flash('Registered successfully')\n session['username'] = request.form['user']\n else:\n flash('Unable to register the user')\n return redirect(url_for('register'))\n print('Username has been registered previously!')\n else:\n match = db.search_user_list(user, is_usrname=True)\n if len(match) > 0:\n if match[0][1] == paswrd:\n session['username'] = request.form['user']\n else:\n flash('wrong Password')\n return redirect(url_for('login'))\n else:\n flash('User not found')\n return redirect(url_for('login'))\n return redirect(url_for('home'))\n\n\[email protected]('/finances')\ndef finance():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n items = 
db.search_finance_list(user_id)\n daily = db.search_expense_list(user_id, is_id=True)\n monthly = db.search_monthly_list(user_id, is_id=True)\n ratings = db.search_rating_list(user_id, is_id=True)\n print(ratings)\n print(f'Unlike month, this is daily: {daily}\\n')\n w = dict([(x[0], x[1]) for x in daily])\n s = dict([(x[0], x[1]) for x in monthly])\n r = dict([(x[0], x[1]) for x in ratings])\n print(f'THIS is monthly: {monthly}')\n print(f'THIS is s: {s}')\n print(f'These are the ratings: {r}')\n total = 0\n m_total = 0\n for x in w.values():\n total += float(x)\n for x in s.values():\n m_total += float(x)\n if items != []:\n bal, income, i = items[0]\n diction = {'Balance': bal, 'Income': income}\n return render_template('findata.html', diction=diction, daily=w,\n months=s, total=total, mtotal=m_total, completed=True, ratings=r)\n return render_template('findata.html')\n\n\[email protected]('/fincalc', methods=['POST'])\ndef calc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n session['finances'] = session['username']\n bal = request.form['balance'][1:]\n monthly = request.form['monthly-inputs']\n income = request.form['income'][1:]\n s = request.form\n d_rates = request.form['daily-importance']\n m_rates = request.form['monthly-importance']\n print(d_rates)\n user_id = db.search_user_list(session['username'])[0][2]\n daily_dict = json.loads(d_rates)\n monthly_dict = json.loads(m_rates)\n print(daily_dict)\n print(monthly_dict)\n dai_im = dict([x for x in daily_dict.values()])\n mon_im = dict([x for x in monthly_dict.values()])\n file = os.path.dirname(__file__) + f'/static/ratings.csv'\n stringg = '{'\n try:\n with open(file) as f:\n print('File found, not creating...')\n f.close()\n except Exception as e:\n print(e)\n with open(file, 'a+') as f:\n print('File not found, creating...')\n f.write(f'ratings,id\\n')\n f.close()\n for item in mon_im:\n db.add_rating(item, mon_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(mon_im[item]\n ) + \"'\" + ' '\n for item in dai_im:\n db.add_rating(item, dai_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(dai_im[item]\n ) + \"'\" + ' '\n stringg += '},' + str(user_id) + '\\n'\n with open(file, 'r') as f:\n lines = f.readlines()\n with open(file, 'w') as f:\n for line in lines:\n if str(user_id) != line.strip('\\n').split(',')[1]:\n f.write(line)\n f.write(stringg)\n f.close()\n daily = request.form['all-inputs']\n print(f'This is daily: {monthly}')\n daily = json.loads(daily)\n monthly = json.loads(monthly)\n print(f'This is daily now {monthly}')\n w = dict([x for x in daily.values()])\n m = dict([x for x in monthly.values()])\n print(f'\\nThis is calculated m:{m}\\n')\n db.add_finances(bal, m, income, w, user_id)\n flash('Finances updated')\n return redirect(url_for('home'))\n\n\[email protected]('/goals')\ndef goals():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = 
date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now), '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img, bal=bal, income=inc, months=\n delta_months, perc_comp=perc_complete * 100)\n else:\n print('Used the second')\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img)\n elif b:\n return render_template('goals.html', bal=bal, income=inc)\n else:\n return render_template('goals.html')\n\n\[email protected]('/gcalc', methods=['POST'])\ndef gcalc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n goal_name = request.form['goal']\n goal_price = request.form['goal_price'][1:]\n percentage = request.form['slide']\n print('This is percentage:')\n print(percentage)\n print('gcalc')\n print(goal_name)\n print(goal_price)\n user_id = db.search_user_list(session['username'])[0][2]\n db.add_goals(goal_name, goal_price, percentage, user_id)\n a = db.search_image_list(user_id)\n print(a)\n if a == [] or a[0][2] != goal_name:\n try:\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ',\n '+') + '&image_type=photo')\n p = json.loads(l.read())\n img = p['hits'][0]['webformatURL']\n except:\n return render_template('error.html', err=\n 'Cannot connect to API', fix=\n 'Try refreshing or contacting the site owner')\n else:\n img = a[0][1]\n db.add_images(img, goal_name, user_id)\n flash(f'Goal for {goal_name} at ${goal_price} has been added!')\n return redirect(url_for('home'))\n\n\[email protected]('/sankey')\ndef sankey():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('sankey.html', idnum=user_id)\n\n\[email protected]('/pie')\ndef pie():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('pie.html', idnum=user_id)\n\n\[email protected]('/area')\ndef area():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n goal = db.search_goal_list(user_id)\n if goal == []:\n return redirect(url_for('goals'))\n daily = db.search_expense_list(user_id)\n monthly = db.search_monthly_list(user_id)\n dadict = {}\n modict = {}\n print(goal)\n ratings = {}\n for names in daily:\n dadict[names[0]] = names[1]\n for names in monthly:\n modict[names[0]] = names[1]\n print(dadict, modict)\n percent = 0\n for names in db.search_rating_list(user_id):\n 
print(names)\n if names[0] in modict:\n percent = modict[names[0]] * 12 / goal[0][1]\n if names[0] in dadict:\n percent = dadict[names[0]] * 30 * 12 / goal[0][1]\n if names[1] <= 6 and percent >= 0.05:\n ratings[names[0]] = names[1], percent\n print(ratings)\n return render_template('area.html', idnum=user_id, ratings=ratings)\n\n\[email protected]('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('home'))\n\n\[email protected]('/account')\ndef account():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_list = json.dumps(db.search_user_list(ret_all=True))\n print(json.dumps(db.search_user_list(ret_all=True)))\n return render_template('accounts.html', user_list=user_list)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n print('this is the updates')\n update_dict = request.form['all-options']\n update_dict = json.loads(update_dict)\n print(request.form)\n user_ids = db.search_user_list(session['username'])\n user = user_ids[0][-1]\n print(user)\n db.update_user_list(update_dict['username'] or user_ids[0][0], \n update_dict['password'] or user_ids[0][1], user)\n db.reset_statistics(user, update_dict['reset'])\n session.pop('username')\n session['username'] = update_dict['username'] or user_ids[0][0]\n flash('Account information updated successfully')\n return redirect(url_for('home'))\n\n\[email protected]('/del')\ndef delete():\n if 'username' not in session:\n flash(\"Woops. You can't be here\")\n return redirect(url_for('login'))\n user = db.search_user_list(session['username'])[0][-1]\n print(user)\n db.update_user_list(None, None, user, rem=True)\n flash('User successfully removed')\n session.pop('username')\n return redirect(url_for('home'))\n\n\n<mask token>\n",
"step-3": "<mask token>\nif template_path != '/templates':\n app = Flask('__main__', template_folder=os.path.dirname(__file__) +\n '/templates', static_folder=os.path.dirname(__file__) + '/static')\n file = open(os.path.dirname(__file__) + '/data/keys.json')\nelse:\n app = Flask('__main__')\n file = open('./data/keys.json')\n<mask token>\n\n\[email protected]('/')\ndef home():\n if 'username' in session:\n id_num = db.search_user_list(session['username'], is_usrname=True)[0][2\n ]\n finavail = db.search_finance_list(id_num)\n goalavail = db.search_goal_list(id_num)\n if finavail:\n session['finances'] = session['username']\n if goalavail:\n session['goals'] = session['username']\n set_goal = db.search_goal_list(id_num)\n print(set_goal)\n if set_goal != []:\n user_id = db.search_user_list(session['username'], is_usrname=True\n )[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now),\n '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('home.html', fin=finavail, goal=\n goalavail, set_goal=set_goal, goal_name=g,\n goal_price=price, perc_inc=perc, image=img, bal=bal,\n income=inc, months=delta_months, perc_comp=\n perc_complete * 100)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html')\n\n\[email protected]('/register')\ndef register():\n return render_template('register.html')\n\n\[email protected]('/login')\ndef login():\n return render_template('login.html')\n\n\[email protected]('/auth', methods=['POST'])\ndef auth():\n user = request.form.get('user')\n paswrd = request.form.get('pass')\n if request.form.get('submit') == 'Register':\n paswrd2 = request.form.get('pass2')\n print(paswrd)\n print(paswrd2)\n if paswrd != paswrd2:\n flash('Passwords Do Not Match')\n return redirect(url_for('register'))\n if db.register(user, paswrd):\n flash('Registered successfully')\n session['username'] = request.form['user']\n else:\n flash('Unable to register the user')\n return redirect(url_for('register'))\n print('Username has been registered previously!')\n else:\n match = db.search_user_list(user, is_usrname=True)\n if len(match) > 0:\n if match[0][1] == paswrd:\n session['username'] = request.form['user']\n else:\n flash('wrong Password')\n return redirect(url_for('login'))\n else:\n flash('User not 
found')\n return redirect(url_for('login'))\n return redirect(url_for('home'))\n\n\[email protected]('/finances')\ndef finance():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n items = db.search_finance_list(user_id)\n daily = db.search_expense_list(user_id, is_id=True)\n monthly = db.search_monthly_list(user_id, is_id=True)\n ratings = db.search_rating_list(user_id, is_id=True)\n print(ratings)\n print(f'Unlike month, this is daily: {daily}\\n')\n w = dict([(x[0], x[1]) for x in daily])\n s = dict([(x[0], x[1]) for x in monthly])\n r = dict([(x[0], x[1]) for x in ratings])\n print(f'THIS is monthly: {monthly}')\n print(f'THIS is s: {s}')\n print(f'These are the ratings: {r}')\n total = 0\n m_total = 0\n for x in w.values():\n total += float(x)\n for x in s.values():\n m_total += float(x)\n if items != []:\n bal, income, i = items[0]\n diction = {'Balance': bal, 'Income': income}\n return render_template('findata.html', diction=diction, daily=w,\n months=s, total=total, mtotal=m_total, completed=True, ratings=r)\n return render_template('findata.html')\n\n\[email protected]('/fincalc', methods=['POST'])\ndef calc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n session['finances'] = session['username']\n bal = request.form['balance'][1:]\n monthly = request.form['monthly-inputs']\n income = request.form['income'][1:]\n s = request.form\n d_rates = request.form['daily-importance']\n m_rates = request.form['monthly-importance']\n print(d_rates)\n user_id = db.search_user_list(session['username'])[0][2]\n daily_dict = json.loads(d_rates)\n monthly_dict = json.loads(m_rates)\n print(daily_dict)\n print(monthly_dict)\n dai_im = dict([x for x in daily_dict.values()])\n mon_im = dict([x for x in monthly_dict.values()])\n file = os.path.dirname(__file__) + f'/static/ratings.csv'\n stringg = '{'\n try:\n with open(file) as f:\n print('File found, not creating...')\n f.close()\n except Exception as e:\n print(e)\n with open(file, 'a+') as f:\n print('File not found, creating...')\n f.write(f'ratings,id\\n')\n f.close()\n for item in mon_im:\n db.add_rating(item, mon_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(mon_im[item]\n ) + \"'\" + ' '\n for item in dai_im:\n db.add_rating(item, dai_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(dai_im[item]\n ) + \"'\" + ' '\n stringg += '},' + str(user_id) + '\\n'\n with open(file, 'r') as f:\n lines = f.readlines()\n with open(file, 'w') as f:\n for line in lines:\n if str(user_id) != line.strip('\\n').split(',')[1]:\n f.write(line)\n f.write(stringg)\n f.close()\n daily = request.form['all-inputs']\n print(f'This is daily: {monthly}')\n daily = json.loads(daily)\n monthly = json.loads(monthly)\n print(f'This is daily now {monthly}')\n w = dict([x for x in daily.values()])\n m = dict([x for x in monthly.values()])\n print(f'\\nThis is calculated m:{m}\\n')\n db.add_finances(bal, m, income, w, user_id)\n flash('Finances updated')\n return redirect(url_for('home'))\n\n\[email protected]('/goals')\ndef goals():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n 
date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now), '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img, bal=bal, income=inc, months=\n delta_months, perc_comp=perc_complete * 100)\n else:\n print('Used the second')\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img)\n elif b:\n return render_template('goals.html', bal=bal, income=inc)\n else:\n return render_template('goals.html')\n\n\[email protected]('/gcalc', methods=['POST'])\ndef gcalc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n goal_name = request.form['goal']\n goal_price = request.form['goal_price'][1:]\n percentage = request.form['slide']\n print('This is percentage:')\n print(percentage)\n print('gcalc')\n print(goal_name)\n print(goal_price)\n user_id = db.search_user_list(session['username'])[0][2]\n db.add_goals(goal_name, goal_price, percentage, user_id)\n a = db.search_image_list(user_id)\n print(a)\n if a == [] or a[0][2] != goal_name:\n try:\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ',\n '+') + '&image_type=photo')\n p = json.loads(l.read())\n img = p['hits'][0]['webformatURL']\n except:\n return render_template('error.html', err=\n 'Cannot connect to API', fix=\n 'Try refreshing or contacting the site owner')\n else:\n img = a[0][1]\n db.add_images(img, goal_name, user_id)\n flash(f'Goal for {goal_name} at ${goal_price} has been added!')\n return redirect(url_for('home'))\n\n\[email protected]('/sankey')\ndef sankey():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('sankey.html', idnum=user_id)\n\n\[email protected]('/pie')\ndef pie():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('pie.html', idnum=user_id)\n\n\[email protected]('/area')\ndef area():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n goal = db.search_goal_list(user_id)\n if goal == []:\n return redirect(url_for('goals'))\n daily = 
db.search_expense_list(user_id)\n monthly = db.search_monthly_list(user_id)\n dadict = {}\n modict = {}\n print(goal)\n ratings = {}\n for names in daily:\n dadict[names[0]] = names[1]\n for names in monthly:\n modict[names[0]] = names[1]\n print(dadict, modict)\n percent = 0\n for names in db.search_rating_list(user_id):\n print(names)\n if names[0] in modict:\n percent = modict[names[0]] * 12 / goal[0][1]\n if names[0] in dadict:\n percent = dadict[names[0]] * 30 * 12 / goal[0][1]\n if names[1] <= 6 and percent >= 0.05:\n ratings[names[0]] = names[1], percent\n print(ratings)\n return render_template('area.html', idnum=user_id, ratings=ratings)\n\n\[email protected]('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('home'))\n\n\[email protected]('/account')\ndef account():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_list = json.dumps(db.search_user_list(ret_all=True))\n print(json.dumps(db.search_user_list(ret_all=True)))\n return render_template('accounts.html', user_list=user_list)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n print('this is the updates')\n update_dict = request.form['all-options']\n update_dict = json.loads(update_dict)\n print(request.form)\n user_ids = db.search_user_list(session['username'])\n user = user_ids[0][-1]\n print(user)\n db.update_user_list(update_dict['username'] or user_ids[0][0], \n update_dict['password'] or user_ids[0][1], user)\n db.reset_statistics(user, update_dict['reset'])\n session.pop('username')\n session['username'] = update_dict['username'] or user_ids[0][0]\n flash('Account information updated successfully')\n return redirect(url_for('home'))\n\n\[email protected]('/del')\ndef delete():\n if 'username' not in session:\n flash(\"Woops. You can't be here\")\n return redirect(url_for('login'))\n user = db.search_user_list(session['username'])[0][-1]\n print(user)\n db.update_user_list(None, None, user, rem=True)\n flash('User successfully removed')\n session.pop('username')\n return redirect(url_for('home'))\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n",
"step-4": "import os, datetime\nimport urllib\nfrom flask import Flask, flash, json, jsonify, redirect, render_template, request, session, url_for\nimport util.database as db\ntemplate_path = os.path.dirname(__file__) + '/templates'\nfile = ''\nif template_path != '/templates':\n app = Flask('__main__', template_folder=os.path.dirname(__file__) +\n '/templates', static_folder=os.path.dirname(__file__) + '/static')\n file = open(os.path.dirname(__file__) + '/data/keys.json')\nelse:\n app = Flask('__main__')\n file = open('./data/keys.json')\napp.secret_key = os.urandom(32)\ncontent = file.read()\nkeys = json.loads(content)\nPIXABAY_KEY = keys['Pixabay']\nPIXABAY_STUB = 'https://pixabay.com/api/?key=' + PIXABAY_KEY + '&q='\n\n\[email protected]('/')\ndef home():\n if 'username' in session:\n id_num = db.search_user_list(session['username'], is_usrname=True)[0][2\n ]\n finavail = db.search_finance_list(id_num)\n goalavail = db.search_goal_list(id_num)\n if finavail:\n session['finances'] = session['username']\n if goalavail:\n session['goals'] = session['username']\n set_goal = db.search_goal_list(id_num)\n print(set_goal)\n if set_goal != []:\n user_id = db.search_user_list(session['username'], is_usrname=True\n )[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now),\n '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('home.html', fin=finavail, goal=\n goalavail, set_goal=set_goal, goal_name=g,\n goal_price=price, perc_inc=perc, image=img, bal=bal,\n income=inc, months=delta_months, perc_comp=\n perc_complete * 100)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html', fin=finavail, goal=goalavail)\n return render_template('home.html')\n\n\[email protected]('/register')\ndef register():\n return render_template('register.html')\n\n\[email protected]('/login')\ndef login():\n return render_template('login.html')\n\n\[email protected]('/auth', methods=['POST'])\ndef auth():\n user = request.form.get('user')\n paswrd = request.form.get('pass')\n if request.form.get('submit') == 'Register':\n paswrd2 = request.form.get('pass2')\n print(paswrd)\n print(paswrd2)\n if paswrd != paswrd2:\n flash('Passwords Do Not Match')\n return redirect(url_for('register'))\n if db.register(user, paswrd):\n flash('Registered successfully')\n session['username'] = request.form['user']\n 
else:\n flash('Unable to register the user')\n return redirect(url_for('register'))\n print('Username has been registered previously!')\n else:\n match = db.search_user_list(user, is_usrname=True)\n if len(match) > 0:\n if match[0][1] == paswrd:\n session['username'] = request.form['user']\n else:\n flash('wrong Password')\n return redirect(url_for('login'))\n else:\n flash('User not found')\n return redirect(url_for('login'))\n return redirect(url_for('home'))\n\n\[email protected]('/finances')\ndef finance():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n items = db.search_finance_list(user_id)\n daily = db.search_expense_list(user_id, is_id=True)\n monthly = db.search_monthly_list(user_id, is_id=True)\n ratings = db.search_rating_list(user_id, is_id=True)\n print(ratings)\n print(f'Unlike month, this is daily: {daily}\\n')\n w = dict([(x[0], x[1]) for x in daily])\n s = dict([(x[0], x[1]) for x in monthly])\n r = dict([(x[0], x[1]) for x in ratings])\n print(f'THIS is monthly: {monthly}')\n print(f'THIS is s: {s}')\n print(f'These are the ratings: {r}')\n total = 0\n m_total = 0\n for x in w.values():\n total += float(x)\n for x in s.values():\n m_total += float(x)\n if items != []:\n bal, income, i = items[0]\n diction = {'Balance': bal, 'Income': income}\n return render_template('findata.html', diction=diction, daily=w,\n months=s, total=total, mtotal=m_total, completed=True, ratings=r)\n return render_template('findata.html')\n\n\[email protected]('/fincalc', methods=['POST'])\ndef calc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n session['finances'] = session['username']\n bal = request.form['balance'][1:]\n monthly = request.form['monthly-inputs']\n income = request.form['income'][1:]\n s = request.form\n d_rates = request.form['daily-importance']\n m_rates = request.form['monthly-importance']\n print(d_rates)\n user_id = db.search_user_list(session['username'])[0][2]\n daily_dict = json.loads(d_rates)\n monthly_dict = json.loads(m_rates)\n print(daily_dict)\n print(monthly_dict)\n dai_im = dict([x for x in daily_dict.values()])\n mon_im = dict([x for x in monthly_dict.values()])\n file = os.path.dirname(__file__) + f'/static/ratings.csv'\n stringg = '{'\n try:\n with open(file) as f:\n print('File found, not creating...')\n f.close()\n except Exception as e:\n print(e)\n with open(file, 'a+') as f:\n print('File not found, creating...')\n f.write(f'ratings,id\\n')\n f.close()\n for item in mon_im:\n db.add_rating(item, mon_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(mon_im[item]\n ) + \"'\" + ' '\n for item in dai_im:\n db.add_rating(item, dai_im[item], user_id)\n stringg += \"'\" + item + \"'\" + ' : ' + \"'\" + str(dai_im[item]\n ) + \"'\" + ' '\n stringg += '},' + str(user_id) + '\\n'\n with open(file, 'r') as f:\n lines = f.readlines()\n with open(file, 'w') as f:\n for line in lines:\n if str(user_id) != line.strip('\\n').split(',')[1]:\n f.write(line)\n f.write(stringg)\n f.close()\n daily = request.form['all-inputs']\n print(f'This is daily: {monthly}')\n daily = json.loads(daily)\n monthly = json.loads(monthly)\n print(f'This is daily now {monthly}')\n w = dict([x for x in daily.values()])\n m = dict([x for x in monthly.values()])\n print(f'\\nThis is calculated m:{m}\\n')\n db.add_finances(bal, m, income, w, user_id)\n 
flash('Finances updated')\n return redirect(url_for('home'))\n\n\[email protected]('/goals')\ndef goals():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n g = db.search_goal_list(user_id)\n b = db.search_finance_list(user_id)\n t = db.search_time_list(user_id)\n date_now = datetime.date.today()\n price = g\n perc = g\n delta_months = 0\n if g != []:\n g = g[0][0]\n if price != []:\n price = price[0][1]\n if perc != []:\n perc = perc[0][2]\n\n def months_between(date1, date2):\n if date1 > date2:\n date1, date2 = date2, date1\n m1 = date1.year * 12 + date1.month\n m2 = date2.year * 12 + date2.month\n months = m2 - m1\n if date1.day > date2.day:\n months -= 1\n elif date1.day == date2.day:\n seconds1 = date1.hour * 3600 + date1.minute + date1.second\n seconds2 = date2.hour * 3600 + date2.minute + date2.second\n if seconds1 > seconds2:\n months -= 1\n return months\n if t != []:\n t = t[0][0]\n delta_months = months_between(datetime.datetime.strptime(t,\n '%Y-%m-%d'), datetime.datetime.strptime(str(date_now), '%Y-%m-%d'))\n print(delta_months)\n img = db.search_image_list(user_id)\n if img != []:\n img = img[0][0]\n if b != []:\n bal = b[0][0]\n inc = b[0][1]\n print(b)\n print(g)\n print(price)\n print(perc)\n print(img)\n if g or price:\n if b:\n print('Used the first one')\n perc_complete = delta_months * (perc / 100.0) * inc / price\n print(perc_complete)\n if perc_complete > 1:\n perc_complete = 1\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img, bal=bal, income=inc, months=\n delta_months, perc_comp=perc_complete * 100)\n else:\n print('Used the second')\n return render_template('goals.html', goal=g, goal_price=price,\n perc_inc=perc, image=img)\n elif b:\n return render_template('goals.html', bal=bal, income=inc)\n else:\n return render_template('goals.html')\n\n\[email protected]('/gcalc', methods=['POST'])\ndef gcalc():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n goal_name = request.form['goal']\n goal_price = request.form['goal_price'][1:]\n percentage = request.form['slide']\n print('This is percentage:')\n print(percentage)\n print('gcalc')\n print(goal_name)\n print(goal_price)\n user_id = db.search_user_list(session['username'])[0][2]\n db.add_goals(goal_name, goal_price, percentage, user_id)\n a = db.search_image_list(user_id)\n print(a)\n if a == [] or a[0][2] != goal_name:\n try:\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ',\n '+') + '&image_type=photo')\n p = json.loads(l.read())\n img = p['hits'][0]['webformatURL']\n except:\n return render_template('error.html', err=\n 'Cannot connect to API', fix=\n 'Try refreshing or contacting the site owner')\n else:\n img = a[0][1]\n db.add_images(img, goal_name, user_id)\n flash(f'Goal for {goal_name} at ${goal_price} has been added!')\n return redirect(url_for('home'))\n\n\[email protected]('/sankey')\ndef sankey():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n return render_template('sankey.html', idnum=user_id)\n\n\[email protected]('/pie')\ndef pie():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = 
db.search_user_list(session['username'])[0][2]\n return render_template('pie.html', idnum=user_id)\n\n\[email protected]('/area')\ndef area():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_id = db.search_user_list(session['username'])[0][2]\n goal = db.search_goal_list(user_id)\n if goal == []:\n return redirect(url_for('goals'))\n daily = db.search_expense_list(user_id)\n monthly = db.search_monthly_list(user_id)\n dadict = {}\n modict = {}\n print(goal)\n ratings = {}\n for names in daily:\n dadict[names[0]] = names[1]\n for names in monthly:\n modict[names[0]] = names[1]\n print(dadict, modict)\n percent = 0\n for names in db.search_rating_list(user_id):\n print(names)\n if names[0] in modict:\n percent = modict[names[0]] * 12 / goal[0][1]\n if names[0] in dadict:\n percent = dadict[names[0]] * 30 * 12 / goal[0][1]\n if names[1] <= 6 and percent >= 0.05:\n ratings[names[0]] = names[1], percent\n print(ratings)\n return render_template('area.html', idnum=user_id, ratings=ratings)\n\n\[email protected]('/logout')\ndef logout():\n if 'username' in session:\n session.pop('username')\n return redirect(url_for('home'))\n\n\[email protected]('/account')\ndef account():\n if 'username' not in session:\n flash('You must be logged in to access this page')\n return redirect(url_for('login'))\n user_list = json.dumps(db.search_user_list(ret_all=True))\n print(json.dumps(db.search_user_list(ret_all=True)))\n return render_template('accounts.html', user_list=user_list)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n print('this is the updates')\n update_dict = request.form['all-options']\n update_dict = json.loads(update_dict)\n print(request.form)\n user_ids = db.search_user_list(session['username'])\n user = user_ids[0][-1]\n print(user)\n db.update_user_list(update_dict['username'] or user_ids[0][0], \n update_dict['password'] or user_ids[0][1], user)\n db.reset_statistics(user, update_dict['reset'])\n session.pop('username')\n session['username'] = update_dict['username'] or user_ids[0][0]\n flash('Account information updated successfully')\n return redirect(url_for('home'))\n\n\[email protected]('/del')\ndef delete():\n if 'username' not in session:\n flash(\"Woops. You can't be here\")\n return redirect(url_for('login'))\n user = db.search_user_list(session['username'])[0][-1]\n print(user)\n db.update_user_list(None, None, user, rem=True)\n flash('User successfully removed')\n session.pop('username')\n return redirect(url_for('home'))\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n",
"step-5": "import os, datetime\r\nimport urllib\r\n\r\nfrom flask import (Flask, flash, json, jsonify, redirect, render_template,\r\n request, session, url_for)\r\n\r\nimport util.database as db\r\n\r\ntemplate_path=os.path.dirname(__file__)+\"/templates\"\r\nfile=\"\"\r\nif template_path!=\"/templates\":\r\n app = Flask(\"__main__\",template_folder=os.path.dirname(__file__)+\"/templates\",static_folder=os.path.dirname(__file__)+\"/static\")\r\n file = open(os.path.dirname(__file__)+'/data/keys.json')\r\nelse:\r\n app = Flask(\"__main__\")\r\n file = open('./data/keys.json')\r\n\r\napp.secret_key = os.urandom(32)\r\n\r\n\r\ncontent = file.read()\r\nkeys = json.loads(content)\r\n\r\n# has a 5000 calls/day limit\r\nPIXABAY_KEY = keys['Pixabay']\r\nPIXABAY_STUB = \"https://pixabay.com/api/?key=\" + PIXABAY_KEY + \"&q=\" #separate words with \"+\"\r\[email protected]('/')\r\ndef home():\r\n if \"username\" in session:\r\n id_num=db.search_user_list(session[\"username\"], is_usrname=True)[0][2]\r\n finavail=db.search_finance_list(id_num)\r\n goalavail=db.search_goal_list(id_num)\r\n if finavail:\r\n session[\"finances\"]=session[\"username\"]\r\n if goalavail:\r\n session[\"goals\"]=session[\"username\"]\r\n set_goal = db.search_goal_list(id_num)\r\n print(set_goal)\r\n if set_goal != []:\r\n user_id = db.search_user_list(session['username'], is_usrname=True)[0][2]\r\n g = db.search_goal_list(user_id)\r\n b = db.search_finance_list(user_id)\r\n t = db.search_time_list(user_id)\r\n date_now = datetime.date.today()\r\n price = g\r\n perc = g\r\n delta_months = 0\r\n if g != []:\r\n g = g[0][0]\r\n if price != []:\r\n price = price[0][1]\r\n if perc != []:\r\n perc = perc[0][2]\r\n ##function to get difference in months between 2 dates\r\n def months_between(date1,date2):\r\n if date1>date2:\r\n date1,date2=date2,date1\r\n m1=date1.year*12+date1.month\r\n m2=date2.year*12+date2.month\r\n months=m2-m1\r\n if date1.day>date2.day:\r\n months-=1\r\n elif date1.day==date2.day:\r\n seconds1=date1.hour*3600+date1.minute+date1.second\r\n seconds2=date2.hour*3600+date2.minute+date2.second\r\n if seconds1>seconds2:\r\n months-=1\r\n return months\r\n\r\n if t != []:\r\n t = t[0][0]\r\n delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))\r\n print(delta_months)\r\n\r\n img = db.search_image_list(user_id)\r\n if img != []:\r\n img = img[0][0]\r\n if b != []:\r\n bal = b[0][0]\r\n inc = b[0][1]\r\n print(b)\r\n print(g)\r\n print(price)\r\n print(perc)\r\n print(img)\r\n if g or price:\r\n if b:\r\n print(\"Used the first one\")\r\n perc_complete = (delta_months * (perc / 100.0) * inc)/price\r\n print(perc_complete)\r\n if perc_complete > 1:\r\n perc_complete = 1\r\n return render_template('home.html',fin=finavail,goal=goalavail, set_goal= set_goal, goal_name =g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )\r\n return render_template('home.html',fin=finavail,goal=goalavail)\r\n return render_template('home.html',fin=finavail,goal=goalavail)\r\n return render_template('home.html')\r\n\r\[email protected]('/register')\r\ndef register():\r\n return render_template('register.html')\r\[email protected]('/login')\r\ndef login():\r\n return render_template('login.html')\r\[email protected]('/auth', methods=['POST'])\r\ndef auth():\r\n user = request.form.get(\"user\")\r\n paswrd = request.form.get('pass')\r\n if request.form.get(\"submit\")==\"Register\":\r\n paswrd2 = 
request.form.get(\"pass2\")\r\n print(paswrd)\r\n print(paswrd2)\r\n if paswrd != paswrd2:\r\n flash(\"Passwords Do Not Match\")\r\n return redirect(url_for('register'))\r\n if db.register(user, paswrd):\r\n flash(\"Registered successfully\")\r\n session['username'] = request.form['user']\r\n else:\r\n flash(\"Unable to register the user\")\r\n return redirect(url_for('register'))\r\n print(\"Username has been registered previously!\")\r\n else:\r\n match=db.search_user_list(user, is_usrname=True)\r\n if len(match)>0:\r\n if match[0][1]==paswrd:\r\n session[\"username\"]=request.form[\"user\"]\r\n else:\r\n flash(\"wrong Password\")\r\n return redirect(url_for('login'))\r\n else:\r\n flash(\"User not found\")\r\n return redirect(url_for('login'))\r\n return redirect(url_for('home'))\r\n\r\[email protected]('/finances')\r\ndef finance():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n items = db.search_finance_list(user_id)\r\n daily = db.search_expense_list(user_id, is_id=True)\r\n monthly = db.search_monthly_list(user_id, is_id=True)\r\n ratings = db.search_rating_list(user_id, is_id=True)\r\n print(ratings)\r\n print(f\"Unlike month, this is daily: {daily}\\n\")\r\n w = dict([ (x[0], x[1]) for x in daily ])\r\n s = dict([ (x[0], x[1]) for x in monthly ])\r\n r = dict([ (x[0], x[1]) for x in ratings ])\r\n print(f\"THIS is monthly: {monthly}\")\r\n print(f\"THIS is s: {s}\")\r\n print(f\"These are the ratings: {r}\")\r\n total = 0\r\n m_total = 0\r\n for x in w.values():\r\n total += float(x)\r\n for x in s.values():\r\n m_total += float(x)\r\n if items != []:\r\n bal,income,i = items[0]\r\n diction = {\"Balance\":bal, \"Income\":income}\r\n return render_template('findata.html',\r\n diction=diction,\r\n daily=w,\r\n months = s,\r\n total=total,\r\n mtotal = m_total,completed=True, ratings=r)\r\n return render_template('findata.html')\r\n\r\[email protected]('/fincalc', methods=['POST'])\r\ndef calc():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n # print(request.form)\r\n session[\"finances\"]=session[\"username\"]\r\n bal = request.form['balance'][1:]\r\n monthly = request.form['monthly-inputs']\r\n income = request.form['income'][1:]\r\n # print(request.form)\r\n s = request.form\r\n d_rates = request.form['daily-importance']\r\n m_rates = request.form['monthly-importance']\r\n print(d_rates)\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n daily_dict = json.loads(d_rates)\r\n monthly_dict = json.loads(m_rates)\r\n print(daily_dict)\r\n print(monthly_dict)\r\n\r\n dai_im = dict([x for x in daily_dict.values()]) # {expenseName: rating, expenseName2: rating, ...}\r\n mon_im = dict([x for x in monthly_dict.values()])\r\n file=os.path.dirname(__file__)+f'/static/ratings.csv'\r\n stringg = \"{\"\r\n try:\r\n with open(file) as f: # if readable, file already exists\r\n print(\"File found, not creating...\")\r\n f.close()\r\n except Exception as e:\r\n print(e)\r\n with open(file, 'a+') as f: # creates the file\r\n print(\"File not found, creating...\")\r\n f.write(f\"ratings,id\\n\")\r\n f.close()\r\n for item in mon_im:\r\n db.add_rating(item, mon_im[item], user_id)\r\n stringg += \"'\" + item + \"'\" + \" : \" + \"'\" + str(mon_im[item]) + \"'\" + \" \"\r\n\r\n for item in dai_im:\r\n db.add_rating(item, dai_im[item], user_id)\r\n stringg += 
\"'\" + item + \"'\" + \" : \" + \"'\" + str(dai_im[item]) + \"'\" + \" \"\r\n stringg += \"},\" + str(user_id) + \"\\n\"\r\n\r\n with open(file, \"r\") as f:\r\n lines = f.readlines()\r\n with open(file, \"w\") as f:\r\n for line in lines:\r\n if str(user_id) != line.strip(\"\\n\").split(\",\")[1]:\r\n f.write(line)\r\n f.write(stringg)\r\n f.close()\r\n daily = request.form['all-inputs']\r\n print(f\"This is daily: {monthly}\")\r\n daily = json.loads(daily) # dictionary\r\n monthly = json.loads(monthly)\r\n print(f\"This is daily now {monthly}\")\r\n w = dict([x for x in daily.values()]) # {expense1: $$$, expense2: $$$, ...}\r\n m = dict([x for x in monthly.values()])\r\n print(f\"\\nThis is calculated m:{m}\\n\")\r\n db.add_finances(bal, m, income, w, user_id)\r\n flash(\"Finances updated\")\r\n return redirect(url_for('home'))\r\n\r\[email protected]('/goals')\r\ndef goals():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n g = db.search_goal_list(user_id)\r\n b = db.search_finance_list(user_id)\r\n t = db.search_time_list(user_id)\r\n date_now = datetime.date.today()\r\n price = g\r\n perc = g\r\n delta_months = 0\r\n if g != []:\r\n g = g[0][0]\r\n if price != []:\r\n price = price[0][1]\r\n if perc != []:\r\n perc = perc[0][2]\r\n ##function to get difference in months between 2 dates\r\n def months_between(date1,date2):\r\n if date1>date2:\r\n date1,date2=date2,date1\r\n m1=date1.year*12+date1.month\r\n m2=date2.year*12+date2.month\r\n months=m2-m1\r\n if date1.day>date2.day:\r\n months-=1\r\n elif date1.day==date2.day:\r\n seconds1=date1.hour*3600+date1.minute+date1.second\r\n seconds2=date2.hour*3600+date2.minute+date2.second\r\n if seconds1>seconds2:\r\n months-=1\r\n return months\r\n\r\n if t != []:\r\n t = t[0][0]\r\n delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))\r\n print(delta_months)\r\n\r\n img = db.search_image_list(user_id)\r\n if img != []:\r\n img = img[0][0]\r\n if b != []:\r\n bal = b[0][0]\r\n inc = b[0][1]\r\n print(b)\r\n print(g)\r\n print(price)\r\n print(perc)\r\n print(img)\r\n if g or price:\r\n if b:\r\n print(\"Used the first one\")\r\n perc_complete = (delta_months * (perc / 100.0) * inc)/price\r\n print(perc_complete)\r\n if perc_complete > 1:\r\n perc_complete = 1\r\n return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )\r\n else:\r\n print(\"Used the second\")\r\n return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img)\r\n else:\r\n if b:\r\n return render_template('goals.html', bal=bal, income=inc)\r\n else:\r\n return render_template('goals.html')\r\n\r\[email protected]('/gcalc', methods=['POST'])\r\ndef gcalc():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n goal_name = request.form['goal']\r\n goal_price = request.form['goal_price'][1:]\r\n percentage = request.form['slide']\r\n print(\"This is percentage:\")\r\n print(percentage)\r\n print(\"gcalc\")\r\n print(goal_name)\r\n print(goal_price)\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n db.add_goals(goal_name, goal_price, percentage, user_id)\r\n a = db.search_image_list(user_id)\r\n print(a)\r\n # optimization to save on api 
calls\r\n if a == [] or a[0][2] != goal_name:\r\n try:\r\n l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ', '+') + \"&image_type=photo\")\r\n p = json.loads(l.read())\r\n img = p['hits'][0]['webformatURL']\r\n except:\r\n return render_template('error.html', err=\"Cannot connect to API\", fix=\"Try refreshing or contacting the site owner\")\r\n else:\r\n img = a[0][1]\r\n db.add_images(img, goal_name, user_id)\r\n flash(f\"Goal for {goal_name} at ${goal_price} has been added!\")\r\n return redirect(url_for('home'))\r\n\r\[email protected]('/sankey')\r\ndef sankey():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n return render_template('sankey.html',idnum=user_id)\r\[email protected]('/pie')\r\ndef pie():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n return render_template('pie.html',idnum=user_id)\r\[email protected]('/area')\r\ndef area():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n\r\n user_id = db.search_user_list(session['username'])[0][2]\r\n goal=db.search_goal_list(user_id)\r\n if goal == []:\r\n return redirect(url_for('goals'))\r\n daily=db.search_expense_list(user_id)\r\n monthly=db.search_monthly_list(user_id)\r\n dadict={}\r\n modict={}\r\n print(goal)\r\n ratings={}\r\n for names in daily:\r\n dadict[names[0]]=names[1]\r\n for names in monthly:\r\n modict[names[0]]=names[1]\r\n print(dadict,modict)\r\n percent=0\r\n for names in db.search_rating_list(user_id):\r\n print(names)\r\n if names[0] in modict:\r\n percent=(modict[names[0]]*12)/goal[0][1]\r\n if names[0] in dadict:\r\n percent=(dadict[names[0]]*30*12)/goal[0][1]\r\n if names[1]<=6 and percent >=0.05:\r\n ratings[names[0]]=(names[1],percent)\r\n print(ratings)\r\n return render_template('area.html',idnum=user_id,ratings=ratings)\r\[email protected]('/logout')\r\ndef logout():\r\n if 'username' in session:\r\n session.pop('username')\r\n return redirect(url_for('home'))\r\n\r\[email protected]('/account')\r\ndef account():\r\n if 'username' not in session:\r\n flash(\"You must be logged in to access this page\")\r\n return redirect(url_for('login'))\r\n # print(db.search_user_list(session['username']))\r\n user_list = json.dumps(db.search_user_list(ret_all=True))\r\n print(json.dumps(db.search_user_list(ret_all=True)))\r\n return render_template('accounts.html', user_list=user_list)\r\n\r\[email protected]('/update', methods=[\"POST\"])\r\ndef update():\r\n print('this is the updates')\r\n update_dict = request.form['all-options']\r\n update_dict = json.loads(update_dict)\r\n print(request.form)\r\n user_ids = db.search_user_list(session['username'])\r\n user = user_ids[0][-1]\r\n print(user)\r\n db.update_user_list(update_dict['username'] or user_ids[0][0], update_dict['password'] or user_ids[0][1], user)\r\n db.reset_statistics(user, update_dict['reset'])\r\n session.pop('username')\r\n session['username'] = update_dict['username'] or user_ids[0][0] # change username in session\r\n flash(\"Account information updated successfully\")\r\n return redirect(url_for('home'))\r\n\r\[email protected]('/del')\r\ndef delete():\r\n if 'username' not in session:\r\n flash(\"Woops. 
You can't be here\")\r\n return redirect(url_for('login'))\r\n user = db.search_user_list(session['username'])[0][-1]\r\n print(user)\r\n db.update_user_list(None, None, user, rem=True)\r\n flash(\"User successfully removed\")\r\n session.pop('username')\r\n return redirect(url_for('home'))\r\n\r\nif __name__ == \"__main__\":\r\n app.debug = True\r\n app.run()\r\n",
"step-ids": [
14,
15,
16,
18,
19
]
}
|
[
14,
15,
16,
18,
19
] |
from connect_to_elasticsearch import *
# returns the names of all indices in the Elasticsearch server
def getAllIndiciesNames():
    indicies = set()
    for index in connect_to_elasticsearch().indices.get_alias("*"):
        indicies.add(index)
        print(index)
return indicies
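
# Minimal usage sketch (assumes connect_to_elasticsearch() returns a standard
# elasticsearch.Elasticsearch client; the helper module itself is not shown here):
if __name__ == "__main__":
    names = getAllIndiciesNames()
    print("found %d indices" % len(names))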
|
normal
|
{
"blob_id": "23c75840efd9a8fd68ac22d004bfe3b390fbe612",
"index": 2314,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getAllIndiciesNames():\n indicies = set()\n for index in connect_to_elasticsearch().indices.get_alias('*'):\n indicies.add(index)\n print(index)\n return indicies\n",
"step-3": "from connect_to_elasticsearch import *\n\n\ndef getAllIndiciesNames():\n indicies = set()\n for index in connect_to_elasticsearch().indices.get_alias('*'):\n indicies.add(index)\n print(index)\n return indicies\n",
"step-4": "from connect_to_elasticsearch import *\r\n\r\n\r\n# returns the name of all indices in the elasticsearch server \r\ndef getAllIndiciesNames(): \r\n indicies = set()\r\n for index in connect_to_elasticsearch().indices.get_alias( \"*\" ):\r\n indicies.add( index )\r\n print( index )\r\n return indicies\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import urllib2
import urllib
import json
import gzip
from StringIO import StringIO
service_url = 'https://babelfy.io/v1/disambiguate'
lang = 'EN'
key = ''  # Babelfy API key (left empty here; a valid key must be supplied for requests to succeed)
filehandle = open('triples/triples2.tsv') # the triples and the sentences where the triples were extracted
filehandle_write = open('triples/disambiguated_triples_sentence.tsv', 'a')
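# each output row: concept1 | relation | concept2 | sentence | concept1 | babelnetid1 | concept2 | babelnetid2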
for line in filehandle:
splitted = line.split('|')
concept1 = splitted[0].strip()
relation = splitted[1].strip()
concept2 = splitted[2].strip()
sentence = splitted[3].strip()
if concept1 not in sentence:
        # some triples were extracted from sentences that refer to the concept without containing it verbatim, so prepend it for disambiguation
text = concept1+" "+sentence
else:
text = sentence
    # -1 marks a concept for which no BabelNet synset could be matched
    babelnetid1 = -1
    babelnetid2 = -1
params = {
'text' : text,
'lang' : lang,
'key' : key
}
url = service_url + '?' + urllib.urlencode(params)
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO( response.read())
f = gzip.GzipFile(fileobj=buf)
data = json.loads(f.read())
# retrieving data
for result in data:
charFragment = result.get('charFragment')
cfStart = charFragment.get('start')
cfEnd = charFragment.get('end')
word = text[cfStart:cfEnd+1]
print(word)
synsetId = result.get('babelSynsetID')
to_lower = word.lower()
if to_lower.startswith(concept1.lower()):
babelnetid1 = synsetId
if to_lower.startswith(concept2.lower()):
babelnetid2 = synsetId
        print(synsetId)
filehandle_write.write(concept1 + " | " + relation + " | " + concept2 + " | " + sentence+" | " + concept1+" | "+str(babelnetid1)+" | "+concept2+" | "+str(babelnetid2))
filehandle_write.write('\n')
|
normal
|
{
"blob_id": "cd9f94d55eb13f5fc9959546e89a0af8ab2ea0db",
"index": 6147,
"step-1": "import urllib2\nimport urllib\nimport json\nimport gzip\n\nfrom StringIO import StringIO\n\nservice_url = 'https://babelfy.io/v1/disambiguate'\nlang = 'EN'\nkey = ''\n\nfilehandle = open('triples/triples2.tsv') # the triples and the sentences where the triples were extracted\nfilehandle_write = open('triples/disambiguated_triples_sentence.tsv', 'a')\n\nfor line in filehandle:\n splitted = line.split('|')\n concept1 = splitted[0].strip()\n relation = splitted[1].strip()\n concept2 = splitted[2].strip()\n sentence = splitted[3].strip()\n if concept1 not in sentence:\n # I do this for the triples extracted where the concept might not be in the sentence but that sentence refers to the concept\n text = concept1+\" \"+sentence\n else:\n text = sentence\n babelnetid1 = -1\n babelnetid2 = -1\n params = {\n 'text' : text,\n 'lang' : lang,\n 'key' : key\n }\n\n url = service_url + '?' + urllib.urlencode(params)\n request = urllib2.Request(url)\n request.add_header('Accept-encoding', 'gzip')\n response = urllib2.urlopen(request)\n\n if response.info().get('Content-Encoding') == 'gzip':\n buf = StringIO( response.read())\n f = gzip.GzipFile(fileobj=buf)\n data = json.loads(f.read())\n # retrieving data\n for result in data:\n charFragment = result.get('charFragment')\n cfStart = charFragment.get('start')\n cfEnd = charFragment.get('end')\n word = text[cfStart:cfEnd+1]\n print(word)\n synsetId = result.get('babelSynsetID')\n to_lower = word.lower()\n if to_lower.startswith(concept1.lower()):\n babelnetid1 = synsetId\n if to_lower.startswith(concept2.lower()):\n babelnetid2 = synsetId\n\n print synsetId\n\n filehandle_write.write(concept1 + \" | \" + relation + \" | \" + concept2 + \" | \" + sentence+\" | \" + concept1+\" | \"+str(babelnetid1)+\" | \"+concept2+\" | \"+str(babelnetid2))\n filehandle_write.write('\\n')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import scrapy
from kingfisher_scrapy.base_spiders import BigFileSpider
from kingfisher_scrapy.util import components, handle_http_error
class France(BigFileSpider):
"""
Domain
France
Swagger API documentation
https://doc.data.gouv.fr/api/reference/
"""
name = 'france'
# SimpleSpider
data_type = 'release_package'
def start_requests(self):
# A CKAN API JSON response.
# Ministère de l'économie, des finances et de la relance
# https://www.data.gouv.fr/fr/datasets/donnees-essentielles-de-la-commande-publique-fichiers-consolides/
url = 'https://www.data.gouv.fr/api/1/datasets/donnees-essentielles-de-la-commande-publique-fichiers' \
'-consolides/'
yield scrapy.Request(url, meta={'file_name': 'page-1.json'}, callback=self.parse_list)
@handle_http_error
def parse_list(self, response):
for resource in response.json()['resources']:
description = resource['description']
if description and 'ocds' in description.lower():
yield self.build_request(resource['url'], formatter=components(-2))
|
normal
|
{
"blob_id": "369bffa21b5b8c0ca1d93da3aa30a38e2f4c82cc",
"index": 9451,
"step-1": "<mask token>\n\n\nclass France(BigFileSpider):\n <mask token>\n <mask token>\n <mask token>\n\n def start_requests(self):\n url = (\n 'https://www.data.gouv.fr/api/1/datasets/donnees-essentielles-de-la-commande-publique-fichiers-consolides/'\n )\n yield scrapy.Request(url, meta={'file_name': 'page-1.json'},\n callback=self.parse_list)\n\n @handle_http_error\n def parse_list(self, response):\n for resource in response.json()['resources']:\n description = resource['description']\n if description and 'ocds' in description.lower():\n yield self.build_request(resource['url'], formatter=\n components(-2))\n",
"step-2": "<mask token>\n\n\nclass France(BigFileSpider):\n <mask token>\n name = 'france'\n data_type = 'release_package'\n\n def start_requests(self):\n url = (\n 'https://www.data.gouv.fr/api/1/datasets/donnees-essentielles-de-la-commande-publique-fichiers-consolides/'\n )\n yield scrapy.Request(url, meta={'file_name': 'page-1.json'},\n callback=self.parse_list)\n\n @handle_http_error\n def parse_list(self, response):\n for resource in response.json()['resources']:\n description = resource['description']\n if description and 'ocds' in description.lower():\n yield self.build_request(resource['url'], formatter=\n components(-2))\n",
"step-3": "<mask token>\n\n\nclass France(BigFileSpider):\n \"\"\"\n Domain\n France\n Swagger API documentation\n https://doc.data.gouv.fr/api/reference/\n \"\"\"\n name = 'france'\n data_type = 'release_package'\n\n def start_requests(self):\n url = (\n 'https://www.data.gouv.fr/api/1/datasets/donnees-essentielles-de-la-commande-publique-fichiers-consolides/'\n )\n yield scrapy.Request(url, meta={'file_name': 'page-1.json'},\n callback=self.parse_list)\n\n @handle_http_error\n def parse_list(self, response):\n for resource in response.json()['resources']:\n description = resource['description']\n if description and 'ocds' in description.lower():\n yield self.build_request(resource['url'], formatter=\n components(-2))\n",
"step-4": "import scrapy\nfrom kingfisher_scrapy.base_spiders import BigFileSpider\nfrom kingfisher_scrapy.util import components, handle_http_error\n\n\nclass France(BigFileSpider):\n \"\"\"\n Domain\n France\n Swagger API documentation\n https://doc.data.gouv.fr/api/reference/\n \"\"\"\n name = 'france'\n data_type = 'release_package'\n\n def start_requests(self):\n url = (\n 'https://www.data.gouv.fr/api/1/datasets/donnees-essentielles-de-la-commande-publique-fichiers-consolides/'\n )\n yield scrapy.Request(url, meta={'file_name': 'page-1.json'},\n callback=self.parse_list)\n\n @handle_http_error\n def parse_list(self, response):\n for resource in response.json()['resources']:\n description = resource['description']\n if description and 'ocds' in description.lower():\n yield self.build_request(resource['url'], formatter=\n components(-2))\n",
"step-5": "import scrapy\n\nfrom kingfisher_scrapy.base_spiders import BigFileSpider\nfrom kingfisher_scrapy.util import components, handle_http_error\n\n\nclass France(BigFileSpider):\n \"\"\"\n Domain\n France\n Swagger API documentation\n https://doc.data.gouv.fr/api/reference/\n \"\"\"\n name = 'france'\n\n # SimpleSpider\n data_type = 'release_package'\n\n def start_requests(self):\n # A CKAN API JSON response.\n # Ministère de l'économie, des finances et de la relance\n # https://www.data.gouv.fr/fr/datasets/donnees-essentielles-de-la-commande-publique-fichiers-consolides/\n url = 'https://www.data.gouv.fr/api/1/datasets/donnees-essentielles-de-la-commande-publique-fichiers' \\\n '-consolides/'\n yield scrapy.Request(url, meta={'file_name': 'page-1.json'}, callback=self.parse_list)\n\n @handle_http_error\n def parse_list(self, response):\n for resource in response.json()['resources']:\n description = resource['description']\n if description and 'ocds' in description.lower():\n yield self.build_request(resource['url'], formatter=components(-2))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# coding: utf-8
# 2021/5/29 @ tongshiwei
import logging


def get_logger():
    _logger = logging.getLogger("EduNLP")
    _logger.setLevel(logging.INFO)
    _logger.propagate = False
    ch = logging.StreamHandler()
    ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))
    ch.setLevel(logging.INFO)
    _logger.addHandler(ch)
    return _logger


logger = get_logger()
|
normal
|
{
"blob_id": "41f71589d3fb9f5df218d8ffa0f608a890c73ad2",
"index": 8486,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_logger():\n _logger = logging.getLogger('EduNLP')\n _logger.setLevel(logging.INFO)\n _logger.propagate = False\n ch = logging.StreamHandler()\n ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))\n ch.setLevel(logging.INFO)\n _logger.addHandler(ch)\n return _logger\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_logger():\n _logger = logging.getLogger('EduNLP')\n _logger.setLevel(logging.INFO)\n _logger.propagate = False\n ch = logging.StreamHandler()\n ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))\n ch.setLevel(logging.INFO)\n _logger.addHandler(ch)\n return _logger\n\n\nlogger = get_logger()\n",
"step-4": "import logging\n\n\ndef get_logger():\n _logger = logging.getLogger('EduNLP')\n _logger.setLevel(logging.INFO)\n _logger.propagate = False\n ch = logging.StreamHandler()\n ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))\n ch.setLevel(logging.INFO)\n _logger.addHandler(ch)\n return _logger\n\n\nlogger = get_logger()\n",
"step-5": "# coding: utf-8\n# 2021/5/29 @ tongshiwei\nimport logging\n\n\ndef get_logger():\n _logger = logging.getLogger(\"EduNLP\")\n _logger.setLevel(logging.INFO)\n _logger.propagate = False\n ch = logging.StreamHandler()\n ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))\n ch.setLevel(logging.INFO)\n _logger.addHandler(ch)\n return _logger\n\n\nlogger = get_logger()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask
app = Flask(__name__)
import orderapi, views, models, processing

if __name__=="__main__":
    orderapi.app.debug = True
    orderapi.app.run(host='0.0.0.0', port=34203)
    views.app.debug = True
    views.app.run(host='0.0.0.0', port=42720)
|
normal
|
{
"blob_id": "3218a9e82cd19bab1680079aee5f09a97992629e",
"index": 6038,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n orderapi.app.debug = True\n orderapi.app.run(host='0.0.0.0', port=34203)\n views.app.debug = True\n views.app.run(host='0.0.0.0', port=42720)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n<mask token>\nif __name__ == '__main__':\n orderapi.app.debug = True\n orderapi.app.run(host='0.0.0.0', port=34203)\n views.app.debug = True\n views.app.run(host='0.0.0.0', port=42720)\n",
"step-4": "from flask import Flask\napp = Flask(__name__)\nimport orderapi, views, models, processing\nif __name__ == '__main__':\n orderapi.app.debug = True\n orderapi.app.run(host='0.0.0.0', port=34203)\n views.app.debug = True\n views.app.run(host='0.0.0.0', port=42720)\n",
"step-5": "from flask import Flask\napp = Flask(__name__)\nimport orderapi, views, models, processing\n\nif __name__==\"__main__\":\n orderapi.app.debug = True\n orderapi.app.run(host='0.0.0.0', port=34203)\n views.app.debug = True\n views.app.run(host='0.0.0.0', port=42720)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
import os
from bs4 import BeautifulSoup
from urllib.parse import urljoin

CURRENT_DIR = os.getcwd()
DOWNLOAD_DIR = os.path.join(CURRENT_DIR, 'malware_album')

os.makedirs(DOWNLOAD_DIR, exist_ok=True)

url = 'http://old.vision.ece.ucsb.edu/~lakshman/malware_images/album/'


class Extractor(object):
    """docstring for Parser"""
    def __init__(self, html, base_url):
        self.soup = BeautifulSoup(html, "html5lib")
        self.base_url = base_url

    def get_album(self):
        galaries = self.soup.find("div", {"id": "galleries"})
        table = galaries.find("table")
        families = table.find_all('a', href=True)
        for family in families:
            family_name = family.text.strip()
            if family_name != "":
                yield family_name, urljoin(self.base_url, family['href'])

    def get_image_table(self):
        tables = self.soup.find('table')
        for td in tables.find_all('td'):
            image_atag = td.find('a', href=True)
            if image_atag is not None:
                yield image_atag['href']

    def get_pages(self):
        pages = self.soup.find_all('a', href=True)
        seen = list()
        for page in pages:
            if page is not None:
                if 'index' in page['href']:
                    page_url = page['href']
                    if page_url not in seen:
                        seen.append(page_url)
                        yield page_url

    def get_image_link(self):
        """
        return downloadable image's url
        """
        table = self.soup.find('table')
        image_tag = table.find('img')
        image_name = self.soup.find_all("b")[1].text
        return image_tag['src'], image_name

        # image = td.find_all('img')
        # print(image)
        # if image is not None:
        #     return urljoin(self.base_url, image['src'])


def fetch(image_url, image_name, folder):
    r = requests.get(image_url, stream=True)
    image_file = os.path.join(folder, image_name)
    with open(image_file, 'wb') as f:
        for chunk in r.iter_content(1024):
            f.write(chunk)
    del r


def extract_image(page_html, family_url, folder):
    """
    Extract image from page
    """
    image_extractor = Extractor(page_html, family_url)
    for url in image_extractor.get_image_table():
        image_page_url = urljoin(family_url, url)
        # print(image_page_url)
        imres = requests.get(image_page_url)
        image_page_extractor = Extractor(imres.text, image_page_url)
        image_src, image_name = image_page_extractor.get_image_link()
        image_link = urljoin(image_page_url, image_src)
        print(image_link, image_name)
        # Download image
        fetch(image_link, image_name, folder)


def download(url):
    res = requests.get(url)
    parser = Extractor(res.text, url)
    # for each family, fetch image
    for family, family_url in parser.get_album():
        family_folder = os.path.join(DOWNLOAD_DIR, family)
        print(family_folder)
        os.makedirs(family_folder)
        # print(os.path.join(DOWNLOAD_DIR, family_folder))
        res = requests.get(family_url)
        if res.status_code == 200:
            page_extractor = Extractor(res.text, family_url)
            count = 1
            print('Page ', count)
            extract_image(res.text, family_url, family_folder)  # Extract on first page
            for page in page_extractor.get_pages():
                page_url = urljoin(family_url, page)
                count += 1
                print("Page ", count)
                r = requests.get(page_url)
                extract_image(r.text, family_url, family_folder)
            # print('>', image_extractor.get_image_link())
        else:
            print('%s has status code: %s' % (family, res.status_code))


if __name__ == '__main__':
    download(url)
|
normal
|
{
"blob_id": "a53d7b4c93fa49fb0162138d4a262fe7a5546148",
"index": 5215,
"step-1": "<mask token>\n\n\nclass Extractor(object):\n \"\"\"docstring for Parser\"\"\"\n\n def __init__(self, html, base_url):\n self.soup = BeautifulSoup(html, 'html5lib')\n self.base_url = base_url\n\n def get_album(self):\n galaries = self.soup.find('div', {'id': 'galleries'})\n table = galaries.find('table')\n families = table.find_all('a', href=True)\n for family in families:\n family_name = family.text.strip()\n if family_name != '':\n yield family_name, urljoin(self.base_url, family['href'])\n\n def get_image_table(self):\n tables = self.soup.find('table')\n for td in tables.find_all('td'):\n image_atag = td.find('a', href=True)\n if image_atag is not None:\n yield image_atag['href']\n\n def get_pages(self):\n pages = self.soup.find_all('a', href=True)\n seen = list()\n for page in pages:\n if page is not None:\n if 'index' in page['href']:\n page_url = page['href']\n if page_url not in seen:\n seen.append(page_url)\n yield page_url\n\n def get_image_link(self):\n \"\"\"\n return downloadable image's url\n \"\"\"\n table = self.soup.find('table')\n image_tag = table.find('img')\n image_name = self.soup.find_all('b')[1].text\n return image_tag['src'], image_name\n\n\ndef fetch(image_url, image_name, folder):\n r = requests.get(image_url, stream=True)\n image_file = os.path.join(folder, image_name)\n with open(image_file, 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n del r\n\n\ndef extract_image(page_html, family_url, folder):\n \"\"\"\n Extract image from page\n \"\"\"\n image_extractor = Extractor(page_html, family_url)\n for url in image_extractor.get_image_table():\n image_page_url = urljoin(family_url, url)\n imres = requests.get(image_page_url)\n image_page_extractor = Extractor(imres.text, image_page_url)\n image_src, image_name = image_page_extractor.get_image_link()\n image_link = urljoin(image_page_url, image_src)\n print(image_link, image_name)\n fetch(image_link, image_name, folder)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Extractor(object):\n \"\"\"docstring for Parser\"\"\"\n\n def __init__(self, html, base_url):\n self.soup = BeautifulSoup(html, 'html5lib')\n self.base_url = base_url\n\n def get_album(self):\n galaries = self.soup.find('div', {'id': 'galleries'})\n table = galaries.find('table')\n families = table.find_all('a', href=True)\n for family in families:\n family_name = family.text.strip()\n if family_name != '':\n yield family_name, urljoin(self.base_url, family['href'])\n\n def get_image_table(self):\n tables = self.soup.find('table')\n for td in tables.find_all('td'):\n image_atag = td.find('a', href=True)\n if image_atag is not None:\n yield image_atag['href']\n\n def get_pages(self):\n pages = self.soup.find_all('a', href=True)\n seen = list()\n for page in pages:\n if page is not None:\n if 'index' in page['href']:\n page_url = page['href']\n if page_url not in seen:\n seen.append(page_url)\n yield page_url\n\n def get_image_link(self):\n \"\"\"\n return downloadable image's url\n \"\"\"\n table = self.soup.find('table')\n image_tag = table.find('img')\n image_name = self.soup.find_all('b')[1].text\n return image_tag['src'], image_name\n\n\ndef fetch(image_url, image_name, folder):\n r = requests.get(image_url, stream=True)\n image_file = os.path.join(folder, image_name)\n with open(image_file, 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n del r\n\n\ndef extract_image(page_html, family_url, folder):\n \"\"\"\n Extract image from page\n \"\"\"\n image_extractor = Extractor(page_html, family_url)\n for url in image_extractor.get_image_table():\n image_page_url = urljoin(family_url, url)\n imres = requests.get(image_page_url)\n image_page_extractor = Extractor(imres.text, image_page_url)\n image_src, image_name = image_page_extractor.get_image_link()\n image_link = urljoin(image_page_url, image_src)\n print(image_link, image_name)\n fetch(image_link, image_name, folder)\n\n\ndef download(url):\n res = requests.get(url)\n parser = Extractor(res.text, url)\n for family, family_url in parser.get_album():\n family_folder = os.path.join(DOWNLOAD_DIR, family)\n print(family_folder)\n os.makedirs(family_folder)\n res = requests.get(family_url)\n if res.status_code == 200:\n page_extractor = Extractor(res.text, family_url)\n count = 1\n print('Page ', count)\n extract_image(res.text, family_url, family_folder)\n for page in page_extractor.get_pages():\n page_url = urljoin(family_url, page)\n count += 1\n print('Page ', count)\n r = requests.get(page_url)\n extract_image(r.text, family_url, family_folder)\n else:\n print('%s has status code: %s' % (family, res.status_code))\n\n\n<mask token>\n",
"step-3": "<mask token>\nos.makedirs(DOWNLOAD_DIR, exist_ok=True)\n<mask token>\n\n\nclass Extractor(object):\n \"\"\"docstring for Parser\"\"\"\n\n def __init__(self, html, base_url):\n self.soup = BeautifulSoup(html, 'html5lib')\n self.base_url = base_url\n\n def get_album(self):\n galaries = self.soup.find('div', {'id': 'galleries'})\n table = galaries.find('table')\n families = table.find_all('a', href=True)\n for family in families:\n family_name = family.text.strip()\n if family_name != '':\n yield family_name, urljoin(self.base_url, family['href'])\n\n def get_image_table(self):\n tables = self.soup.find('table')\n for td in tables.find_all('td'):\n image_atag = td.find('a', href=True)\n if image_atag is not None:\n yield image_atag['href']\n\n def get_pages(self):\n pages = self.soup.find_all('a', href=True)\n seen = list()\n for page in pages:\n if page is not None:\n if 'index' in page['href']:\n page_url = page['href']\n if page_url not in seen:\n seen.append(page_url)\n yield page_url\n\n def get_image_link(self):\n \"\"\"\n return downloadable image's url\n \"\"\"\n table = self.soup.find('table')\n image_tag = table.find('img')\n image_name = self.soup.find_all('b')[1].text\n return image_tag['src'], image_name\n\n\ndef fetch(image_url, image_name, folder):\n r = requests.get(image_url, stream=True)\n image_file = os.path.join(folder, image_name)\n with open(image_file, 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n del r\n\n\ndef extract_image(page_html, family_url, folder):\n \"\"\"\n Extract image from page\n \"\"\"\n image_extractor = Extractor(page_html, family_url)\n for url in image_extractor.get_image_table():\n image_page_url = urljoin(family_url, url)\n imres = requests.get(image_page_url)\n image_page_extractor = Extractor(imres.text, image_page_url)\n image_src, image_name = image_page_extractor.get_image_link()\n image_link = urljoin(image_page_url, image_src)\n print(image_link, image_name)\n fetch(image_link, image_name, folder)\n\n\ndef download(url):\n res = requests.get(url)\n parser = Extractor(res.text, url)\n for family, family_url in parser.get_album():\n family_folder = os.path.join(DOWNLOAD_DIR, family)\n print(family_folder)\n os.makedirs(family_folder)\n res = requests.get(family_url)\n if res.status_code == 200:\n page_extractor = Extractor(res.text, family_url)\n count = 1\n print('Page ', count)\n extract_image(res.text, family_url, family_folder)\n for page in page_extractor.get_pages():\n page_url = urljoin(family_url, page)\n count += 1\n print('Page ', count)\n r = requests.get(page_url)\n extract_image(r.text, family_url, family_folder)\n else:\n print('%s has status code: %s' % (family, res.status_code))\n\n\nif __name__ == '__main__':\n download(url)\n",
"step-4": "import requests\nimport os\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nCURRENT_DIR = os.getcwd()\nDOWNLOAD_DIR = os.path.join(CURRENT_DIR, 'malware_album')\nos.makedirs(DOWNLOAD_DIR, exist_ok=True)\nurl = 'http://old.vision.ece.ucsb.edu/~lakshman/malware_images/album/'\n\n\nclass Extractor(object):\n \"\"\"docstring for Parser\"\"\"\n\n def __init__(self, html, base_url):\n self.soup = BeautifulSoup(html, 'html5lib')\n self.base_url = base_url\n\n def get_album(self):\n galaries = self.soup.find('div', {'id': 'galleries'})\n table = galaries.find('table')\n families = table.find_all('a', href=True)\n for family in families:\n family_name = family.text.strip()\n if family_name != '':\n yield family_name, urljoin(self.base_url, family['href'])\n\n def get_image_table(self):\n tables = self.soup.find('table')\n for td in tables.find_all('td'):\n image_atag = td.find('a', href=True)\n if image_atag is not None:\n yield image_atag['href']\n\n def get_pages(self):\n pages = self.soup.find_all('a', href=True)\n seen = list()\n for page in pages:\n if page is not None:\n if 'index' in page['href']:\n page_url = page['href']\n if page_url not in seen:\n seen.append(page_url)\n yield page_url\n\n def get_image_link(self):\n \"\"\"\n return downloadable image's url\n \"\"\"\n table = self.soup.find('table')\n image_tag = table.find('img')\n image_name = self.soup.find_all('b')[1].text\n return image_tag['src'], image_name\n\n\ndef fetch(image_url, image_name, folder):\n r = requests.get(image_url, stream=True)\n image_file = os.path.join(folder, image_name)\n with open(image_file, 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n del r\n\n\ndef extract_image(page_html, family_url, folder):\n \"\"\"\n Extract image from page\n \"\"\"\n image_extractor = Extractor(page_html, family_url)\n for url in image_extractor.get_image_table():\n image_page_url = urljoin(family_url, url)\n imres = requests.get(image_page_url)\n image_page_extractor = Extractor(imres.text, image_page_url)\n image_src, image_name = image_page_extractor.get_image_link()\n image_link = urljoin(image_page_url, image_src)\n print(image_link, image_name)\n fetch(image_link, image_name, folder)\n\n\ndef download(url):\n res = requests.get(url)\n parser = Extractor(res.text, url)\n for family, family_url in parser.get_album():\n family_folder = os.path.join(DOWNLOAD_DIR, family)\n print(family_folder)\n os.makedirs(family_folder)\n res = requests.get(family_url)\n if res.status_code == 200:\n page_extractor = Extractor(res.text, family_url)\n count = 1\n print('Page ', count)\n extract_image(res.text, family_url, family_folder)\n for page in page_extractor.get_pages():\n page_url = urljoin(family_url, page)\n count += 1\n print('Page ', count)\n r = requests.get(page_url)\n extract_image(r.text, family_url, family_folder)\n else:\n print('%s has status code: %s' % (family, res.status_code))\n\n\nif __name__ == '__main__':\n download(url)\n",
"step-5": "import requests\nimport os\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\n\nCURRENT_DIR = os.getcwd()\nDOWNLOAD_DIR = os.path.join(CURRENT_DIR, 'malware_album')\n\nos.makedirs(DOWNLOAD_DIR, exist_ok=True)\n\nurl = 'http://old.vision.ece.ucsb.edu/~lakshman/malware_images/album/'\n\n\nclass Extractor(object):\n \"\"\"docstring for Parser\"\"\"\n def __init__(self, html, base_url):\n self.soup = BeautifulSoup(html, \"html5lib\")\n self.base_url = base_url\n\n def get_album(self):\n galaries = self.soup.find(\"div\", {\"id\": \"galleries\"})\n table = galaries.find(\"table\")\n families = table.find_all('a', href=True)\n for family in families:\n family_name = family.text.strip()\n if family_name != \"\":\n yield family_name, urljoin(self.base_url, family['href'])\n\n\n def get_image_table(self):\n tables = self.soup.find('table')\n for td in tables.find_all('td'):\n image_atag = td.find('a', href=True)\n if image_atag is not None:\n yield image_atag['href']\n\n def get_pages(self):\n pages = self.soup.find_all('a', href=True)\n seen = list()\n for page in pages:\n if page is not None:\n if 'index' in page['href']:\n page_url = page['href']\n if page_url not in seen:\n seen.append(page_url)\n yield page_url\n\n def get_image_link(self):\n \"\"\"\n return downloadable image's url\n \"\"\"\n table = self.soup.find('table')\n image_tag = table.find('img')\n image_name = self.soup.find_all(\"b\")[1].text\n return image_tag['src'], image_name\n\n # image = td.find_all('img')\n # print(image)\n # if image is not None:\n # return urljoin(self.base_url, image['src'])\n\n\n\ndef fetch(image_url, image_name, folder):\n r = requests.get(image_url, stream=True)\n image_file = os.path.join(folder, image_name)\n with open(image_file, 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n del r\n\n\ndef extract_image(page_html, family_url, folder):\n \"\"\"\n Extract image from page\n \"\"\"\n image_extractor = Extractor(page_html, family_url)\n for url in image_extractor.get_image_table():\n image_page_url = urljoin(family_url, url)\n # print(image_page_url)\n imres = requests.get(image_page_url)\n image_page_extractor = Extractor(imres.text, image_page_url)\n image_src, image_name = image_page_extractor.get_image_link()\n\n image_link = urljoin(image_page_url, image_src)\n\n print(image_link, image_name)\n # Download image\n fetch(image_link, image_name, folder)\n\n\n\ndef download(url):\n res = requests.get(url)\n parser = Extractor(res.text, url)\n # for each family, fetch image\n for family, family_url in parser.get_album():\n family_folder = os.path.join(DOWNLOAD_DIR, family)\n print(family_folder)\n os.makedirs(family_folder)\n # print(os.path.join(DOWNLOAD_DIR, family_folder))\n\n res = requests.get(family_url)\n if res.status_code == 200:\n page_extractor = Extractor(res.text, family_url)\n count = 1\n print('Page ', count)\n extract_image(res.text, family_url, family_folder) # Extract on first page\n for page in page_extractor.get_pages():\n page_url = urljoin(family_url, page)\n\n count += 1\n print(\"Page \", count)\n\n r = requests.get(page_url)\n extract_image(r.text, family_url, family_folder)\n\n\n # print('>', image_extractor.get_image_link())\n else:\n print('%s has status code: %s' % (family, res.status_code))\n\n\n\n\n\nif __name__ == '__main__':\n download(url)\n\n\n\n",
"step-ids": [
9,
10,
11,
13,
14
]
}
|
[
9,
10,
11,
13,
14
] |
#ERP PROJECT

import pyrebase
import smtplib

config = {
    "apiKey": "apiKey",
    "authDomain": "erproject-dd24e-default-rtdb.firebaseapp.com",
    "databaseURL": "https://erproject-dd24e-default-rtdb.firebaseio.com",
    "storageBucket": "erproject-dd24e-default-rtdb.appspot.com"
}

firebase = pyrebase.initialize_app(config)
db = firebase.database()

db.child("Student").push({"DAY":""})
db.child("Faculty").push({"DAY":""})
student=["s1","s2","s3","s4","s5","s6","s7","s8","s9","s10"]
faculty=["f1","f2","f3","f4","f5"]
st={}
data,data1='',''
st1={}
fa={}
fa1={}
i=1
import schedule
import time

def j():
    global i
    import pandas as pd
    st1.update({i:st})
    data=pd.DataFrame(st1)
    print(data)
    data.to_csv('student.csv')
    fa1.update({i:fa})
    data1=pd.DataFrame(fa1)
    print(data1)
    data1.to_csv('faculty.csv')
    i=i+1

while(1):
    schedule.every(10).seconds.do(j)
    schedule.run_pending()
    time.sleep(1)
    f=input("enter 's' for student,enter 'f' for faculty")
    f=f.upper()
    if(f=="S"):
        name=input("enter student name")
        if name in student:
            a=input("enter 'a' for absent,enter 'l' for leave,enter 'p' for present")
            a=a.upper()
            if(a=="L"): #please change sender and receiver's email id for this function to work
                import smtplib
                server =smtplib.SMTP("smtp.gmail.com",587)
                server.starttls()
                server.login("[email protected]","akki@9510")
                message=name+"is on leave"
                server.sendmail("[email protected]","[email protected]",message)
                a="A"
            st.update({name:a})
            from datetime import datetime
            now = datetime.now() # current date and time
            date_time = now.strftime("%d-%m-%Y")
            db.child("Student").child("DAY").child(date_time).update({name:a})

    if(f=="F"):
        name=input("enter faculty name")
        if name in faculty:
            a=input("enter 'a' for absent,enter 'l' for leave,enter 'p' for present")
            a=a.upper()
            if(a=="L"):
                import smtplib
                server =smtplib.SMTP("smtp.gmail.com",587)
                server.starttls()
                server.login("[email protected]","akki@9510")
                message=name+"is on leave"
                server.sendmail("[email protected]","[email protected]",message)
                a="A"
            fa.update({name:a})
            from datetime import datetime
            now = datetime.now() # current date and time
            date_time = now.strftime("%d-%m-%Y")
            db.child("Faculty").child("DAY").child(date_time).update({name:a})
|
normal
|
{
"blob_id": "3e7e6d7a0137d91dc7437ff91a39d7f8faad675e",
"index": 7075,
"step-1": "<mask token>\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\ndb.child('Student').push({'DAY': ''})\ndb.child('Faculty').push({'DAY': ''})\n<mask token>\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\nwhile 1:\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f = input(\"enter 's' for student,enter 'f' for faculty\")\n f = f.upper()\n if f == 'S':\n name = input('enter student name')\n if name in student:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('[email protected]', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('[email protected]', '[email protected]',\n message)\n a = 'A'\n st.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Student').child('DAY').child(date_time).update({name: a})\n if f == 'F':\n name = input('enter faculty name')\n if name in faculty:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('[email protected]', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('[email protected]', '[email protected]',\n message)\n a = 'A'\n fa.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Faculty').child('DAY').child(date_time).update({name: a})\n",
"step-3": "<mask token>\nconfig = {'apiKey': 'apiKey', 'authDomain':\n 'erproject-dd24e-default-rtdb.firebaseapp.com', 'databaseURL':\n 'https://erproject-dd24e-default-rtdb.firebaseio.com', 'storageBucket':\n 'erproject-dd24e-default-rtdb.appspot.com'}\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\ndb.child('Student').push({'DAY': ''})\ndb.child('Faculty').push({'DAY': ''})\nstudent = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10']\nfaculty = ['f1', 'f2', 'f3', 'f4', 'f5']\nst = {}\ndata, data1 = '', ''\nst1 = {}\nfa = {}\nfa1 = {}\ni = 1\n<mask token>\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\nwhile 1:\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f = input(\"enter 's' for student,enter 'f' for faculty\")\n f = f.upper()\n if f == 'S':\n name = input('enter student name')\n if name in student:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('[email protected]', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('[email protected]', '[email protected]',\n message)\n a = 'A'\n st.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Student').child('DAY').child(date_time).update({name: a})\n if f == 'F':\n name = input('enter faculty name')\n if name in faculty:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('[email protected]', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('[email protected]', '[email protected]',\n message)\n a = 'A'\n fa.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Faculty').child('DAY').child(date_time).update({name: a})\n",
"step-4": "import pyrebase\nimport smtplib\nconfig = {'apiKey': 'apiKey', 'authDomain':\n 'erproject-dd24e-default-rtdb.firebaseapp.com', 'databaseURL':\n 'https://erproject-dd24e-default-rtdb.firebaseio.com', 'storageBucket':\n 'erproject-dd24e-default-rtdb.appspot.com'}\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\ndb.child('Student').push({'DAY': ''})\ndb.child('Faculty').push({'DAY': ''})\nstudent = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's10']\nfaculty = ['f1', 'f2', 'f3', 'f4', 'f5']\nst = {}\ndata, data1 = '', ''\nst1 = {}\nfa = {}\nfa1 = {}\ni = 1\nimport schedule\nimport time\n\n\ndef j():\n global i\n import pandas as pd\n st1.update({i: st})\n data = pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i: fa})\n data1 = pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i = i + 1\n\n\nwhile 1:\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f = input(\"enter 's' for student,enter 'f' for faculty\")\n f = f.upper()\n if f == 'S':\n name = input('enter student name')\n if name in student:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('[email protected]', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('[email protected]', '[email protected]',\n message)\n a = 'A'\n st.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Student').child('DAY').child(date_time).update({name: a})\n if f == 'F':\n name = input('enter faculty name')\n if name in faculty:\n a = input(\n \"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\"\n )\n a = a.upper()\n if a == 'L':\n import smtplib\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('[email protected]', 'akki@9510')\n message = name + 'is on leave'\n server.sendmail('[email protected]', '[email protected]',\n message)\n a = 'A'\n fa.update({name: a})\n from datetime import datetime\n now = datetime.now()\n date_time = now.strftime('%d-%m-%Y')\n db.child('Faculty').child('DAY').child(date_time).update({name: a})\n",
"step-5": "#ERP PROJECT\n\n\nimport pyrebase\nimport smtplib\n\nconfig = {\n \"apiKey\": \"apiKey\",\n \"authDomain\": \"erproject-dd24e-default-rtdb.firebaseapp.com\",\n \"databaseURL\": \"https://erproject-dd24e-default-rtdb.firebaseio.com\",\n \"storageBucket\": \"erproject-dd24e-default-rtdb.appspot.com\"\n}\n\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\n\ndb.child(\"Student\").push({\"DAY\":\"\"})\ndb.child(\"Faculty\").push({\"DAY\":\"\"}) \nstudent=[\"s1\",\"s2\",\"s3\",\"s4\",\"s5\",\"s6\",\"s7\",\"s8\",\"s9\",\"s10\"]\nfaculty=[\"f1\",\"f2\",\"f3\",\"f4\",\"f5\"]\nst={}\ndata,data1='',''\nst1={}\nfa={}\nfa1={}\ni=1\nimport schedule\nimport time\ndef j():\n global i\n import pandas as pd\n st1.update({i:st})\n data=pd.DataFrame(st1)\n print(data)\n data.to_csv('student.csv')\n fa1.update({i:fa})\n data1=pd.DataFrame(fa1)\n print(data1)\n data1.to_csv('faculty.csv')\n i=i+1 \nwhile(1):\n schedule.every(10).seconds.do(j)\n schedule.run_pending()\n time.sleep(1)\n f=input(\"enter 's' for student,enter 'f' for faculty\")\n f=f.upper()\n if(f==\"S\"):\n name=input(\"enter student name\")\n if name in student:\n a=input(\"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\")\n a=a.upper()\n if(a==\"L\"): #please change sender and receiver's email id for this function to work \n import smtplib\n server =smtplib.SMTP(\"smtp.gmail.com\",587)\n server.starttls()\n server.login(\"[email protected]\",\"akki@9510\")\n message=name+\"is on leave\"\n server.sendmail(\"[email protected]\",\"[email protected]\",message)\n a=\"A\"\n st.update({name:a})\n from datetime import datetime\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%d-%m-%Y\")\n db.child(\"Student\").child(\"DAY\").child(date_time).update({name:a})\n \n if(f==\"F\"):\n name=input(\"enter faculty name\")\n if name in faculty:\n a=input(\"enter 'a' for absent,enter 'l' for leave,enter 'p' for present\")\n a=a.upper()\n if(a==\"L\"):\n import smtplib\n server =smtplib.SMTP(\"smtp.gmail.com\",587)\n server.starttls()\n server.login(\"[email protected]\",\"akki@9510\")\n message=name+\"is on leave\"\n server.sendmail(\"[email protected]\",\"[email protected]\",message)\n a=\"A\"\n fa.update({name:a})\n from datetime import datetime\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%d-%m-%Y\")\n db.child(\"Faculty\").child(\"DAY\").child(date_time).update({name:a})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pandas as pd
import numpy as np

df = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],
                   ['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'],
                   ['Hospital2', '2019-10-02'], ['Hospital3', '2019-10-02'],
                   ['Hospital2', '2019-10-03'], ['Hospital2', '2019-10-04'],
                   ['Hospital3', '2019-10-04'], ['Hospital3', '2019-10-05'],
                   ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],
                   ['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])
df2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14, 12],
                    ['Hospital2', 15, 20, 12, 12]],
                   columns=['Hospital_Name', '2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])
print(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size))
print(df2.sum())
|
normal
|
{
"blob_id": "8d8f1f0dbb76b5c536bd1a2142bb61c51dd75075",
"index": 9573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)\n )\nprint(df2.sum())\n",
"step-3": "<mask token>\ndf = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],\n ['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'], ['Hospital2',\n '2019-10-02'], ['Hospital3', '2019-10-02'], ['Hospital2', '2019-10-03'],\n ['Hospital2', '2019-10-04'], ['Hospital3', '2019-10-04'], ['Hospital3',\n '2019-10-05'], ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],\n ['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])\ndf2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14,\n 12], ['Hospital2', 15, 20, 12, 12]], columns=['Hospital_Name',\n '2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])\nprint(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)\n )\nprint(df2.sum())\n",
"step-4": "import pandas as pd\nimport numpy as np\ndf = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],\n ['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'], ['Hospital2',\n '2019-10-02'], ['Hospital3', '2019-10-02'], ['Hospital2', '2019-10-03'],\n ['Hospital2', '2019-10-04'], ['Hospital3', '2019-10-04'], ['Hospital3',\n '2019-10-05'], ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],\n ['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])\ndf2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14,\n 12], ['Hospital2', 15, 20, 12, 12]], columns=['Hospital_Name',\n '2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])\nprint(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)\n )\nprint(df2.sum())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
no_list = {"tor:", "getblocktemplate", " ping ", " pong "}
for i in range(1, 5):
    with open("Desktop/"+str(i)+".log", "r") as r:
        with open("Desktop/"+str(i)+"-clean.log", "a+") as w:
            for line in r:
                if not any(s in line for s in no_list):
                    w.write(line)
|
normal
|
{
"blob_id": "f14a8d0d51f0baefe20b2699ffa82112dad9c38f",
"index": 6582,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, 5):\n with open('Desktop/' + str(i) + '.log', 'r') as r:\n with open('Desktop/' + str(i) + '-clean.log', 'a+') as w:\n for line in r:\n if not any(s in line for s in no_list):\n w.write(line)\n",
"step-3": "no_list = {'tor:', 'getblocktemplate', ' ping ', ' pong '}\nfor i in range(1, 5):\n with open('Desktop/' + str(i) + '.log', 'r') as r:\n with open('Desktop/' + str(i) + '-clean.log', 'a+') as w:\n for line in r:\n if not any(s in line for s in no_list):\n w.write(line)\n",
"step-4": "no_list = {\"tor:\", \"getblocktemplate\", \" ping \", \" pong \"}\nfor i in range(1, 5):\n\twith open(\"Desktop/\"+str(i)+\".log\", \"r\") as r:\n\t\twith open(\"Desktop/\"+str(i)+\"-clean.log\", \"a+\") as w:\n\t\t\tfor line in r:\n\t\t\t\tif not any(s in line for s in no_list):\n\t\t\t\t\tw.write(line)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from floppy.node import Node, Input, Output, Tag, abstractNode


@abstractNode
class StringNode(Node):
    Tag('StringOperations')


class StringAppend(StringNode):
    """
    Creates a new node which combines two strings. These can be seperated by a delimiter.
    :param nodeClass: subclass object of 'Node'.
    :return: newly created Node instance.
    """
    Input('First', str)
    Input('Second', str)
    Input('Delimiter', str, optional=True, default='')
    Output('Joined', str)

    def run(self):
        super(StringAppend, self).run()
        self._Joined(self._Delimiter.join([self._First, self._Second]))


class ListToString(StringNode):
    """
    Creates a new node which combines two strings. These can be seperated by a delimiter.
    :param nodeClass: subclass object of 'Node'.
    :return: newly created Node instance.
    """
    Input('List', object, list=True)
    Input('Delimiter', str, optional=True, default='')
    Output('Joined', str)

    def run(self):
        super(ListToString, self).run()
        string = []
        for element in self._List:
            string.append(str(element))
        self._Joined(self._Delimiter.join(string))
|
normal
|
{
"blob_id": "1bb151171bbbb899456324056be3634e87b5c8fb",
"index": 3494,
"step-1": "<mask token>\n\n\nclass StringAppend(StringNode):\n <mask token>\n Input('First', str)\n Input('Second', str)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n <mask token>\n\n\nclass ListToString(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('List', object, list=True)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(ListToString, self).run()\n string = []\n for element in self._List:\n string.append(str(element))\n self._Joined(self._Delimiter.join(string))\n",
"step-2": "<mask token>\n\n\nclass StringAppend(StringNode):\n <mask token>\n Input('First', str)\n Input('Second', str)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(StringAppend, self).run()\n self._Joined(self._Delimiter.join([self._First, self._Second]))\n\n\nclass ListToString(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('List', object, list=True)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(ListToString, self).run()\n string = []\n for element in self._List:\n string.append(str(element))\n self._Joined(self._Delimiter.join(string))\n",
"step-3": "<mask token>\n\n\nclass StringAppend(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('First', str)\n Input('Second', str)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(StringAppend, self).run()\n self._Joined(self._Delimiter.join([self._First, self._Second]))\n\n\nclass ListToString(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('List', object, list=True)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(ListToString, self).run()\n string = []\n for element in self._List:\n string.append(str(element))\n self._Joined(self._Delimiter.join(string))\n",
"step-4": "from floppy.node import Node, Input, Output, Tag, abstractNode\n\n\n@abstractNode\nclass StringNode(Node):\n Tag('StringOperations')\n\n\nclass StringAppend(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('First', str)\n Input('Second', str)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(StringAppend, self).run()\n self._Joined(self._Delimiter.join([self._First, self._Second]))\n\n\nclass ListToString(StringNode):\n \"\"\"\n Creates a new node which combines two strings. These can be seperated by a delimiter.\n :param nodeClass: subclass object of 'Node'.\n :return: newly created Node instance.\n \"\"\"\n Input('List', object, list=True)\n Input('Delimiter', str, optional=True, default='')\n Output('Joined', str)\n\n def run(self):\n super(ListToString, self).run()\n string = []\n for element in self._List:\n string.append(str(element))\n self._Joined(self._Delimiter.join(string))\n",
"step-5": null,
"step-ids": [
4,
5,
6,
8
]
}
|
[
4,
5,
6,
8
] |
class Point:
    def __init__(self,x,y):
        self.x=x
        self.y=y

    def __str__(self):
        return "({0},{1})".format(self.x,self.y)

    def __add__(self, other):
        self.x=self.x+other.x
        self.y=self.y+other.y
        return Point(self.x,self.y)


p1=Point(1,2)
p2=Point(3,4)
print(p1)
print(p2)
p3=p1+p2
print(p3)
|
normal
|
{
"blob_id": "1bebd3c18742f5362d2e5f22c539f6b13ad58d2a",
"index": 2873,
"step-1": "class Point:\n <mask token>\n\n def __str__(self):\n return '({0},{1})'.format(self.x, self.y)\n\n def __add__(self, other):\n self.x = self.x + other.x\n self.y = self.y + other.y\n return Point(self.x, self.y)\n\n\n<mask token>\n",
"step-2": "class Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return '({0},{1})'.format(self.x, self.y)\n\n def __add__(self, other):\n self.x = self.x + other.x\n self.y = self.y + other.y\n return Point(self.x, self.y)\n\n\n<mask token>\n",
"step-3": "class Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return '({0},{1})'.format(self.x, self.y)\n\n def __add__(self, other):\n self.x = self.x + other.x\n self.y = self.y + other.y\n return Point(self.x, self.y)\n\n\n<mask token>\nprint(p1)\nprint(p2)\n<mask token>\nprint(p3)\n",
"step-4": "class Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return '({0},{1})'.format(self.x, self.y)\n\n def __add__(self, other):\n self.x = self.x + other.x\n self.y = self.y + other.y\n return Point(self.x, self.y)\n\n\np1 = Point(1, 2)\np2 = Point(3, 4)\nprint(p1)\nprint(p2)\np3 = p1 + p2\nprint(p3)\n",
"step-5": "class Point:\r\n def __init__(self,x,y):\r\n self.x=x\r\n self.y=y\r\n\r\n def __str__(self):\r\n return \"({0},{1})\".format(self.x,self.y)\r\n\r\n def __add__(self, other):\r\n self.x=self.x+other.x\r\n self.y=self.y+other.y\r\n return Point(self.x,self.y)\r\n\r\np1=Point(1,2)\r\np2=Point(3,4)\r\nprint(p1)\r\nprint(p2)\r\np3=p1+p2\r\nprint(p3)\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#! /usr/bin/env python3
import common, os, shutil, sys

def main():
    os.chdir(common.root)
    shutil.rmtree('shared/target', ignore_errors = True)
    shutil.rmtree('platform/build', ignore_errors = True)
    shutil.rmtree('platform/target', ignore_errors = True)
    shutil.rmtree('tests/target', ignore_errors = True)
    shutil.rmtree('examples/lwjgl/target', ignore_errors = True)
    shutil.rmtree('examples/kwinit/target', ignore_errors = True)
    shutil.rmtree('examples/jwm/target', ignore_errors = True)
    shutil.rmtree('examples/swt/target', ignore_errors = True)
    return 0

if __name__ == '__main__':
    sys.exit(main())
|
normal
|
{
"blob_id": "2305d0b7ec0d9e08e3f1c0cedaafa6ed60786e50",
"index": 7359,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors=True)\n shutil.rmtree('platform/build', ignore_errors=True)\n shutil.rmtree('platform/target', ignore_errors=True)\n shutil.rmtree('tests/target', ignore_errors=True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors=True)\n shutil.rmtree('examples/kwinit/target', ignore_errors=True)\n shutil.rmtree('examples/jwm/target', ignore_errors=True)\n shutil.rmtree('examples/swt/target', ignore_errors=True)\n return 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors=True)\n shutil.rmtree('platform/build', ignore_errors=True)\n shutil.rmtree('platform/target', ignore_errors=True)\n shutil.rmtree('tests/target', ignore_errors=True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors=True)\n shutil.rmtree('examples/kwinit/target', ignore_errors=True)\n shutil.rmtree('examples/jwm/target', ignore_errors=True)\n shutil.rmtree('examples/swt/target', ignore_errors=True)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-4": "import common, os, shutil, sys\n\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors=True)\n shutil.rmtree('platform/build', ignore_errors=True)\n shutil.rmtree('platform/target', ignore_errors=True)\n shutil.rmtree('tests/target', ignore_errors=True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors=True)\n shutil.rmtree('examples/kwinit/target', ignore_errors=True)\n shutil.rmtree('examples/jwm/target', ignore_errors=True)\n shutil.rmtree('examples/swt/target', ignore_errors=True)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"step-5": "#! /usr/bin/env python3\nimport common, os, shutil, sys\n\ndef main():\n os.chdir(common.root)\n shutil.rmtree('shared/target', ignore_errors = True)\n shutil.rmtree('platform/build', ignore_errors = True)\n shutil.rmtree('platform/target', ignore_errors = True)\n shutil.rmtree('tests/target', ignore_errors = True)\n shutil.rmtree('examples/lwjgl/target', ignore_errors = True)\n shutil.rmtree('examples/kwinit/target', ignore_errors = True)\n shutil.rmtree('examples/jwm/target', ignore_errors = True)\n shutil.rmtree('examples/swt/target', ignore_errors = True)\n\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
total = totmil = cont = menor = 0
barato = ' '
print('-' * 40)
print('LOJA SUPER BARATÃO')
print('-' * 40)
while True:
    produto = str(input('Nome do Produto: '))
    preco = float(input('Preço: '))
    cont += 1
    total += preco
    if preco > 1000:
        totmil += 1
    if cont == 1 or preco < menor:
        barato = produto
        menor = preco
    resp = ' '
    while resp not in 'SN':
        resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]
    if resp == 'N':
        break
print('O total da compra foi R${:.2f}'.format(total))
print('Temos {} produtos custando mais de R$1000,00'.format(totmil))
print('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))
|
normal
|
{
"blob_id": "35b24ffa14f8b3c2040d5becc8a35721e86d8b3d",
"index": 345,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('-' * 40)\nprint('LOJA SUPER BARATÃO')\nprint('-' * 40)\nwhile True:\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: '))\n cont += 1\n total += preco\n if preco > 1000:\n totmil += 1\n if cont == 1 or preco < menor:\n barato = produto\n menor = preco\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if resp == 'N':\n break\nprint('O total da compra foi R${:.2f}'.format(total))\nprint('Temos {} produtos custando mais de R$1000,00'.format(totmil))\nprint('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))\n",
"step-3": "total = totmil = cont = menor = 0\nbarato = ' '\nprint('-' * 40)\nprint('LOJA SUPER BARATÃO')\nprint('-' * 40)\nwhile True:\n produto = str(input('Nome do Produto: '))\n preco = float(input('Preço: '))\n cont += 1\n total += preco\n if preco > 1000:\n totmil += 1\n if cont == 1 or preco < menor:\n barato = produto\n menor = preco\n resp = ' '\n while resp not in 'SN':\n resp = str(input('Quer continuar? [S/N]')).strip().upper()[0]\n if resp == 'N':\n break\nprint('O total da compra foi R${:.2f}'.format(total))\nprint('Temos {} produtos custando mais de R$1000,00'.format(totmil))\nprint('O produto mais barato foi {} que custa {:.2f}'.format(barato, menor))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
from flask import Flask, request, jsonify
from lib.chess_utils import run_game


def create_app():
    app = Flask(__name__)

    @app.route('/')
    def hello_world():
        return 'Hello, World!'

    @app.route('/process_game', methods=['POST'])
    def process_game():
        move_sequence = json.loads(request.data)['moves']
        return jsonify(run_game(move_sequence))
    return app


if __name__ == '__main__':
    app = create_app()
    app.run(port=5000)
|
normal
|
{
"blob_id": "60ca8b1d7307a9d8183e3617f238efcfb9d707dd",
"index": 1950,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app():\n app = Flask(__name__)\n\n @app.route('/')\n def hello_world():\n return 'Hello, World!'\n\n @app.route('/process_game', methods=['POST'])\n def process_game():\n move_sequence = json.loads(request.data)['moves']\n return jsonify(run_game(move_sequence))\n return app\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_app():\n app = Flask(__name__)\n\n @app.route('/')\n def hello_world():\n return 'Hello, World!'\n\n @app.route('/process_game', methods=['POST'])\n def process_game():\n move_sequence = json.loads(request.data)['moves']\n return jsonify(run_game(move_sequence))\n return app\n\n\nif __name__ == '__main__':\n app = create_app()\n app.run(port=5000)\n",
"step-4": "import json\nfrom flask import Flask, request, jsonify\nfrom lib.chess_utils import run_game\n\n\ndef create_app():\n app = Flask(__name__)\n\n @app.route('/')\n def hello_world():\n return 'Hello, World!'\n\n @app.route('/process_game', methods=['POST'])\n def process_game():\n move_sequence = json.loads(request.data)['moves']\n return jsonify(run_game(move_sequence))\n return app\n\n\nif __name__ == '__main__':\n app = create_app()\n app.run(port=5000)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from datetime import datetime, timedelta


def sendmail(subject, template, to, context):
    template_str = 'app/' + template + '.html'
    html_msg = render_to_string(template_str, {'data': context})
    plain_msg = strip_tags(html_msg)
    from_email = '[email protected]'
    send_mail(subject, plain_msg, from_email, to, html_message=html_msg)
|
normal
|
{
"blob_id": "0349a8a4841b024afd77d20ae18810645fad41cd",
"index": 4883,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sendmail(subject, template, to, context):\n template_str = 'app/' + template + '.html'\n html_msg = render_to_string(template_str, {'data': context})\n plain_msg = strip_tags(html_msg)\n from_email = '[email protected]'\n send_mail(subject, plain_msg, from_email, to, html_message=html_msg)\n",
"step-3": "from django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom datetime import datetime, timedelta\n\n\ndef sendmail(subject, template, to, context):\n template_str = 'app/' + template + '.html'\n html_msg = render_to_string(template_str, {'data': context})\n plain_msg = strip_tags(html_msg)\n from_email = '[email protected]'\n send_mail(subject, plain_msg, from_email, to, html_message=html_msg)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from rest_framework import filters
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.viewsets import ModelViewSet

from apis.models import Contact, Address, InvoicePosition, Country, Invoice
from apis.serializers import ContactSerializer, AddressSerializer, InvoicePositionSerializer, CountrySerializer, \
    InvoiceSerializer


class ContactViewSet(ModelViewSet):
    queryset = Contact.objects.all()
    serializer_class = ContactSerializer
    filterset_fields = ['type']
    permission_classes = (IsAuthenticated,)


class AddressViewSet(ModelViewSet):
    queryset = Address.objects.all()
    serializer_class = AddressSerializer
    permission_classes = (IsAuthenticated,)


class InvoicePositionViewSet(ModelViewSet):
    queryset = InvoicePosition.objects.all()
    serializer_class = InvoicePositionSerializer
    permission_classes = (IsAuthenticated,)


class CountryListView(ListAPIView):
    queryset = Country.objects.all()
    serializer_class = CountrySerializer
    filter_backends = [filters.SearchFilter]
    search_fields = ['value']
    permission_classes = (IsAuthenticated,)


class InvoiceViewSet(ModelViewSet):
    queryset = Invoice.objects.all()
    serializer_class = InvoiceSerializer
    filter_backends = [filters.SearchFilter]
    search_fields = ['address__contact__name']
    permission_classes = (IsAuthenticated,)
|
normal
|
{
"blob_id": "43bad38d209b5c326cb9f17ba1ae135d06320e97",
"index": 145,
"step-1": "<mask token>\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = IsAuthenticated,\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = IsAuthenticated,\n",
"step-2": "<mask token>\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n queryset = InvoicePosition.objects.all()\n serializer_class = InvoicePositionSerializer\n permission_classes = IsAuthenticated,\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = IsAuthenticated,\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = IsAuthenticated,\n",
"step-3": "<mask token>\n\n\nclass ContactViewSet(ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AddressViewSet(ModelViewSet):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n permission_classes = IsAuthenticated,\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n queryset = InvoicePosition.objects.all()\n serializer_class = InvoicePositionSerializer\n permission_classes = IsAuthenticated,\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = IsAuthenticated,\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = IsAuthenticated,\n",
"step-4": "from rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.viewsets import ModelViewSet\nfrom apis.models import Contact, Address, InvoicePosition, Country, Invoice\nfrom apis.serializers import ContactSerializer, AddressSerializer, InvoicePositionSerializer, CountrySerializer, InvoiceSerializer\n\n\nclass ContactViewSet(ModelViewSet):\n queryset = Contact.objects.all()\n serializer_class = ContactSerializer\n filterset_fields = ['type']\n permission_classes = IsAuthenticated,\n\n\nclass AddressViewSet(ModelViewSet):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n permission_classes = IsAuthenticated,\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n queryset = InvoicePosition.objects.all()\n serializer_class = InvoicePositionSerializer\n permission_classes = IsAuthenticated,\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = IsAuthenticated,\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = IsAuthenticated,\n",
"step-5": "from rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom apis.models import Contact, Address, InvoicePosition, Country, Invoice\nfrom apis.serializers import ContactSerializer, AddressSerializer, InvoicePositionSerializer, CountrySerializer, \\\n InvoiceSerializer\n\n\nclass ContactViewSet(ModelViewSet):\n queryset = Contact.objects.all()\n serializer_class = ContactSerializer\n filterset_fields = ['type']\n permission_classes = (IsAuthenticated,)\n\n\nclass AddressViewSet(ModelViewSet):\n queryset = Address.objects.all()\n serializer_class = AddressSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass InvoicePositionViewSet(ModelViewSet):\n queryset = InvoicePosition.objects.all()\n serializer_class = InvoicePositionSerializer\n permission_classes = (IsAuthenticated,)\n\n\nclass CountryListView(ListAPIView):\n queryset = Country.objects.all()\n serializer_class = CountrySerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['value']\n permission_classes = (IsAuthenticated,)\n\n\nclass InvoiceViewSet(ModelViewSet):\n queryset = Invoice.objects.all()\n serializer_class = InvoiceSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['address__contact__name']\n permission_classes = (IsAuthenticated,)\n",
"step-ids": [
5,
6,
9,
11,
12
]
}
|
[
5,
6,
9,
11,
12
] |
from django.shortcuts import render
from .models import Recipe, Author
def index(request):
recipes_list = Recipe.objects.all()
return render(request, "index.html",
{"data": recipes_list, "title": "Recipe Box"})
def recipeDetail(request, recipe_id):
recipe_detail = Recipe.objects.filter(id=recipe_id).first()
return render(request, "recipe_detail.html",
{"recipe": recipe_detail})
def authorDetail(request, author_id):
author = Author.objects.filter(id=author_id).first()
recipes = Recipe.objects.filter(author=author_id)
return render(request, "author_detail.html",
{"recipes": recipes, "author": author})
|
normal
|
{
"blob_id": "f0f8ad7b65707bcf691847ccb387e4d026b405b5",
"index": 6395,
"step-1": "<mask token>\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, 'author_detail.html', {'recipes': recipes,\n 'author': author})\n",
"step-2": "<mask token>\n\n\ndef recipeDetail(request, recipe_id):\n recipe_detail = Recipe.objects.filter(id=recipe_id).first()\n return render(request, 'recipe_detail.html', {'recipe': recipe_detail})\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, 'author_detail.html', {'recipes': recipes,\n 'author': author})\n",
"step-3": "<mask token>\n\n\ndef index(request):\n recipes_list = Recipe.objects.all()\n return render(request, 'index.html', {'data': recipes_list, 'title':\n 'Recipe Box'})\n\n\ndef recipeDetail(request, recipe_id):\n recipe_detail = Recipe.objects.filter(id=recipe_id).first()\n return render(request, 'recipe_detail.html', {'recipe': recipe_detail})\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, 'author_detail.html', {'recipes': recipes,\n 'author': author})\n",
"step-4": "from django.shortcuts import render\nfrom .models import Recipe, Author\n\n\ndef index(request):\n recipes_list = Recipe.objects.all()\n return render(request, 'index.html', {'data': recipes_list, 'title':\n 'Recipe Box'})\n\n\ndef recipeDetail(request, recipe_id):\n recipe_detail = Recipe.objects.filter(id=recipe_id).first()\n return render(request, 'recipe_detail.html', {'recipe': recipe_detail})\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, 'author_detail.html', {'recipes': recipes,\n 'author': author})\n",
"step-5": "from django.shortcuts import render\nfrom .models import Recipe, Author\n\n\ndef index(request):\n recipes_list = Recipe.objects.all()\n return render(request, \"index.html\",\n {\"data\": recipes_list, \"title\": \"Recipe Box\"})\n\n\ndef recipeDetail(request, recipe_id):\n recipe_detail = Recipe.objects.filter(id=recipe_id).first()\n return render(request, \"recipe_detail.html\",\n {\"recipe\": recipe_detail})\n\n\ndef authorDetail(request, author_id):\n author = Author.objects.filter(id=author_id).first()\n recipes = Recipe.objects.filter(author=author_id)\n return render(request, \"author_detail.html\",\n {\"recipes\": recipes, \"author\": author})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from urllib.parse import quote
from top_model import db
from top_model.ext.flask import FlaskTopModel
from top_model.filesystem import ProductPhotoCIP
from top_model.webstore import Product, Labo
from unrest import UnRest
class Hydra(FlaskTopModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config['CLIENT_ID'] = 4
self.config['BASE_IMAGE_URL'] = (
'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}')
self.config['SQLALCHEMY_DATABASE_URI'] = (
'pgfdw://hydra@localhost/hydra')
self.config.from_envvar('MEDBOX_SETTINGS', silent=True)
self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])
def filter_query(query):
return query.filter_by(client_id=app.config['CLIENT_ID'])
app = Hydra(__name__)
rest = UnRest(app, db.session)
rest(Labo, only=('label',))
product_api = rest(Product, query=filter_query, only=(
'product_id', 'title', 'description', 'cip', 'resip_labo_code',
'type_product'))
image_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))
@image_api.declare('GET')
def get_image(payload, cip, name, ext):
result = image_api.get(payload, cip=cip)
for obj in getattr(result, 'data', result)['objects']:
obj['name'] = quote(obj['name'])
obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)
return result
@product_api.declare('GET')
def get_product(payload, product_id):
products = (
Product.query
.filter_by(cip=str(product_id))
.filter_by(client_id=app.config['CLIENT_ID'])
.all())
if products:
return product_api.get(payload, product_id=products[0].product_id)
else:
return {'objects': [], 'occurences': 0}
|
normal
|
{
"blob_id": "de3a4053b5b0d4d2d5c2dcd317e64cf9b4faeb75",
"index": 562,
"step-1": "<mask token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\n<mask token>\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-2": "<mask token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\n<mask token>\nrest(Labo, only=('label',))\n<mask token>\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-3": "<mask token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=('product_id', 'title',\n 'description', 'cip', 'resip_labo_code', 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-4": "from urllib.parse import quote\nfrom top_model import db\nfrom top_model.ext.flask import FlaskTopModel\nfrom top_model.filesystem import ProductPhotoCIP\nfrom top_model.webstore import Product, Labo\nfrom unrest import UnRest\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=('product_id', 'title',\n 'description', 'cip', 'resip_labo_code', 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-5": "from urllib.parse import quote\n\nfrom top_model import db\nfrom top_model.ext.flask import FlaskTopModel\nfrom top_model.filesystem import ProductPhotoCIP\nfrom top_model.webstore import Product, Labo\nfrom unrest import UnRest\n\n\nclass Hydra(FlaskTopModel):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'] = (\n 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}')\n self.config['SQLALCHEMY_DATABASE_URI'] = (\n 'pgfdw://hydra@localhost/hydra')\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\n\n\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=(\n 'product_id', 'title', 'description', 'cip', 'resip_labo_code',\n 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = (\n Product.query\n .filter_by(cip=str(product_id))\n .filter_by(client_id=app.config['CLIENT_ID'])\n .all())\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"step-ids": [
3,
6,
7,
8,
9
]
}
|
[
3,
6,
7,
8,
9
] |
#!/usr/bin/env python
#-------------------------------------------------------------------------------
#
# Circle finder.
#
# Rowan Leeder
#
#-------------------------------------------------------------------------------
#
# Listens on the 'scan' and 'base_scan' topics. These are the pioneers SICK
# topic and Stage's scan topic respectively.
#
# The program strips out noise samples and attempts to match circles to the
# remaining samples.
#
# Any circle that is found is then published on the 'circles' topic in a
# circleArray message.
#
# The circleArray and circleEntry messages are defined in the msg\ folder.
#
#-------------------------------------------------------------------------------
#
# Compile Commands:
#
# First run 'rosmake' in the base directory. If you change the messages in any
# way then you will have to close all ros components using the topic (basically
# everything) and then recompile with rosmake. If you add a message, add an
# entry to the manifest file.
#
# To run this program do 'rosrun circleFinder finder.py'.
#
# Exit with Ctrl + C.
#
# Listen in with 'rostopic echo circles'
#
# If you want to see a plot of the data, set the 'plot' variable to True.
#
#-------------------------------------------------------------------------------
# Known Bugs:
# If the laser scan covers 360 degrees then you might get two circles at the
# same spot. This is becuase i haven't joined the two ends of the scan together.
# This will not be an issue with the robots as they only take 180 degree scans.
# Ros imports.
import roslib;
roslib.load_manifest('circleFinder')
import rospy
from sensor_msgs.msg import LaserScan
from roslib.rostime import Duration
# Python lib imports.
import math
import time
# Message imports
from circleFinder.msg import *
# Local file imports.
from placment_funcs import *
from data_parser import *
# plot functions are in here. Remove if you dont want and you might free up
# some memory.
from plot_funcs import *
#-------------------------------------------------------------------------------
# Function: callback
#
# Thread created when a laser scan is received on a listening topic and extract
# and publish a specified number of circle from the data.
#
#-------------------------------------------------------------------------------
#
# args - An array of arguments. The form is:
# max_dist - the maximum distance to look for circles. If a sample or
# circle edge goes beyond this then it will be ignored.
# max_rad - The maximum radius that a valid circle can have.
# min_rad - The minimum radius that a valid circle can have.
# grad_tol - The tolerance used in the prune function.
# split_multi - The multiplier used in the split function
#
# publish - A circleArray object containing the circle data in an array of
# circleEntry objects. These classes are defined in the
# circleFinder/msg path.
#-------------------------------------------------------------------------------
def callback(data, args):
tStart = time.time()
pub = args[0]
max_dist = args[1]
max_rad = args[2]
min_rad = args[3]
grad_tol = args[4]
split_multi = args[5]
prune_lines = args[6]
plot = args[7]
# Get possible circle data.
possibles = dataParser(data,max_dist, grad_tol, split_multi, prune_lines)
# Calculate the circle info from that data.
circles = []
for i in possibles:
current = matchCirc(list(i), False)
if current is not None:
#prune out any circles that are too large or small
if current[1] > max_rad or \
current[1] < min_rad or \
math.sqrt(math.pow(current[0][0],2) + math.pow(current[0][1],2)) + current[1] > max_dist:
pass
else:
circles.append(current)
# Setup circleArray and publish found circles.
ret = []
for i in circles:
c = circleEntry()
c.x = i[0][0]
c.y = i[0][1]
c.distance = math.sqrt(i[0][0]*i[0][0] + i[0][1] * i[0][1])
c.theta = math.atan2(i[0][1], i[0][0])
c.radius = i[1]
ret.append(c)
m = circleArray()
m.broadcastTime = rospy.get_rostime()
m.duration = time.time() - tStart
m.array = ret
if not rospy.is_shutdown():
pub.publish(m)
if plot:
import matplotlib.pyplot as plt
plotWorld(data, 30, True, 'ro')
for i in circles:
plotCircle((i[0])[0],(i[0])[1],i[1])
for i in possibles:
for u in i:
plt.plot(u[0], u[1], 'bo')
plt.plot(0,0,'ro')
plotAxis(8,-8,8,-8,4)
plt.axis([-8,8,-8,8])
plt.show()
#-------------------------------------------------------------------------------
# Function: main
#
# Sets up the callback function and then idles.
#
# Program arguments are inside.
#
#-------------------------------------------------------------------------------
if __name__ == '__main__':
#print dir()
# the publiser
pub = rospy.Publisher("circles", circleArray)
# The maximum distance from the origin that a sample point or circle edge
# can be before they are considered invalid.
max_dist = 7
# The maximum radius a circle can be before it is considered invalid.
max_rad = 0.25
# The maximum radius a circle can be before it is considered invalid.
min_rad = 0
# See the prune function in data_parser.py
grad_tol = 0.3
# See the split function in data_parser.py
split_multi = 2.5
# If true then an attempt to remove straight edges from the data will be
# made.
prune_lines = True
# Plot flag.
plot = False
import sys
if (len(sys.argv) > 1):
for i in sys.argv:
if i == '--plot':
plot = True
elif i == '--no-line-pruning':
prune_lines = False
args = [pub, max_dist, max_rad, min_rad, grad_tol, split_multi, prune_lines , plot]
print "--------------------------------------------------------------------------------"
print "Circle Finder"
print
print "--------------------------------------------------------------------------------"
print "Command line arguments are:"
print " --plot Will cause the outcome of the first scan to be plotted."
print " --no-line-pruning Will prevent straight lines from being removed from the"
print " scan."
print
print "--------------------------------------------------------------------------------"
print "Starting circle finder with arguments:"
print
print " Publisher: " , pub
print " Maximum Distance: " , max_dist
print " Maximum Radius: " , max_rad
print " Minimum Radius: " , min_rad
print " Gradient Tolerance: " , grad_tol
print " Split Multiplier: " , split_multi
print " Remove Lines: " , prune_lines
print " Plot: " , plot
print
print "--------------------------------------------------------------------------------"
print "To increase speed, the listening thread is not verbose."
print "Ctrl+C to exit."
rospy.init_node('circles', anonymous=True)
rospy.Subscriber("base_scan",LaserScan, callback, callback_args=args)
rospy.Subscriber("scan",LaserScan, callback, callback_args=args)
rospy.spin()
|
normal
|
{
"blob_id": "3ac02308959749b8cd264e660c3d6334fd385fd4",
"index": 1114,
"step-1": "#!/usr/bin/env python\n#-------------------------------------------------------------------------------\n#\n# Circle finder.\n#\n# Rowan Leeder\n#\n#-------------------------------------------------------------------------------\n#\n# Listens on the 'scan' and 'base_scan' topics. These are the pioneers SICK\n# topic and Stage's scan topic respectively.\n#\n# The program strips out noise samples and attempts to match circles to the\n# remaining samples.\n#\n# Any circle that is found is then published on the 'circles' topic in a\n# circleArray message.\n#\n# The circleArray and circleEntry messages are defined in the msg\\ folder.\n#\n#-------------------------------------------------------------------------------\n#\n# Compile Commands:\n#\n# First run 'rosmake' in the base directory. If you change the messages in any\n# way then you will have to close all ros components using the topic (basically\n# everything) and then recompile with rosmake. If you add a message, add an \n# entry to the manifest file. \n#\n# To run this program do 'rosrun circleFinder finder.py'. \n#\n# Exit with Ctrl + C.\n#\n# Listen in with 'rostopic echo circles'\n#\n# If you want to see a plot of the data, set the 'plot' variable to True.\n#\n#-------------------------------------------------------------------------------\n# Known Bugs:\n# If the laser scan covers 360 degrees then you might get two circles at the \n# same spot. This is becuase i haven't joined the two ends of the scan together.\n# This will not be an issue with the robots as they only take 180 degree scans.\n\n# Ros imports.\nimport roslib; \nroslib.load_manifest('circleFinder')\nimport rospy\nfrom sensor_msgs.msg import LaserScan\nfrom roslib.rostime import Duration \n\n# Python lib imports.\nimport math\nimport time\n\n# Message imports\nfrom circleFinder.msg import *\n\n# Local file imports.\nfrom placment_funcs import *\nfrom data_parser import *\n\n# plot functions are in here. Remove if you dont want and you might free up \n# some memory.\nfrom plot_funcs import *\n\n\n#-------------------------------------------------------------------------------\n# Function: callback\n#\n# Thread created when a laser scan is received on a listening topic and extract \n# and publish a specified number of circle from the data.\n#\n#-------------------------------------------------------------------------------\n#\n# args - An array of arguments. The form is: \n# max_dist - the maximum distance to look for circles. If a sample or \n# circle edge goes beyond this then it will be ignored.\n# max_rad - The maximum radius that a valid circle can have.\n# min_rad - The minimum radius that a valid circle can have.\n# grad_tol - The tolerance used in the prune function.\n# split_multi - The multiplier used in the split function\n#\n# publish - A circleArray object containing the circle data in an array of \n# circleEntry objects. 
These classes are defined in the \n# circleFinder/msg path.\n#-------------------------------------------------------------------------------\ndef callback(data, args):\n tStart = time.time()\n \n pub = args[0]\n max_dist = args[1]\n max_rad = args[2]\n min_rad = args[3]\n grad_tol = args[4]\n split_multi = args[5]\n prune_lines = args[6]\n plot = args[7]\n \n \n # Get possible circle data.\n possibles = dataParser(data,max_dist, grad_tol, split_multi, prune_lines)\n \n # Calculate the circle info from that data.\n circles = []\n for i in possibles:\n current = matchCirc(list(i), False)\n if current is not None:\n #prune out any circles that are too large or small\n if current[1] > max_rad or \\\n current[1] < min_rad or \\\n math.sqrt(math.pow(current[0][0],2) + math.pow(current[0][1],2)) + current[1] > max_dist:\n pass\n else:\n circles.append(current)\n \n # Setup circleArray and publish found circles.\n ret = []\n for i in circles:\n c = circleEntry()\n c.x = i[0][0]\n c.y = i[0][1]\n c.distance = math.sqrt(i[0][0]*i[0][0] + i[0][1] * i[0][1])\n c.theta = math.atan2(i[0][1], i[0][0])\n c.radius = i[1]\n ret.append(c)\n m = circleArray()\n m.broadcastTime = rospy.get_rostime()\n m.duration = time.time() - tStart\n m.array = ret\n if not rospy.is_shutdown():\n pub.publish(m)\n \n if plot:\n import matplotlib.pyplot as plt\n plotWorld(data, 30, True, 'ro')\n for i in circles:\n plotCircle((i[0])[0],(i[0])[1],i[1])\n for i in possibles:\n for u in i:\n plt.plot(u[0], u[1], 'bo')\n plt.plot(0,0,'ro')\n plotAxis(8,-8,8,-8,4)\n plt.axis([-8,8,-8,8])\n plt.show()\n\n\n\n#-------------------------------------------------------------------------------\n# Function: main\n#\n# Sets up the callback function and then idles.\n#\n# Program arguments are inside. 
\n#\n#-------------------------------------------------------------------------------\nif __name__ == '__main__':\n #print dir()\n \n # the publiser\n pub = rospy.Publisher(\"circles\", circleArray)\n \n # The maximum distance from the origin that a sample point or circle edge \n # can be before they are considered invalid.\n max_dist = 7\n \n # The maximum radius a circle can be before it is considered invalid.\n max_rad = 0.25\n \n # The maximum radius a circle can be before it is considered invalid.\n min_rad = 0\n \n # See the prune function in data_parser.py\n grad_tol = 0.3\n \n # See the split function in data_parser.py\n split_multi = 2.5\n \n # If true then an attempt to remove straight edges from the data will be \n # made.\n prune_lines = True\n \n # Plot flag.\n plot = False\n \n import sys\n if (len(sys.argv) > 1):\n for i in sys.argv:\n if i == '--plot':\n plot = True\n elif i == '--no-line-pruning':\n prune_lines = False\n \n args = [pub, max_dist, max_rad, min_rad, grad_tol, split_multi, prune_lines , plot]\n print \"--------------------------------------------------------------------------------\"\n print \"Circle Finder\"\n print\n print \"--------------------------------------------------------------------------------\"\n print \"Command line arguments are:\"\n print \" --plot Will cause the outcome of the first scan to be plotted.\"\n print \" --no-line-pruning Will prevent straight lines from being removed from the\" \n print \" scan.\"\n print\n print \"--------------------------------------------------------------------------------\"\n print \"Starting circle finder with arguments:\"\n print\n print \" Publisher: \" , pub\n print \" Maximum Distance: \" , max_dist\n print \" Maximum Radius: \" , max_rad\n print \" Minimum Radius: \" , min_rad\n print \" Gradient Tolerance: \" , grad_tol\n print \" Split Multiplier: \" , split_multi\n print \" Remove Lines: \" , prune_lines \n print \" Plot: \" , plot\n print\n print \"--------------------------------------------------------------------------------\"\n print \"To increase speed, the listening thread is not verbose.\"\n print \"Ctrl+C to exit.\"\n rospy.init_node('circles', anonymous=True)\n rospy.Subscriber(\"base_scan\",LaserScan, callback, callback_args=args)\n rospy.Subscriber(\"scan\",LaserScan, callback, callback_args=args)\n rospy.spin()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
Pattern matching problem
Boyer Moore algorithm
First is my attempt, below is the code provided in the book
Idea:
Optimize brute force approach using 2 heuristics:
- Looking-Glass: start searches from last character of the
pattern and work backwards
- Character-Jump: During testing of a pattern P, a mismatch
in T[i] = c with corresponding pattern P[k] is handled:
a) if C is not contained in P, shift P completely past i.
b) if c is contained in P shift P until an occurrence of c
gets aligned with T[i]
"""
def find_boyer_moore(T, P):
""" return lowest index of T at which the substring P begins or -1"""
n, m = len(T), len(P)
if m == 0: return 0
last = {} # Using hash table for fast access
for k in range(m):
last[P[k]] = k
i = m - 1 # i index at T, k index at P
k = m - 1 # j index of last occurrence of T[i] in P
while i < n:
if T[i] == P[k]: # if chars are equal
""" INCORRECT PART """
i -= 1 # normal iteration
k -= 1
if k == 0:
return i # check if Patter is complete
else:
# if j < k (remember k index at P)
# shift i += m - (j+1)
# if j > k
# shift i += m - k
j = last.get(T[i], -1) # -1 if item not there
i += m - (min(k, j+1))
k = m - 1
return -1
def find_boyer_moore2(T, P):
""" return lowest index of T at which the substring P begins or -1"""
n, m = len(T), len(P)
if m == 0: return 0
last = {} # Using hash table for fast access
for k in range(m):
last[P[k]] = k
i = m - 1 # i index at T, k index at P
k = m - 1 # j index of last occurrence of T[i] in P
while i < n:
if T[i] == P[k]: # if chars are equal
if k == 0:
return i # check if Patter is complete
else:
i -= 1 # normal iteration
k -= 1
else:
j = last.get(T[i], -1) # -1 if item not there
i += m - (min(k, j+1))
k = m - 1
return -1
# T = "abacaabadcabacabaabb"
T = "ddcbacab"
P = "abacab"
print(find_boyer_moore2(T, P))
|
normal
|
{
"blob_id": "c418b9b6903ebdad204a3a55f2384a94a3be0d09",
"index": 5561,
"step-1": "<mask token>\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n if k == 0:\n return i\n else:\n i -= 1\n k -= 1\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_boyer_moore(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n \"\"\" INCORRECT PART \"\"\"\n i -= 1\n k -= 1\n if k == 0:\n return i\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n if k == 0:\n return i\n else:\n i -= 1\n k -= 1\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_boyer_moore(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n \"\"\" INCORRECT PART \"\"\"\n i -= 1\n k -= 1\n if k == 0:\n return i\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n if k == 0:\n return i\n else:\n i -= 1\n k -= 1\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\n<mask token>\nprint(find_boyer_moore2(T, P))\n",
"step-4": "<mask token>\n\n\ndef find_boyer_moore(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n \"\"\" INCORRECT PART \"\"\"\n i -= 1\n k -= 1\n if k == 0:\n return i\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0:\n return 0\n last = {}\n for k in range(m):\n last[P[k]] = k\n i = m - 1\n k = m - 1\n while i < n:\n if T[i] == P[k]:\n if k == 0:\n return i\n else:\n i -= 1\n k -= 1\n else:\n j = last.get(T[i], -1)\n i += m - min(k, j + 1)\n k = m - 1\n return -1\n\n\nT = 'ddcbacab'\nP = 'abacab'\nprint(find_boyer_moore2(T, P))\n",
"step-5": "\"\"\"\nPattern matching problem\nBoyer Moore algorithm\n\nFirst is my attempt, below is the code provided in the book\nIdea:\nOptimize brute force approach using 2 heuristics:\n- Looking-Glass: start searches from last character of the\npattern and work backwards\n- Character-Jump: During testing of a pattern P, a mismatch\nin T[i] = c with corresponding pattern P[k] is handled:\na) if C is not contained in P, shift P completely past i.\nb) if c is contained in P shift P until an occurrence of c\ngets aligned with T[i]\n\n\"\"\"\n\n\ndef find_boyer_moore(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0: return 0\n last = {} # Using hash table for fast access\n for k in range(m):\n last[P[k]] = k\n i = m - 1 # i index at T, k index at P\n k = m - 1 # j index of last occurrence of T[i] in P\n while i < n:\n if T[i] == P[k]: # if chars are equal\n \"\"\" INCORRECT PART \"\"\"\n i -= 1 # normal iteration\n k -= 1\n if k == 0:\n return i # check if Patter is complete\n else:\n # if j < k (remember k index at P)\n # shift i += m - (j+1)\n # if j > k\n # shift i += m - k\n j = last.get(T[i], -1) # -1 if item not there\n i += m - (min(k, j+1))\n k = m - 1\n return -1\n\n\ndef find_boyer_moore2(T, P):\n \"\"\" return lowest index of T at which the substring P begins or -1\"\"\"\n n, m = len(T), len(P)\n if m == 0: return 0\n last = {} # Using hash table for fast access\n for k in range(m):\n last[P[k]] = k\n i = m - 1 # i index at T, k index at P\n k = m - 1 # j index of last occurrence of T[i] in P\n while i < n:\n if T[i] == P[k]: # if chars are equal\n if k == 0:\n return i # check if Patter is complete\n else:\n i -= 1 # normal iteration\n k -= 1\n else:\n j = last.get(T[i], -1) # -1 if item not there\n i += m - (min(k, j+1))\n k = m - 1\n return -1\n\n# T = \"abacaabadcabacabaabb\"\nT = \"ddcbacab\"\nP = \"abacab\"\nprint(find_boyer_moore2(T, P))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#usage:
#crawl raw weibo text data from sina weibo users(my followees)
#in total, there are 20080 weibo tweets, because there is uplimit for crawler
# -*- coding: utf-8 -*-
import weibo
APP_KEY = 'your app_key'
APP_SECRET = 'your app_secret'
CALL_BACK = 'your call back url'
def run():
token = "your access token gotten from call_back url"
client = weibo.APIClient(APP_KEY, APP_SECRET, CALL_BACK)
client.set_access_token(token,12345)
followlist = client.friendships.friends.get(screen_name='蜀云Parallelli',count=200)
wb_raw = open('weibo_raw_userlistweibo_big.txt','w')
weiboCnt = 0
usernames = {}
for fl in followlist.users:
pg = 1
wbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)
while (pg <= 3):
wbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)
if fl.screen_name not in usernames:
usernames[fl.screen_name]=1
for wb in wbres.statuses:
weiboCnt += 1
wb_raw.write(wb.text.encode('utf-8')+'\n')
pg += 1
followlist = client.friendships.friends.get(screen_name='尹欢欢欢',count=200)
for fl in followlist.users:
pg = 1
if fl.screen_name in usernames:
continue
wbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)
while (pg <= 3):
wbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)
if fl.screen_name not in usernames:
usernames[fl.screen_name]=1
for wb in wbres.statuses:
weiboCnt += 1
wb_raw.write(wb.text.encode('utf-8')+'\n')
pg += 1
print weiboCnt
wb_raw.close()
if __name__ == "__main__":
run()
|
normal
|
{
"blob_id": "8a04166e091e2da348928598b2356c8ad75dd831",
"index": 5889,
"step-1": "#usage:\n#crawl raw weibo text data from sina weibo users(my followees)\n#in total, there are 20080 weibo tweets, because there is uplimit for crawler\n\n# -*- coding: utf-8 -*-\nimport weibo\n\nAPP_KEY = 'your app_key'\nAPP_SECRET = 'your app_secret'\nCALL_BACK = 'your call back url'\n\ndef run():\n\ttoken = \"your access token gotten from call_back url\"\n\tclient = weibo.APIClient(APP_KEY, APP_SECRET, CALL_BACK)\n\tclient.set_access_token(token,12345)\n\n\tfollowlist = client.friendships.friends.get(screen_name='蜀云Parallelli',count=200)\n\twb_raw = open('weibo_raw_userlistweibo_big.txt','w')\n\tweiboCnt = 0\n\tusernames = {}\n\tfor fl in followlist.users:\n\t\tpg = 1\n\t\twbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)\n\t\twhile (pg <= 3):\n\t\t\twbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)\n\t\t\tif fl.screen_name not in usernames:\n\t\t\t\tusernames[fl.screen_name]=1\n\n\t\t\tfor wb in wbres.statuses:\n\t\t\t\tweiboCnt += 1\n\t\t\t\twb_raw.write(wb.text.encode('utf-8')+'\\n')\n\t\t\tpg += 1\n\tfollowlist = client.friendships.friends.get(screen_name='尹欢欢欢',count=200)\n\tfor fl in followlist.users:\n\t\tpg = 1\n\t\tif fl.screen_name in usernames: \n\t\t\tcontinue\n\t\twbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)\n\t\twhile (pg <= 3):\n\t\t\twbres = client.statuses.user_timeline.get(screen_name=fl.screen_name,page=pg)\n\t\t\tif fl.screen_name not in usernames:\n\t\t\t\tusernames[fl.screen_name]=1\n\n\t\t\tfor wb in wbres.statuses:\n\t\t\t\tweiboCnt += 1\n\t\t\t\twb_raw.write(wb.text.encode('utf-8')+'\\n')\n\t\t\tpg += 1\n\tprint weiboCnt\n\twb_raw.close()\n\t\nif __name__ == \"__main__\":\n\trun()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
alunos = list()
while True:
nome = str(input('Nome: '))
nota1 = float(input('Nota 1: '))
nota2 = float(input('Nota 2: '))
media = (nota1+nota2)/2
alunos.append([nome, [nota1, nota2], media])
pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]
if pergunta == 'N':
break
print('-=' *30)
print(f'{"Nº":<4}{"Nome":<10}{"Média":>8}')
print('-' *30)
for i, v in enumerate(alunos):
print(f'{i:<4}{v[0]:<10}{v[2]:>8}')
while True:
print('-' *30)
notas_aluno = int(input('Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))
if notas_aluno == 999:
print('Fim do Boletim.')
break
if notas_aluno <= len(alunos)-1:
print(f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}')
|
normal
|
{
"blob_id": "8dcd4914c58a7ecafdfdd70b698ef3b7141386a6",
"index": 2632,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n nome = str(input('Nome: '))\n nota1 = float(input('Nota 1: '))\n nota2 = float(input('Nota 2: '))\n media = (nota1 + nota2) / 2\n alunos.append([nome, [nota1, nota2], media])\n pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]\n if pergunta == 'N':\n break\nprint('-=' * 30)\nprint(f\"{'Nº':<4}{'Nome':<10}{'Média':>8}\")\nprint('-' * 30)\nfor i, v in enumerate(alunos):\n print(f'{i:<4}{v[0]:<10}{v[2]:>8}')\nwhile True:\n print('-' * 30)\n notas_aluno = int(input(\n 'Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))\n if notas_aluno == 999:\n print('Fim do Boletim.')\n break\n if notas_aluno <= len(alunos) - 1:\n print(\n f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}'\n )\n",
"step-3": "alunos = list()\nwhile True:\n nome = str(input('Nome: '))\n nota1 = float(input('Nota 1: '))\n nota2 = float(input('Nota 2: '))\n media = (nota1 + nota2) / 2\n alunos.append([nome, [nota1, nota2], media])\n pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]\n if pergunta == 'N':\n break\nprint('-=' * 30)\nprint(f\"{'Nº':<4}{'Nome':<10}{'Média':>8}\")\nprint('-' * 30)\nfor i, v in enumerate(alunos):\n print(f'{i:<4}{v[0]:<10}{v[2]:>8}')\nwhile True:\n print('-' * 30)\n notas_aluno = int(input(\n 'Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))\n if notas_aluno == 999:\n print('Fim do Boletim.')\n break\n if notas_aluno <= len(alunos) - 1:\n print(\n f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}'\n )\n",
"step-4": "alunos = list()\nwhile True:\n nome = str(input('Nome: '))\n nota1 = float(input('Nota 1: '))\n nota2 = float(input('Nota 2: '))\n media = (nota1+nota2)/2\n alunos.append([nome, [nota1, nota2], media])\n pergunta = str(input('Quer continuar [S/N]? ')).upper()[0]\n if pergunta == 'N':\n break\nprint('-=' *30)\nprint(f'{\"Nº\":<4}{\"Nome\":<10}{\"Média\":>8}')\nprint('-' *30)\nfor i, v in enumerate(alunos): \n print(f'{i:<4}{v[0]:<10}{v[2]:>8}')\nwhile True:\n print('-' *30)\n notas_aluno = int(input('Mostrar as notas de qual aluno? (Digite 999 para encerrar): '))\n if notas_aluno == 999:\n print('Fim do Boletim.')\n break\n if notas_aluno <= len(alunos)-1:\n print(f'As notas de {alunos[notas_aluno][0]} são {alunos[notas_aluno][1]}')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |