Dataset columns:

  code              string  (lengths 13 to 6.09M)
  order_type        string  (2 distinct values)
  original_example  dict
  step_ids          list    (lengths 1 to 5)
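The rows below follow this schema. In the first and several later rows, the code value appears to concatenate the progressively unmasked step-1 ... step-N variants stored in original_example, with <|reserved_special_token_1|> separating consecutive steps and <|reserved_special_token_0|> standing in for the spans shown as <mask token> inside the JSON. The sketch below splits such a value back into its steps; it is a minimal illustration inferred from the token strings visible in the rows, not from any documented schema, and the helper and row names are hypothetical.

STEP_SEP = "<|reserved_special_token_1|>"
MASK = "<|reserved_special_token_0|>"

def split_steps(code_value):
    # Split the concatenated field into one string per step and map the
    # reserved mask token back to the '<mask token>' placeholder used in
    # the original_example dict.
    steps = [part.strip() for part in code_value.split(STEP_SEP)]
    return [step.replace(MASK, "<mask token>") for step in steps]

# Usage with a hypothetical row dict following the schema above:
# steps = split_steps(row["code"])
# Each element should then line up with the step-1, step-2, ... entries in
# row["original_example"] (again, an inference from the visible rows).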
<|reserved_special_token_0|> @app.route('/') def interactive_input(): return render_template('main.html') @app.route('/food_1_star') def food_1_star(): return render_template('food_1.html') <|reserved_special_token_0|> @app.route('/general_5_star') def general_5_star(): return render_template('general_5.html') @app.route('/food_1') def food_1(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_1_star_large') my_prediction = gpt2.generate(sess, run_name='food_1_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/food_5') def food_5(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_5_star_large') my_prediction = gpt2.generate(sess, run_name='food_5_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) <|reserved_special_token_0|> @app.route('/general_5') def general_5(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='general_5_star_large') my_prediction = gpt2.generate(sess, run_name='general_5_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @app.route('/') def interactive_input(): return render_template('main.html') @app.route('/food_1_star') def food_1_star(): return render_template('food_1.html') <|reserved_special_token_0|> @app.route('/general_5_star') def general_5_star(): return render_template('general_5.html') @app.route('/food_1') def food_1(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_1_star_large') my_prediction = gpt2.generate(sess, run_name='food_1_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, 
result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/food_5') def food_5(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_5_star_large') my_prediction = gpt2.generate(sess, run_name='food_5_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/general_1') def general_1(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='general_1_star_large') my_prediction = gpt2.generate(sess, run_name='general_1_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/general_5') def general_5(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='general_5_star_large') my_prediction = gpt2.generate(sess, run_name='general_5_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @app.route('/') def interactive_input(): return render_template('main.html') @app.route('/food_1_star') def food_1_star(): return render_template('food_1.html') @app.route('/food_5_star') def food_5_star(): return render_template('food_5.html') @app.route('/general_1_star') def general_1_star(): return render_template('general_1.html') @app.route('/general_5_star') def general_5_star(): return render_template('general_5.html') @app.route('/food_1') def food_1(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_1_star_large') my_prediction = gpt2.generate(sess, run_name='food_1_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/food_5') def food_5(): try: lang = 
request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_5_star_large') my_prediction = gpt2.generate(sess, run_name='food_5_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/general_1') def general_1(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='general_1_star_large') my_prediction = gpt2.generate(sess, run_name='general_1_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/general_5') def general_5(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='general_5_star_large') my_prediction = gpt2.generate(sess, run_name='general_5_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> app = Flask(__name__) Bootstrap(app) @app.route('/') def interactive_input(): return render_template('main.html') @app.route('/food_1_star') def food_1_star(): return render_template('food_1.html') @app.route('/food_5_star') def food_5_star(): return render_template('food_5.html') @app.route('/general_1_star') def general_1_star(): return render_template('general_1.html') @app.route('/general_5_star') def general_5_star(): return render_template('general_5.html') @app.route('/food_1') def food_1(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_1_star_large') my_prediction = gpt2.generate(sess, run_name='food_1_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/food_5') def food_5(): try: lang = request.args.get('message', 0, type=str) complexity = 
request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_5_star_large') my_prediction = gpt2.generate(sess, run_name='food_5_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/general_1') def general_1(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='general_1_star_large') my_prediction = gpt2.generate(sess, run_name='general_1_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) @app.route('/general_5') def general_5(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='general_5_star_large') my_prediction = gpt2.generate(sess, run_name='general_5_star_large', temperature=complexity, length=15, prefix=lang, sample_delim= '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1=res1, result2=res2, result3=res3) except Exception as e: return str(e) if __name__ == '__main__': app.run(debug=True) <|reserved_special_token_1|> from flask import Flask,Response,render_template,url_for,request,jsonify from flask_bootstrap import Bootstrap import pandas as pd import gpt_2_simple as gpt2 import json app = Flask(__name__) Bootstrap(app) #Main Page @app.route('/') def interactive_input(): return render_template('main.html') #Creating the different routes @app.route('/food_1_star') def food_1_star(): return render_template('food_1.html') @app.route('/food_5_star') def food_5_star(): return render_template('food_5.html') @app.route('/general_1_star') def general_1_star(): return render_template('general_1.html') @app.route('/general_5_star') def general_5_star(): return render_template('general_5.html') #Reactive function that will enable the code to run @app.route('/food_1') def food_1(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_1_star_large') my_prediction = gpt2.generate(sess, run_name= 'food_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return 
jsonify(result1 = res1, result2 = res2, result3 = res3) except Exception as e: return str(e) #Reactive function that will enable the code to run @app.route('/food_5') def food_5(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='food_5_star_large') my_prediction = gpt2.generate(sess, run_name= 'food_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1 = res1, result2 = res2, result3 = res3) except Exception as e: return str(e) #Reactive function that will enable the code to run @app.route('/general_1') def general_1(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='general_1_star_large') my_prediction = gpt2.generate(sess, run_name= 'general_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1 = res1, result2 = res2, result3 = res3) except Exception as e: return str(e) #Reactive function that will enable the code to run @app.route('/general_5') def general_5(): try: lang = request.args.get('message', 0, type=str) complexity = request.args.get('complexity', 0, type=str) complexity = float(complexity) sess = gpt2.start_tf_sess() gpt2.load_gpt2(sess, run_name='general_5_star_large') my_prediction = gpt2.generate(sess, run_name= 'general_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True) res1 = str(my_prediction[0]).replace('<|endoftext|>', '') res2 = str(my_prediction[1]).replace('<|endoftext|>', '') res3 = str(my_prediction[2]).replace('<|endoftext|>', '') return jsonify(result1 = res1, result2 = res2, result3 = res3) except Exception as e: return str(e) if __name__ == '__main__': app.run(debug=True)
flexible
{ "blob_id": "1e41cc5d2661f1fb4f3a356318fabcb2b742cbdf", "index": 1826, "step-1": "<mask token>\n\n\[email protected]('/')\ndef interactive_input():\n return render_template('main.html')\n\n\[email protected]('/food_1_star')\ndef food_1_star():\n return render_template('food_1.html')\n\n\n<mask token>\n\n\[email protected]('/general_5_star')\ndef general_5_star():\n return render_template('general_5.html')\n\n\[email protected]('/food_1')\ndef food_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/food_5')\ndef food_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\n<mask token>\n\n\[email protected]('/general_5')\ndef general_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\ndef interactive_input():\n return render_template('main.html')\n\n\[email protected]('/food_1_star')\ndef food_1_star():\n return render_template('food_1.html')\n\n\n<mask token>\n\n\[email protected]('/general_5_star')\ndef general_5_star():\n return render_template('general_5.html')\n\n\[email protected]('/food_1')\ndef food_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', 
include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/food_5')\ndef food_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_1')\ndef general_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_5')\ndef general_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\[email protected]('/')\ndef interactive_input():\n return render_template('main.html')\n\n\[email protected]('/food_1_star')\ndef food_1_star():\n return render_template('food_1.html')\n\n\[email protected]('/food_5_star')\ndef food_5_star():\n return render_template('food_5.html')\n\n\[email protected]('/general_1_star')\ndef general_1_star():\n return render_template('general_1.html')\n\n\[email protected]('/general_5_star')\ndef general_5_star():\n return render_template('general_5.html')\n\n\[email protected]('/food_1')\ndef food_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_1_star_large')\n my_prediction = gpt2.generate(sess, 
run_name='food_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/food_5')\ndef food_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_1')\ndef general_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_5')\ndef general_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\n<mask token>\n", "step-4": "<mask token>\napp = Flask(__name__)\nBootstrap(app)\n\n\[email protected]('/')\ndef interactive_input():\n return render_template('main.html')\n\n\[email protected]('/food_1_star')\ndef food_1_star():\n return render_template('food_1.html')\n\n\[email protected]('/food_5_star')\ndef food_5_star():\n return render_template('food_5.html')\n\n\[email protected]('/general_1_star')\ndef general_1_star():\n return render_template('general_1.html')\n\n\[email protected]('/general_5_star')\ndef general_5_star():\n return render_template('general_5.html')\n\n\[email protected]('/food_1')\ndef food_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = 
float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/food_5')\ndef food_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='food_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='food_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_1')\ndef general_1():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_1_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_1_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\[email protected]('/general_5')\ndef general_5():\n try:\n lang = request.args.get('message', 0, type=str)\n complexity = request.args.get('complexity', 0, type=str)\n complexity = float(complexity)\n sess = gpt2.start_tf_sess()\n gpt2.load_gpt2(sess, run_name='general_5_star_large')\n my_prediction = gpt2.generate(sess, run_name='general_5_star_large',\n temperature=complexity, length=15, prefix=lang, sample_delim=\n '<|endoftext|>', include_prefix=False, nsamples=3,\n return_as_list=True)\n res1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n res2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n res3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n return jsonify(result1=res1, result2=res2, result3=res3)\n except Exception as e:\n return str(e)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-5": "from flask import Flask,Response,render_template,url_for,request,jsonify\nfrom flask_bootstrap import Bootstrap\nimport pandas as pd \nimport gpt_2_simple as gpt2\nimport json\n\n\napp = Flask(__name__)\nBootstrap(app)\n\n#Main Page\[email protected]('/')\ndef interactive_input():\n\treturn render_template('main.html')\n\n#Creating the different routes\[email protected]('/food_1_star')\ndef food_1_star():\n\treturn render_template('food_1.html')\n\[email protected]('/food_5_star')\ndef food_5_star():\n\treturn render_template('food_5.html')\n\[email 
protected]('/general_1_star')\ndef general_1_star():\n\treturn render_template('general_1.html')\n\[email protected]('/general_5_star')\ndef general_5_star():\n\treturn render_template('general_5.html')\n\n#Reactive function that will enable the code to run \[email protected]('/food_1')\ndef food_1():\n\ttry:\n\t\tlang = request.args.get('message', 0, type=str)\n\t\tcomplexity = request.args.get('complexity', 0, type=str)\n\t\tcomplexity = float(complexity)\n\t\tsess = gpt2.start_tf_sess()\n\t\tgpt2.load_gpt2(sess, run_name='food_1_star_large')\n\t\tmy_prediction = gpt2.generate(sess, run_name= 'food_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)\n\t\tres1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n\t\tres2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n\t\tres3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n\t\treturn jsonify(result1 = res1, result2 = res2, result3 = res3)\n\texcept Exception as e:\n\t\treturn str(e)\n\n#Reactive function that will enable the code to run \[email protected]('/food_5')\ndef food_5():\n\ttry:\n\t\tlang = request.args.get('message', 0, type=str)\n\t\tcomplexity = request.args.get('complexity', 0, type=str)\n\t\tcomplexity = float(complexity)\n\t\tsess = gpt2.start_tf_sess()\n\t\tgpt2.load_gpt2(sess, run_name='food_5_star_large')\n\t\tmy_prediction = gpt2.generate(sess, run_name= 'food_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)\n\t\tres1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n\t\tres2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n\t\tres3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n\t\treturn jsonify(result1 = res1, result2 = res2, result3 = res3)\n\texcept Exception as e:\n\t\treturn str(e)\n\n#Reactive function that will enable the code to run \[email protected]('/general_1')\ndef general_1():\n\ttry:\n\t\tlang = request.args.get('message', 0, type=str)\n\t\tcomplexity = request.args.get('complexity', 0, type=str)\n\t\tcomplexity = float(complexity)\n\t\tsess = gpt2.start_tf_sess()\n\t\tgpt2.load_gpt2(sess, run_name='general_1_star_large')\n\t\tmy_prediction = gpt2.generate(sess, run_name= 'general_1_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)\n\t\tres1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n\t\tres2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n\t\tres3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n\t\treturn jsonify(result1 = res1, result2 = res2, result3 = res3)\n\texcept Exception as e:\n\t\treturn str(e)\n\n#Reactive function that will enable the code to run \[email protected]('/general_5')\ndef general_5():\n\ttry:\n\t\tlang = request.args.get('message', 0, type=str)\n\t\tcomplexity = request.args.get('complexity', 0, type=str)\n\t\tcomplexity = float(complexity)\n\t\tsess = gpt2.start_tf_sess()\n\t\tgpt2.load_gpt2(sess, run_name='general_5_star_large')\n\t\tmy_prediction = gpt2.generate(sess, run_name= 'general_5_star_large',temperature=complexity, length=15, prefix= lang, sample_delim = '<|endoftext|>', include_prefix=False, nsamples=3, return_as_list=True)\n\t\tres1 = str(my_prediction[0]).replace('<|endoftext|>', '')\n\t\tres2 = str(my_prediction[1]).replace('<|endoftext|>', '')\n\t\tres3 = str(my_prediction[2]).replace('<|endoftext|>', '')\n\t\treturn 
jsonify(result1 = res1, result2 = res2, result3 = res3)\n\texcept Exception as e:\n\t\treturn str(e)\n\nif __name__ == '__main__':\n\tapp.run(debug=True)", "step-ids": [ 6, 7, 9, 11, 13 ] }
[ 6, 7, 9, 11, 13 ]
from flask import Flask, app
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
DBNAME = 'database.db'


def create_app():
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'
    app.config['SQLALCHEMY_DATABASE_URL'] = f'sqlite:///{DBNAME}'
    db.init_app(app)
    from .views import views
    from .auth import auth
    app.register_blueprint(views, urlprefix='/')
    app.register_blueprint(auth, urlprefix='/')
    return app
normal
{ "blob_id": "c6fdb9c405427a3583a59065f77c75c4aa781405", "index": 5417, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'\n app.config['SQLALCHEMY_DATABASE_URL'] = f'sqlite:///{DBNAME}'\n db.init_app(app)\n from .views import views\n from .auth import auth\n app.register_blueprint(views, urlprefix='/')\n app.register_blueprint(auth, urlprefix='/')\n return app\n", "step-3": "<mask token>\ndb = SQLAlchemy()\nDBNAME = 'database.db'\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'\n app.config['SQLALCHEMY_DATABASE_URL'] = f'sqlite:///{DBNAME}'\n db.init_app(app)\n from .views import views\n from .auth import auth\n app.register_blueprint(views, urlprefix='/')\n app.register_blueprint(auth, urlprefix='/')\n return app\n", "step-4": "from flask import Flask, app\nfrom flask_sqlalchemy import SQLAlchemy\ndb = SQLAlchemy()\nDBNAME = 'database.db'\n\n\ndef create_app():\n app = Flask(__name__)\n app.config['SECRET_KEY'] = 'KARNISINGHSHEKHAWAT'\n app.config['SQLALCHEMY_DATABASE_URL'] = f'sqlite:///{DBNAME}'\n db.init_app(app)\n from .views import views\n from .auth import auth\n app.register_blueprint(views, urlprefix='/')\n app.register_blueprint(auth, urlprefix='/')\n return app\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def glInitYuvTargetEXT():
    """Return boolean indicating whether this extension is available"""
    from OpenGL import extensions
    return extensions.hasGLExtension(_EXTENSION_NAME)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.YUV_target import *
from OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME


def glInitYuvTargetEXT():
    """Return boolean indicating whether this extension is available"""
    from OpenGL import extensions
    return extensions.hasGLExtension(_EXTENSION_NAME)
<|reserved_special_token_1|>
'''OpenGL extension EXT.YUV_target

This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.YUV_target to provide a more
Python-friendly API

Overview (from the spec)

    This extension adds support for three new YUV related items: first
    rendering to YUV images, second sampling from YUV images while keeping the
    data in YUV space, third it defines a new built in function that does
    conversion from RGB to YUV with controls to choose ITU-R BT.601-7,
    ITU-R BT.601-7 Full range (JFIF images), or ITU-R BT.709-5 standard.

    This new functionality is layered on top of the OES_EGL_image_external
    extension.

    To perform the YUV rendering capability in this extension an application
    will attach a texture to the framebuffer object as the color attachment.
    If the texture has a target type of TEXTURE_EXTERNAL_OES with YUV color
    format then the GL driver can use this framebuffer object as the render
    target, TEXTURE_EXTERNAL_OES target with RGB color format are not allowed
    with this extension.

The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/YUV_target.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.YUV_target import *
from OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME

def glInitYuvTargetEXT():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )


### END AUTOGENERATED SECTION
flexible
{ "blob_id": "08420d31713859946b2f19cebf68c333331cb80e", "index": 1494, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef glInitYuvTargetEXT():\n \"\"\"Return boolean indicating whether this extension is available\"\"\"\n from OpenGL import extensions\n return extensions.hasGLExtension(_EXTENSION_NAME)\n", "step-3": "<mask token>\nfrom OpenGL import platform, constant, arrays\nfrom OpenGL import extensions, wrapper\nimport ctypes\nfrom OpenGL.raw.GLES2 import _types, _glgets\nfrom OpenGL.raw.GLES2.EXT.YUV_target import *\nfrom OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME\n\n\ndef glInitYuvTargetEXT():\n \"\"\"Return boolean indicating whether this extension is available\"\"\"\n from OpenGL import extensions\n return extensions.hasGLExtension(_EXTENSION_NAME)\n", "step-4": "'''OpenGL extension EXT.YUV_target\n\nThis module customises the behaviour of the \nOpenGL.raw.GLES2.EXT.YUV_target to provide a more \nPython-friendly API\n\nOverview (from the spec)\n\t\n\tThis extension adds support for three new YUV related items: first\n\trendering to YUV images, second sampling from YUV images while keeping the\n\tdata in YUV space, third it defines a new built in function that does\n\tconversion from RGB to YUV with controls to choose ITU-R BT.601-7,\n\tITU-R BT.601-7 Full range (JFIF images), or ITU-R BT.709-5 standard.\n\t\n\tThis new functionality is layered on top of the OES_EGL_image_external\n\textension.\n\t\n\tTo perform the YUV rendering capability in this extension an application\n\twill attach a texture to the framebuffer object as the color attachment.\n\tIf the texture has a target type of TEXTURE_EXTERNAL_OES with YUV color\n\tformat then the GL driver can use this framebuffer object as the render\n\ttarget, TEXTURE_EXTERNAL_OES target with RGB color format are not allowed\n\twith this extension.\n\nThe official definition of this extension is available here:\nhttp://www.opengl.org/registry/specs/EXT/YUV_target.txt\n'''\nfrom OpenGL import platform, constant, arrays\nfrom OpenGL import extensions, wrapper\nimport ctypes\nfrom OpenGL.raw.GLES2 import _types, _glgets\nfrom OpenGL.raw.GLES2.EXT.YUV_target import *\nfrom OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME\n\ndef glInitYuvTargetEXT():\n '''Return boolean indicating whether this extension is available'''\n from OpenGL import extensions\n return extensions.hasGLExtension( _EXTENSION_NAME )\n\n\n### END AUTOGENERATED SECTION", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(N):
    c, t = map(int, input().split())
    if nm > c and T >= t:
        nm = c
if nm == 1000000:
    print('TLE')
else:
    print(nm)
<|reserved_special_token_1|>
N, T = map(int, input().split())
nm = 1000000
for i in range(N):
    c, t = map(int, input().split())
    if nm > c and T >= t:
        nm = c
if nm == 1000000:
    print('TLE')
else:
    print(nm)
<|reserved_special_token_1|>
N,T=map(int,input().split())
nm=1000000
for i in range(N):
    c,t=map(int,input().split())
    if nm>c and T>=t:
        nm=c
if nm==1000000:
    print("TLE")
else:
    print(nm)
flexible
{ "blob_id": "8a0e781f29c426161240e33b9d2adc7537b3d352", "index": 2513, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(N):\n c, t = map(int, input().split())\n if nm > c and T >= t:\n nm = c\nif nm == 1000000:\n print('TLE')\nelse:\n print(nm)\n", "step-3": "N, T = map(int, input().split())\nnm = 1000000\nfor i in range(N):\n c, t = map(int, input().split())\n if nm > c and T >= t:\n nm = c\nif nm == 1000000:\n print('TLE')\nelse:\n print(nm)\n", "step-4": "N,T=map(int,input().split())\nnm=1000000\nfor i in range(N):\n c,t=map(int,input().split())\n if nm>c and T>=t:\n nm=c\nif nm==1000000:\n print(\"TLE\")\nelse:\n print(nm)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
def test_{{ project_name }}():
    assert True
normal
{ "blob_id": "1c1f1dab1ae2e8f18536784a5dec9de37c8a8582", "index": 3995, "step-1": "def test_{{ project_name }}():\n assert True\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @pytest.mark.parametrize('path', glob.glob(join(data_dir('structure'), '*.cif'))) def test_array_conversion(path): pdbx_file = pdbx.PDBxFile.read(path) ref_structure = pdbx.get_structure(pdbx_file, model=1, extra_fields=[ 'charge']) ref_structure.bonds = struc.connect_via_residue_names(ref_structure) pdbqt_file = pdbqt.PDBQTFile() with warnings.catch_warnings(): warnings.simplefilter('ignore') mask = pdbqt.set_structure(pdbqt_file, ref_structure) ref_structure = ref_structure[mask] temp = TemporaryFile('r+') pdbqt_file.write(temp) temp.seek(0) pdbqt_file = pdbqt.PDBQTFile.read(temp) test_structure = pdbqt.get_structure(pdbqt_file, model=1) temp.close() assert np.allclose(test_structure.coord, ref_structure.coord) for category in test_structure.get_annotation_categories(): if category == 'element': continue try: assert np.array_equal(test_structure.get_annotation(category), ref_structure.get_annotation(category)) except AssertionError: print(f"Inequality in '{category}' category") raise <|reserved_special_token_1|> import warnings from tempfile import TemporaryFile import glob from os.path import join import pytest import numpy as np import biotite.structure as struc import biotite.structure.io.pdbqt as pdbqt import biotite.structure.io.pdbx as pdbx from ..util import data_dir @pytest.mark.parametrize('path', glob.glob(join(data_dir('structure'), '*.cif'))) def test_array_conversion(path): pdbx_file = pdbx.PDBxFile.read(path) ref_structure = pdbx.get_structure(pdbx_file, model=1, extra_fields=[ 'charge']) ref_structure.bonds = struc.connect_via_residue_names(ref_structure) pdbqt_file = pdbqt.PDBQTFile() with warnings.catch_warnings(): warnings.simplefilter('ignore') mask = pdbqt.set_structure(pdbqt_file, ref_structure) ref_structure = ref_structure[mask] temp = TemporaryFile('r+') pdbqt_file.write(temp) temp.seek(0) pdbqt_file = pdbqt.PDBQTFile.read(temp) test_structure = pdbqt.get_structure(pdbqt_file, model=1) temp.close() assert np.allclose(test_structure.coord, ref_structure.coord) for category in test_structure.get_annotation_categories(): if category == 'element': continue try: assert np.array_equal(test_structure.get_annotation(category), ref_structure.get_annotation(category)) except AssertionError: print(f"Inequality in '{category}' category") raise <|reserved_special_token_1|> # This source code is part of the Biotite package and is distributed # under the 3-Clause BSD License. Please see 'LICENSE.rst' for further # information. 
import warnings from tempfile import TemporaryFile import glob from os.path import join import pytest import numpy as np import biotite.structure as struc import biotite.structure.io.pdbqt as pdbqt import biotite.structure.io.pdbx as pdbx from ..util import data_dir @pytest.mark.parametrize( "path", glob.glob(join(data_dir("structure"), "*.cif")) ) def test_array_conversion(path): pdbx_file = pdbx.PDBxFile.read(path) ref_structure = pdbx.get_structure( pdbx_file, model=1, extra_fields=["charge"] ) ref_structure.bonds = struc.connect_via_residue_names(ref_structure) pdbqt_file = pdbqt.PDBQTFile() with warnings.catch_warnings(): warnings.simplefilter("ignore") # Ignore warnings about atoms not parametrized mask = pdbqt.set_structure(pdbqt_file, ref_structure) ref_structure = ref_structure[mask] temp = TemporaryFile("r+") pdbqt_file.write(temp) temp.seek(0) pdbqt_file = pdbqt.PDBQTFile.read(temp) test_structure = pdbqt.get_structure(pdbqt_file, model=1) temp.close() assert np.allclose(test_structure.coord, ref_structure.coord) for category in test_structure.get_annotation_categories(): if category == "element": # PDBQT uses special atom types, which replace the usual # elements # -> there cannot be equality of the 'element' annotation continue try: assert np.array_equal( test_structure.get_annotation(category), ref_structure.get_annotation(category) ) except AssertionError: print(f"Inequality in '{category}' category") raise
flexible
{ "blob_id": "cc637d14ce2106fcc3b8bbb54e497691e72a3f65", "index": 2858, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('path', glob.glob(join(data_dir('structure'),\n '*.cif')))\ndef test_array_conversion(path):\n pdbx_file = pdbx.PDBxFile.read(path)\n ref_structure = pdbx.get_structure(pdbx_file, model=1, extra_fields=[\n 'charge'])\n ref_structure.bonds = struc.connect_via_residue_names(ref_structure)\n pdbqt_file = pdbqt.PDBQTFile()\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n mask = pdbqt.set_structure(pdbqt_file, ref_structure)\n ref_structure = ref_structure[mask]\n temp = TemporaryFile('r+')\n pdbqt_file.write(temp)\n temp.seek(0)\n pdbqt_file = pdbqt.PDBQTFile.read(temp)\n test_structure = pdbqt.get_structure(pdbqt_file, model=1)\n temp.close()\n assert np.allclose(test_structure.coord, ref_structure.coord)\n for category in test_structure.get_annotation_categories():\n if category == 'element':\n continue\n try:\n assert np.array_equal(test_structure.get_annotation(category),\n ref_structure.get_annotation(category))\n except AssertionError:\n print(f\"Inequality in '{category}' category\")\n raise\n", "step-3": "import warnings\nfrom tempfile import TemporaryFile\nimport glob\nfrom os.path import join\nimport pytest\nimport numpy as np\nimport biotite.structure as struc\nimport biotite.structure.io.pdbqt as pdbqt\nimport biotite.structure.io.pdbx as pdbx\nfrom ..util import data_dir\n\n\[email protected]('path', glob.glob(join(data_dir('structure'),\n '*.cif')))\ndef test_array_conversion(path):\n pdbx_file = pdbx.PDBxFile.read(path)\n ref_structure = pdbx.get_structure(pdbx_file, model=1, extra_fields=[\n 'charge'])\n ref_structure.bonds = struc.connect_via_residue_names(ref_structure)\n pdbqt_file = pdbqt.PDBQTFile()\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n mask = pdbqt.set_structure(pdbqt_file, ref_structure)\n ref_structure = ref_structure[mask]\n temp = TemporaryFile('r+')\n pdbqt_file.write(temp)\n temp.seek(0)\n pdbqt_file = pdbqt.PDBQTFile.read(temp)\n test_structure = pdbqt.get_structure(pdbqt_file, model=1)\n temp.close()\n assert np.allclose(test_structure.coord, ref_structure.coord)\n for category in test_structure.get_annotation_categories():\n if category == 'element':\n continue\n try:\n assert np.array_equal(test_structure.get_annotation(category),\n ref_structure.get_annotation(category))\n except AssertionError:\n print(f\"Inequality in '{category}' category\")\n raise\n", "step-4": "# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. 
Please see 'LICENSE.rst' for further\n# information.\n\nimport warnings\nfrom tempfile import TemporaryFile\nimport glob\nfrom os.path import join\nimport pytest\nimport numpy as np\nimport biotite.structure as struc\nimport biotite.structure.io.pdbqt as pdbqt\nimport biotite.structure.io.pdbx as pdbx\nfrom ..util import data_dir\n\n\[email protected](\n \"path\", glob.glob(join(data_dir(\"structure\"), \"*.cif\"))\n)\ndef test_array_conversion(path):\n pdbx_file = pdbx.PDBxFile.read(path)\n ref_structure = pdbx.get_structure(\n pdbx_file, model=1, extra_fields=[\"charge\"]\n )\n ref_structure.bonds = struc.connect_via_residue_names(ref_structure)\n\n pdbqt_file = pdbqt.PDBQTFile()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # Ignore warnings about atoms not parametrized \n mask = pdbqt.set_structure(pdbqt_file, ref_structure)\n ref_structure = ref_structure[mask]\n temp = TemporaryFile(\"r+\")\n pdbqt_file.write(temp)\n\n temp.seek(0)\n pdbqt_file = pdbqt.PDBQTFile.read(temp)\n test_structure = pdbqt.get_structure(pdbqt_file, model=1)\n temp.close()\n\n assert np.allclose(test_structure.coord, ref_structure.coord)\n for category in test_structure.get_annotation_categories():\n if category == \"element\":\n # PDBQT uses special atom types, which replace the usual\n # elements\n # -> there cannot be equality of the 'element' annotation\n continue\n try:\n assert np.array_equal(\n test_structure.get_annotation(category),\n ref_structure.get_annotation(category)\n )\n except AssertionError:\n print(f\"Inequality in '{category}' category\")\n raise\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/python try: from Queue import Queue except ImportError: # Python 3 from queue import Queue class BFSWithQueue: """Breadth-First Search. Attributes ---------- graph : input graph color : dict with nodes, private distance : dict with nodes (distances to source node) parent : dict (BFS tree) dag : graph (BFS tree) Examples -------- >>> from graphtheory.structures.edges import Edge >>> from graphtheory.structures.graphs import Graph >>> from graphtheory.traversing.bfs import BFSWithQueue >>> G = Graph(n=10, False) # an exemplary undirected graph # Add nodes and edges here. >>> order = list() >>> algorithm = BFSWithQueue(G) >>> algorithm.run(source=0, pre_action=lambda node: order.append(node)) >>> order # visited nodes >>> algorithm.distance[target] # distance from source to target >>> algorithm.parent # BFS tree as a dict >>> algorithm.dag # BFS tree as a directed graph >>> algorithm.path(source, target) Notes ----- Based on: Cormen, T. H., Leiserson, C. E., Rivest, R. L., and Stein, C., 2009, Introduction to Algorithms, third edition, The MIT Press, Cambridge, London. https://en.wikipedia.org/wiki/Breadth-first_search """ def __init__(self, graph): """The algorithm initialization.""" self.graph = graph self.color = dict(((node, "WHITE") for node in self.graph.iternodes())) self.distance = dict(((node, float("inf")) for node in self.graph.iternodes())) self.parent = dict(((node, None) for node in self.graph.iternodes())) self.dag = self.graph.__class__(self.graph.v(), directed=True) for node in self.graph.iternodes(): # isolated nodes are possible self.dag.add_node(node) def run(self, source=None, pre_action=None, post_action=None): """Executable pseudocode.""" if source is not None: self._visit(source, pre_action, post_action) else: for node in self.graph.iternodes(): if self.color[node] == "WHITE": self._visit(node, pre_action, post_action) def _visit(self, node, pre_action=None, post_action=None): """Explore the connected component.""" self.color[node] = "GREY" self.distance[node] = 0 self.parent[node] = None Q = Queue() Q.put(node) # node is GREY if pre_action: # when Q.put pre_action(node) while not Q.empty(): source = Q.get() for edge in self.graph.iteroutedges(source): if self.color[edge.target] == "WHITE": self.color[edge.target] = "GREY" self.distance[edge.target] = self.distance[source] + 1 self.parent[edge.target] = source self.dag.add_edge(edge) Q.put(edge.target) # target is GREY if pre_action: # when Q.put pre_action(edge.target) self.color[source] = "BLACK" if post_action: # source became BLACK post_action(source) def path(self, source, target): """Construct a path from source to target.""" if source == target: return [source] elif self.parent[target] is None: raise ValueError("no path to target") else: return self.path(source, self.parent[target]) + [target] class SimpleBFS: """Breadth-First Search. Attributes ---------- graph : input graph parent : dict (BFS tree) dag : graph (BFS tree) Examples -------- >>> from graphtheory.structures.edges import Edge >>> from graphtheory.structures.graphs import Graph >>> from graphtheory.traversing.bfs import SimpleBFS >>> G = Graph(n=10, False) # an exemplary undirected graph # Add nodes and edges here. >>> order = list() >>> algorithm = SimpleBFS(G) >>> algorithm.run(source=0, pre_action=lambda node: order.append(node)) >>> order # visited nodes >>> algorithm.parent # BFS tree as a dict >>> algorithm.dag # BFS tree as a directed graph >>> algorithm.path(source, target) Notes ----- Based on: Cormen, T. H., Leiserson, C. 
E., Rivest, R. L., and Stein, C., 2009, Introduction to Algorithms, third edition, The MIT Press, Cambridge, London. https://en.wikipedia.org/wiki/Breadth-first_search """ def __init__(self, graph): """The algorithm initialization.""" self.graph = graph self.parent = dict() self.dag = self.graph.__class__(self.graph.v(), directed=True) for node in self.graph.iternodes(): # isolated nodes are possible self.dag.add_node(node) def run(self, source=None, pre_action=None, post_action=None): """Executable pseudocode.""" if source is not None: self._visit(source, pre_action, post_action) else: for node in self.graph.iternodes(): if node not in self.parent: self._visit(node, pre_action, post_action) def _visit(self, node, pre_action=None, post_action=None): """Explore the connected component.""" Q = Queue() self.parent[node] = None # before Q.put Q.put(node) if pre_action: # when Q.put pre_action(node) while not Q.empty(): source = Q.get() for edge in self.graph.iteroutedges(source): if edge.target not in self.parent: self.parent[edge.target] = source # before Q.put self.dag.add_edge(edge) Q.put(edge.target) if pre_action: # when Q.put pre_action(edge.target) if post_action: post_action(source) def path(self, source, target): """Construct a path from source to target.""" if source == target: return [source] elif self.parent[target] is None: raise ValueError("no path to target") else: return self.path(source, self.parent[target]) + [target] # EOF
normal
{ "blob_id": "0bce5d590b96e434cd8aee7531a321bc648c1981", "index": 8722, "step-1": "<mask token>\n\n\nclass BFSWithQueue:\n <mask token>\n <mask token>\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for node in self.graph.iternodes():\n if self.color[node] == 'WHITE':\n self._visit(node, pre_action, post_action)\n <mask token>\n <mask token>\n\n\nclass SimpleBFS:\n \"\"\"Breadth-First Search.\n \n Attributes\n ----------\n graph : input graph\n parent : dict (BFS tree)\n dag : graph (BFS tree)\n \n Examples\n --------\n >>> from graphtheory.structures.edges import Edge\n >>> from graphtheory.structures.graphs import Graph\n >>> from graphtheory.traversing.bfs import SimpleBFS\n >>> G = Graph(n=10, False) # an exemplary undirected graph\n # Add nodes and edges here.\n >>> order = list()\n >>> algorithm = SimpleBFS(G)\n >>> algorithm.run(source=0, pre_action=lambda node: order.append(node))\n >>> order # visited nodes\n >>> algorithm.parent # BFS tree as a dict\n >>> algorithm.dag # BFS tree as a directed graph\n >>> algorithm.path(source, target)\n \n Notes\n -----\n Based on:\n \n Cormen, T. H., Leiserson, C. E., Rivest, R. L., and Stein, C., 2009, \n Introduction to Algorithms, third edition, The MIT Press, \n Cambridge, London.\n \n https://en.wikipedia.org/wiki/Breadth-first_search\n \"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n self.graph = graph\n self.parent = dict()\n self.dag = self.graph.__class__(self.graph.v(), directed=True)\n for node in self.graph.iternodes():\n self.dag.add_node(node)\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for node in self.graph.iternodes():\n if node not in self.parent:\n self._visit(node, pre_action, post_action)\n\n def _visit(self, node, pre_action=None, post_action=None):\n \"\"\"Explore the connected component.\"\"\"\n Q = Queue()\n self.parent[node] = None\n Q.put(node)\n if pre_action:\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if edge.target not in self.parent:\n self.parent[edge.target] = source\n self.dag.add_edge(edge)\n Q.put(edge.target)\n if pre_action:\n pre_action(edge.target)\n if post_action:\n post_action(source)\n\n def path(self, source, target):\n \"\"\"Construct a path from source to target.\"\"\"\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError('no path to target')\n else:\n return self.path(source, self.parent[target]) + [target]\n", "step-2": "<mask token>\n\n\nclass BFSWithQueue:\n <mask token>\n <mask token>\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for node in self.graph.iternodes():\n if self.color[node] == 'WHITE':\n self._visit(node, pre_action, post_action)\n\n def _visit(self, node, pre_action=None, post_action=None):\n \"\"\"Explore the connected component.\"\"\"\n self.color[node] = 'GREY'\n self.distance[node] = 0\n self.parent[node] = None\n Q = Queue()\n Q.put(node)\n if pre_action:\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if self.color[edge.target] == 'WHITE':\n 
self.color[edge.target] = 'GREY'\n self.distance[edge.target] = self.distance[source] + 1\n self.parent[edge.target] = source\n self.dag.add_edge(edge)\n Q.put(edge.target)\n if pre_action:\n pre_action(edge.target)\n self.color[source] = 'BLACK'\n if post_action:\n post_action(source)\n\n def path(self, source, target):\n \"\"\"Construct a path from source to target.\"\"\"\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError('no path to target')\n else:\n return self.path(source, self.parent[target]) + [target]\n\n\nclass SimpleBFS:\n \"\"\"Breadth-First Search.\n \n Attributes\n ----------\n graph : input graph\n parent : dict (BFS tree)\n dag : graph (BFS tree)\n \n Examples\n --------\n >>> from graphtheory.structures.edges import Edge\n >>> from graphtheory.structures.graphs import Graph\n >>> from graphtheory.traversing.bfs import SimpleBFS\n >>> G = Graph(n=10, False) # an exemplary undirected graph\n # Add nodes and edges here.\n >>> order = list()\n >>> algorithm = SimpleBFS(G)\n >>> algorithm.run(source=0, pre_action=lambda node: order.append(node))\n >>> order # visited nodes\n >>> algorithm.parent # BFS tree as a dict\n >>> algorithm.dag # BFS tree as a directed graph\n >>> algorithm.path(source, target)\n \n Notes\n -----\n Based on:\n \n Cormen, T. H., Leiserson, C. E., Rivest, R. L., and Stein, C., 2009, \n Introduction to Algorithms, third edition, The MIT Press, \n Cambridge, London.\n \n https://en.wikipedia.org/wiki/Breadth-first_search\n \"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n self.graph = graph\n self.parent = dict()\n self.dag = self.graph.__class__(self.graph.v(), directed=True)\n for node in self.graph.iternodes():\n self.dag.add_node(node)\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for node in self.graph.iternodes():\n if node not in self.parent:\n self._visit(node, pre_action, post_action)\n\n def _visit(self, node, pre_action=None, post_action=None):\n \"\"\"Explore the connected component.\"\"\"\n Q = Queue()\n self.parent[node] = None\n Q.put(node)\n if pre_action:\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if edge.target not in self.parent:\n self.parent[edge.target] = source\n self.dag.add_edge(edge)\n Q.put(edge.target)\n if pre_action:\n pre_action(edge.target)\n if post_action:\n post_action(source)\n\n def path(self, source, target):\n \"\"\"Construct a path from source to target.\"\"\"\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError('no path to target')\n else:\n return self.path(source, self.parent[target]) + [target]\n", "step-3": "<mask token>\n\n\nclass BFSWithQueue:\n <mask token>\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n self.graph = graph\n self.color = dict((node, 'WHITE') for node in self.graph.iternodes())\n self.distance = dict((node, float('inf')) for node in self.graph.\n iternodes())\n self.parent = dict((node, None) for node in self.graph.iternodes())\n self.dag = self.graph.__class__(self.graph.v(), directed=True)\n for node in self.graph.iternodes():\n self.dag.add_node(node)\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for 
node in self.graph.iternodes():\n if self.color[node] == 'WHITE':\n self._visit(node, pre_action, post_action)\n\n def _visit(self, node, pre_action=None, post_action=None):\n \"\"\"Explore the connected component.\"\"\"\n self.color[node] = 'GREY'\n self.distance[node] = 0\n self.parent[node] = None\n Q = Queue()\n Q.put(node)\n if pre_action:\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if self.color[edge.target] == 'WHITE':\n self.color[edge.target] = 'GREY'\n self.distance[edge.target] = self.distance[source] + 1\n self.parent[edge.target] = source\n self.dag.add_edge(edge)\n Q.put(edge.target)\n if pre_action:\n pre_action(edge.target)\n self.color[source] = 'BLACK'\n if post_action:\n post_action(source)\n\n def path(self, source, target):\n \"\"\"Construct a path from source to target.\"\"\"\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError('no path to target')\n else:\n return self.path(source, self.parent[target]) + [target]\n\n\nclass SimpleBFS:\n \"\"\"Breadth-First Search.\n \n Attributes\n ----------\n graph : input graph\n parent : dict (BFS tree)\n dag : graph (BFS tree)\n \n Examples\n --------\n >>> from graphtheory.structures.edges import Edge\n >>> from graphtheory.structures.graphs import Graph\n >>> from graphtheory.traversing.bfs import SimpleBFS\n >>> G = Graph(n=10, False) # an exemplary undirected graph\n # Add nodes and edges here.\n >>> order = list()\n >>> algorithm = SimpleBFS(G)\n >>> algorithm.run(source=0, pre_action=lambda node: order.append(node))\n >>> order # visited nodes\n >>> algorithm.parent # BFS tree as a dict\n >>> algorithm.dag # BFS tree as a directed graph\n >>> algorithm.path(source, target)\n \n Notes\n -----\n Based on:\n \n Cormen, T. H., Leiserson, C. E., Rivest, R. 
L., and Stein, C., 2009, \n Introduction to Algorithms, third edition, The MIT Press, \n Cambridge, London.\n \n https://en.wikipedia.org/wiki/Breadth-first_search\n \"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n self.graph = graph\n self.parent = dict()\n self.dag = self.graph.__class__(self.graph.v(), directed=True)\n for node in self.graph.iternodes():\n self.dag.add_node(node)\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for node in self.graph.iternodes():\n if node not in self.parent:\n self._visit(node, pre_action, post_action)\n\n def _visit(self, node, pre_action=None, post_action=None):\n \"\"\"Explore the connected component.\"\"\"\n Q = Queue()\n self.parent[node] = None\n Q.put(node)\n if pre_action:\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if edge.target not in self.parent:\n self.parent[edge.target] = source\n self.dag.add_edge(edge)\n Q.put(edge.target)\n if pre_action:\n pre_action(edge.target)\n if post_action:\n post_action(source)\n\n def path(self, source, target):\n \"\"\"Construct a path from source to target.\"\"\"\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError('no path to target')\n else:\n return self.path(source, self.parent[target]) + [target]\n", "step-4": "<mask token>\n\n\nclass BFSWithQueue:\n \"\"\"Breadth-First Search.\n \n Attributes\n ----------\n graph : input graph\n color : dict with nodes, private\n distance : dict with nodes (distances to source node)\n parent : dict (BFS tree)\n dag : graph (BFS tree)\n \n Examples\n --------\n >>> from graphtheory.structures.edges import Edge\n >>> from graphtheory.structures.graphs import Graph\n >>> from graphtheory.traversing.bfs import BFSWithQueue\n >>> G = Graph(n=10, False) # an exemplary undirected graph\n # Add nodes and edges here.\n >>> order = list()\n >>> algorithm = BFSWithQueue(G)\n >>> algorithm.run(source=0, pre_action=lambda node: order.append(node))\n >>> order # visited nodes\n >>> algorithm.distance[target] # distance from source to target\n >>> algorithm.parent # BFS tree as a dict\n >>> algorithm.dag # BFS tree as a directed graph\n >>> algorithm.path(source, target)\n \n Notes\n -----\n Based on:\n \n Cormen, T. H., Leiserson, C. E., Rivest, R. 
L., and Stein, C., 2009, \n Introduction to Algorithms, third edition, The MIT Press, \n Cambridge, London.\n \n https://en.wikipedia.org/wiki/Breadth-first_search\n \"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n self.graph = graph\n self.color = dict((node, 'WHITE') for node in self.graph.iternodes())\n self.distance = dict((node, float('inf')) for node in self.graph.\n iternodes())\n self.parent = dict((node, None) for node in self.graph.iternodes())\n self.dag = self.graph.__class__(self.graph.v(), directed=True)\n for node in self.graph.iternodes():\n self.dag.add_node(node)\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for node in self.graph.iternodes():\n if self.color[node] == 'WHITE':\n self._visit(node, pre_action, post_action)\n\n def _visit(self, node, pre_action=None, post_action=None):\n \"\"\"Explore the connected component.\"\"\"\n self.color[node] = 'GREY'\n self.distance[node] = 0\n self.parent[node] = None\n Q = Queue()\n Q.put(node)\n if pre_action:\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if self.color[edge.target] == 'WHITE':\n self.color[edge.target] = 'GREY'\n self.distance[edge.target] = self.distance[source] + 1\n self.parent[edge.target] = source\n self.dag.add_edge(edge)\n Q.put(edge.target)\n if pre_action:\n pre_action(edge.target)\n self.color[source] = 'BLACK'\n if post_action:\n post_action(source)\n\n def path(self, source, target):\n \"\"\"Construct a path from source to target.\"\"\"\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError('no path to target')\n else:\n return self.path(source, self.parent[target]) + [target]\n\n\nclass SimpleBFS:\n \"\"\"Breadth-First Search.\n \n Attributes\n ----------\n graph : input graph\n parent : dict (BFS tree)\n dag : graph (BFS tree)\n \n Examples\n --------\n >>> from graphtheory.structures.edges import Edge\n >>> from graphtheory.structures.graphs import Graph\n >>> from graphtheory.traversing.bfs import SimpleBFS\n >>> G = Graph(n=10, False) # an exemplary undirected graph\n # Add nodes and edges here.\n >>> order = list()\n >>> algorithm = SimpleBFS(G)\n >>> algorithm.run(source=0, pre_action=lambda node: order.append(node))\n >>> order # visited nodes\n >>> algorithm.parent # BFS tree as a dict\n >>> algorithm.dag # BFS tree as a directed graph\n >>> algorithm.path(source, target)\n \n Notes\n -----\n Based on:\n \n Cormen, T. H., Leiserson, C. E., Rivest, R. 
L., and Stein, C., 2009, \n Introduction to Algorithms, third edition, The MIT Press, \n Cambridge, London.\n \n https://en.wikipedia.org/wiki/Breadth-first_search\n \"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n self.graph = graph\n self.parent = dict()\n self.dag = self.graph.__class__(self.graph.v(), directed=True)\n for node in self.graph.iternodes():\n self.dag.add_node(node)\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for node in self.graph.iternodes():\n if node not in self.parent:\n self._visit(node, pre_action, post_action)\n\n def _visit(self, node, pre_action=None, post_action=None):\n \"\"\"Explore the connected component.\"\"\"\n Q = Queue()\n self.parent[node] = None\n Q.put(node)\n if pre_action:\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if edge.target not in self.parent:\n self.parent[edge.target] = source\n self.dag.add_edge(edge)\n Q.put(edge.target)\n if pre_action:\n pre_action(edge.target)\n if post_action:\n post_action(source)\n\n def path(self, source, target):\n \"\"\"Construct a path from source to target.\"\"\"\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError('no path to target')\n else:\n return self.path(source, self.parent[target]) + [target]\n", "step-5": "#!/usr/bin/python\n\ntry:\n from Queue import Queue\nexcept ImportError: # Python 3\n from queue import Queue\n\n\nclass BFSWithQueue:\n \"\"\"Breadth-First Search.\n \n Attributes\n ----------\n graph : input graph\n color : dict with nodes, private\n distance : dict with nodes (distances to source node)\n parent : dict (BFS tree)\n dag : graph (BFS tree)\n \n Examples\n --------\n >>> from graphtheory.structures.edges import Edge\n >>> from graphtheory.structures.graphs import Graph\n >>> from graphtheory.traversing.bfs import BFSWithQueue\n >>> G = Graph(n=10, False) # an exemplary undirected graph\n # Add nodes and edges here.\n >>> order = list()\n >>> algorithm = BFSWithQueue(G)\n >>> algorithm.run(source=0, pre_action=lambda node: order.append(node))\n >>> order # visited nodes\n >>> algorithm.distance[target] # distance from source to target\n >>> algorithm.parent # BFS tree as a dict\n >>> algorithm.dag # BFS tree as a directed graph\n >>> algorithm.path(source, target)\n \n Notes\n -----\n Based on:\n \n Cormen, T. H., Leiserson, C. E., Rivest, R. 
L., and Stein, C., 2009, \n Introduction to Algorithms, third edition, The MIT Press, \n Cambridge, London.\n \n https://en.wikipedia.org/wiki/Breadth-first_search\n \"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n self.graph = graph\n self.color = dict(((node, \"WHITE\") for node in self.graph.iternodes()))\n self.distance = dict(((node, float(\"inf\")) for node in self.graph.iternodes()))\n self.parent = dict(((node, None) for node in self.graph.iternodes()))\n self.dag = self.graph.__class__(self.graph.v(), directed=True)\n for node in self.graph.iternodes(): # isolated nodes are possible\n self.dag.add_node(node)\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for node in self.graph.iternodes():\n if self.color[node] == \"WHITE\":\n self._visit(node, pre_action, post_action)\n\n def _visit(self, node, pre_action=None, post_action=None):\n \"\"\"Explore the connected component.\"\"\"\n self.color[node] = \"GREY\"\n self.distance[node] = 0\n self.parent[node] = None\n Q = Queue()\n Q.put(node) # node is GREY\n if pre_action: # when Q.put\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if self.color[edge.target] == \"WHITE\":\n self.color[edge.target] = \"GREY\"\n self.distance[edge.target] = self.distance[source] + 1\n self.parent[edge.target] = source\n self.dag.add_edge(edge)\n Q.put(edge.target) # target is GREY\n if pre_action: # when Q.put\n pre_action(edge.target)\n self.color[source] = \"BLACK\"\n if post_action: # source became BLACK\n post_action(source)\n\n def path(self, source, target):\n \"\"\"Construct a path from source to target.\"\"\"\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError(\"no path to target\")\n else:\n return self.path(source, self.parent[target]) + [target]\n\n\nclass SimpleBFS:\n \"\"\"Breadth-First Search.\n \n Attributes\n ----------\n graph : input graph\n parent : dict (BFS tree)\n dag : graph (BFS tree)\n \n Examples\n --------\n >>> from graphtheory.structures.edges import Edge\n >>> from graphtheory.structures.graphs import Graph\n >>> from graphtheory.traversing.bfs import SimpleBFS\n >>> G = Graph(n=10, False) # an exemplary undirected graph\n # Add nodes and edges here.\n >>> order = list()\n >>> algorithm = SimpleBFS(G)\n >>> algorithm.run(source=0, pre_action=lambda node: order.append(node))\n >>> order # visited nodes\n >>> algorithm.parent # BFS tree as a dict\n >>> algorithm.dag # BFS tree as a directed graph\n >>> algorithm.path(source, target)\n \n Notes\n -----\n Based on:\n \n Cormen, T. H., Leiserson, C. E., Rivest, R. 
L., and Stein, C., 2009, \n Introduction to Algorithms, third edition, The MIT Press, \n Cambridge, London.\n \n https://en.wikipedia.org/wiki/Breadth-first_search\n \"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n self.graph = graph\n self.parent = dict()\n self.dag = self.graph.__class__(self.graph.v(), directed=True)\n for node in self.graph.iternodes(): # isolated nodes are possible\n self.dag.add_node(node)\n\n def run(self, source=None, pre_action=None, post_action=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self._visit(source, pre_action, post_action)\n else:\n for node in self.graph.iternodes():\n if node not in self.parent:\n self._visit(node, pre_action, post_action)\n\n def _visit(self, node, pre_action=None, post_action=None):\n \"\"\"Explore the connected component.\"\"\"\n Q = Queue()\n self.parent[node] = None # before Q.put\n Q.put(node)\n if pre_action: # when Q.put\n pre_action(node)\n while not Q.empty():\n source = Q.get()\n for edge in self.graph.iteroutedges(source):\n if edge.target not in self.parent:\n self.parent[edge.target] = source # before Q.put\n self.dag.add_edge(edge)\n Q.put(edge.target)\n if pre_action: # when Q.put\n pre_action(edge.target)\n if post_action:\n post_action(source)\n\n def path(self, source, target):\n \"\"\"Construct a path from source to target.\"\"\"\n if source == target:\n return [source]\n elif self.parent[target] is None:\n raise ValueError(\"no path to target\")\n else:\n return self.path(source, self.parent[target]) + [target]\n\n# EOF\n", "step-ids": [ 8, 10, 11, 12, 14 ] }
[ 8, 10, 11, 12, 14 ]
<|reserved_special_token_0|> @app.route('/predict', methods=['POST']) def predict(): arr = [int(x) for x in request.form.values()] arr2 = [np.array(arr)] output = model.predict(arr2) return render_template('index.html', prediction_text=output) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @app.route('/') def home(): return render_template('index.html') @app.route('/predict', methods=['POST']) def predict(): arr = [int(x) for x in request.form.values()] arr2 = [np.array(arr)] output = model.predict(arr2) return render_template('index.html', prediction_text=output) if __name__ == '__main__': run_simple('localhost', 8001, app, use_reloader=False) <|reserved_special_token_1|> <|reserved_special_token_0|> app = Flask(__name__, template_folder='template') model = pickle.load(open('model.pkl', 'rb')) @app.route('/') def home(): return render_template('index.html') @app.route('/predict', methods=['POST']) def predict(): arr = [int(x) for x in request.form.values()] arr2 = [np.array(arr)] output = model.predict(arr2) return render_template('index.html', prediction_text=output) if __name__ == '__main__': run_simple('localhost', 8001, app, use_reloader=False) <|reserved_special_token_1|> import numpy as np from flask import Flask, request, render_template import pickle from werkzeug.serving import run_simple app = Flask(__name__, template_folder='template') model = pickle.load(open('model.pkl', 'rb')) @app.route('/') def home(): return render_template('index.html') @app.route('/predict', methods=['POST']) def predict(): arr = [int(x) for x in request.form.values()] arr2 = [np.array(arr)] output = model.predict(arr2) return render_template('index.html', prediction_text=output) if __name__ == '__main__': run_simple('localhost', 8001, app, use_reloader=False) <|reserved_special_token_1|> import numpy as np from flask import Flask,request,render_template import pickle from werkzeug.serving import run_simple app=Flask(__name__,template_folder='template') model=pickle.load(open("model.pkl",'rb')) @app.route('/') def home(): return render_template('index.html') @app.route('/predict',methods=['POST']) def predict(): arr=[int(x) for x in request.form.values()] arr2=[np.array(arr)] output=model.predict(arr2) # o2=round(output) return render_template('index.html',prediction_text=output) if __name__ == "__main__": run_simple('localhost',8001,app,use_reloader=False)
flexible
{ "blob_id": "02b760b16cdcd42f8d8d7222b439da87fb8076a3", "index": 4959, "step-1": "<mask token>\n\n\[email protected]('/predict', methods=['POST'])\ndef predict():\n arr = [int(x) for x in request.form.values()]\n arr2 = [np.array(arr)]\n output = model.predict(arr2)\n return render_template('index.html', prediction_text=output)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\ndef home():\n return render_template('index.html')\n\n\[email protected]('/predict', methods=['POST'])\ndef predict():\n arr = [int(x) for x in request.form.values()]\n arr2 = [np.array(arr)]\n output = model.predict(arr2)\n return render_template('index.html', prediction_text=output)\n\n\nif __name__ == '__main__':\n run_simple('localhost', 8001, app, use_reloader=False)\n", "step-3": "<mask token>\napp = Flask(__name__, template_folder='template')\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n\[email protected]('/')\ndef home():\n return render_template('index.html')\n\n\[email protected]('/predict', methods=['POST'])\ndef predict():\n arr = [int(x) for x in request.form.values()]\n arr2 = [np.array(arr)]\n output = model.predict(arr2)\n return render_template('index.html', prediction_text=output)\n\n\nif __name__ == '__main__':\n run_simple('localhost', 8001, app, use_reloader=False)\n", "step-4": "import numpy as np\nfrom flask import Flask, request, render_template\nimport pickle\nfrom werkzeug.serving import run_simple\napp = Flask(__name__, template_folder='template')\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n\[email protected]('/')\ndef home():\n return render_template('index.html')\n\n\[email protected]('/predict', methods=['POST'])\ndef predict():\n arr = [int(x) for x in request.form.values()]\n arr2 = [np.array(arr)]\n output = model.predict(arr2)\n return render_template('index.html', prediction_text=output)\n\n\nif __name__ == '__main__':\n run_simple('localhost', 8001, app, use_reloader=False)\n", "step-5": "import numpy as np\r\nfrom flask import Flask,request,render_template\r\nimport pickle\r\nfrom werkzeug.serving import run_simple\r\n\r\napp=Flask(__name__,template_folder='template')\r\nmodel=pickle.load(open(\"model.pkl\",'rb'))\r\n\r\n\r\[email protected]('/')\r\ndef home():\r\n return render_template('index.html')\r\n\r\[email protected]('/predict',methods=['POST'])\r\ndef predict():\r\n arr=[int(x) for x in request.form.values()]\r\n arr2=[np.array(arr)]\r\n output=model.predict(arr2)\r\n # o2=round(output)\r\n return render_template('index.html',prediction_text=output)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_simple('localhost',8001,app,use_reloader=False)", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
import glob

pyfiles = glob.glob('*.py')

modulenames = [f.split('.')[0] for f in pyfiles]

# print(modulenames)

for f in pyfiles:
    contents = open(f).read()
    for m in modulenames:
        v1 = "import " + m
        v2 = "from " + m
        # Only rewrite the import lines when either form actually occurs.
        if v1 in contents or v2 in contents:
            contents = contents.replace(v1, "import ."+m)
            contents = contents.replace(v2, "from ."+m)
    with open('new_'+f, 'w') as outf:
        outf.write(contents)
normal
{ "blob_id": "d6a73365aa32c74798b6887ff46c0ed2323ed1a6", "index": 2324, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor f in pyfiles:\n contents = open(f).read()\n for m in modulenames:\n v1 = 'import ' + m\n v2 = 'from ' + m\n if v1 or v2 in contents:\n contents = contents.replace(v1, 'import .' + m)\n contents = contents.replace(v2, 'from .' + m)\n with open('new_' + f, 'w') as outf:\n outf.write(contents)\n", "step-3": "<mask token>\npyfiles = glob.glob('*.py')\nmodulenames = [f.split('.')[0] for f in pyfiles]\nfor f in pyfiles:\n contents = open(f).read()\n for m in modulenames:\n v1 = 'import ' + m\n v2 = 'from ' + m\n if v1 or v2 in contents:\n contents = contents.replace(v1, 'import .' + m)\n contents = contents.replace(v2, 'from .' + m)\n with open('new_' + f, 'w') as outf:\n outf.write(contents)\n", "step-4": "import glob\npyfiles = glob.glob('*.py')\nmodulenames = [f.split('.')[0] for f in pyfiles]\nfor f in pyfiles:\n contents = open(f).read()\n for m in modulenames:\n v1 = 'import ' + m\n v2 = 'from ' + m\n if v1 or v2 in contents:\n contents = contents.replace(v1, 'import .' + m)\n contents = contents.replace(v2, 'from .' + m)\n with open('new_' + f, 'w') as outf:\n outf.write(contents)\n", "step-5": "import glob\n\npyfiles = glob.glob('*.py')\n\nmodulenames = [f.split('.')[0] for f in pyfiles]\n\n# print(modulenames)\n\nfor f in pyfiles:\n contents = open(f).read()\n for m in modulenames:\n v1 = \"import \" + m\n v2 = \"from \" + m\n if v1 or v2 in contents:\n contents = contents.replace(v1, \"import .\"+m)\n contents = contents.replace(v2, \"from .\"+m)\n with open('new_'+f, 'w') as outf:\n outf.write(contents)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class LoginForm(forms.Form): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class LoginForm(forms.Form): usuario = forms.CharField(label='Usuario', max_length=20, required=True, widget=forms.TextInput(attrs={'class': 'form-control'})) contraseña = forms.CharField(label='Contraseña', max_length=20, widget= forms.PasswordInput(attrs={'class': 'form-control'})) <|reserved_special_token_1|> from django import forms class LoginForm(forms.Form): usuario = forms.CharField(label='Usuario', max_length=20, required=True, widget=forms.TextInput(attrs={'class': 'form-control'})) contraseña = forms.CharField(label='Contraseña', max_length=20, widget= forms.PasswordInput(attrs={'class': 'form-control'})) <|reserved_special_token_1|> from django import forms class LoginForm(forms.Form): usuario=forms.CharField(label="Usuario",max_length=20, required=True, widget=forms.TextInput( attrs={'class':'form-control'} )) contraseña=forms.CharField(label="Contraseña",max_length=20, widget=forms.PasswordInput( attrs={'class':'form-control'} ))
flexible
{ "blob_id": "7da5a7476c807619bed805cb892774c23c04c6f7", "index": 4917, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass LoginForm(forms.Form):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass LoginForm(forms.Form):\n usuario = forms.CharField(label='Usuario', max_length=20, required=True,\n widget=forms.TextInput(attrs={'class': 'form-control'}))\n contraseña = forms.CharField(label='Contraseña', max_length=20, widget=\n forms.PasswordInput(attrs={'class': 'form-control'}))\n", "step-4": "from django import forms\n\n\nclass LoginForm(forms.Form):\n usuario = forms.CharField(label='Usuario', max_length=20, required=True,\n widget=forms.TextInput(attrs={'class': 'form-control'}))\n contraseña = forms.CharField(label='Contraseña', max_length=20, widget=\n forms.PasswordInput(attrs={'class': 'form-control'}))\n", "step-5": "from django import forms\n\nclass LoginForm(forms.Form):\n usuario=forms.CharField(label=\"Usuario\",max_length=20, required=True, widget=forms.TextInput(\n attrs={'class':'form-control'} \n ))\n contraseña=forms.CharField(label=\"Contraseña\",max_length=20, widget=forms.PasswordInput(\n attrs={'class':'form-control'}\n ))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class DLModeler(object): def __init__(self, model_path, hf_path, num_examples, class_percentages, predictors, model_args, model_type): self.model_path = model_path self.hf_path = hf_path self.num_examples = num_examples self.class_percentages = class_percentages self.model_args = model_args self.model_type = model_type long_predictors = [] for predictor in predictors: if '_' in predictor: predictor_name = predictor.split('_')[0].upper( ) + predictor.split('_')[-1] elif ' ' in predictor: predictor_name = ''.join([v[0].upper() for v in predictor. split()]) else: predictor_name = predictor long_predictors.append(predictor_name) self.predictors = np.array(long_predictors) self.dldataeng = DLDataEngineering(self.model_path, self.hf_path, self.num_examples, self.class_percentages, self.predictors, self.model_args) return def train_models(self, member, train_dates, valid_dates): """ Function that reads and extracts pre-processed 2d member data from an ensemble to train a convolutional neural net (cnn) or UNET. The model data is standardized before being input to the cnn, with the observation data in the shape (# examples, # classes). Args: member (str): ensemble member data that trains a DL model """ train_data, train_label = self.dldataeng.extract_training_data(member, train_dates, self.model_type) valid_data, valid_label = [], [] if self.model_type == 'CNN': onehot_encoder = OneHotEncoder(sparse=False, categories='auto') encoded_label = onehot_encoder.fit_transform(train_label. reshape(-1, 1)) self.train_CNN(member, train_data, encoded_label, valid_data, valid_label) elif 'UNET' in self.model_type: self.train_UNET(member, train_data, train_label, valid_data, valid_label) return def train_UNET(self, member, trainX, trainY, validX, validY): model_file = (self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5') """ if os.path.exists(model_file): del trainX,trainY,validX,validY unet = tf.keras.models.load_model(model_file,compile=False) print(f' Opening {model_file} ') #self.validate_UNET(model,validX,validY,threshold_file) return """ print('\nTraining {0} models'.format(member)) print('Training data shape {0}'.format(np.shape(trainX))) print('Training label data shape {0}\n'.format(np.shape(trainY))) model_obj_params = {'input_size': np.shape(trainX[0]), 'n_labels': 1, 'stack_num_down': 2, 'stack_num_up': 1, 'activation': 'LeakyReLU', 'output_activation': 'ReLU', 'batch_norm': False, 'pool': True, 'unpool': False, 'name': f'{self.model_type}'} if self.model_type == 'UNET': model_obj_params['filter_num'] = [16, 32, 64, 128] unet_model_obj = models.unet_2d compile_params = {'loss': 'mean_squared_error'} else: compile_params = {'loss': ['mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error'], 'loss_weights': [0.25, 0.25, 0.25, 0.25, 1.0]} if self.model_type == 'UNET2plus': plus_model_params = {'filter_num': [16, 32, 64, 128, 256], 'deep_supervision': True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_plus_2d elif self.model_type == 'UNET3plus': plus_model_params = {'filter_num_downi': [16, 32, 64, 128, 256], 'filter_num_skip': 'auto', 'filter_num_aggregate': 'auto', 'deep_supervision': True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_3plus_2d try: unet_model = unet_model_obj(**model_obj_params) except: print(f'{self.model_type} Model type not found.') return unet_model.compile(**compile_params, optimizer=tf.keras.optimizers. 
            Adam(lr=0.0001))
        print(unet_model.summary())
        aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,
            width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest')
        n_epochs = 15
        bs = 256
        conv_hist = unet_model.fit(aug.flow(trainX, trainY, batch_size=bs),
            steps_per_epoch=len(trainX) / bs, epochs=n_epochs, verbose=1)
        """
        pred_s = trainX[0].reshape(1,input_shape[0],
        input_shape[1],input_shape[2])
        prediction = unet.predict(pred_s)[0,:,:,:]
        print(prediction.shape)
        plt.imshow(prediction)
        plt.colorbar()
        plt.show()
        return
        """
        unet_model.save(model_file)
        print(f'Writing out {model_file}')
        tf.keras.backend.clear_session()
        return

    def train_CNN(self, member, input_data):
        """
        Function to train a convolutional neural net (CNN) for random
        training data and associated labels.

        Args:
            member (str): Ensemble member
            trainX (tuple): Tuple of (train data, train labels,
                validation data, validation labels)
        """
        trainX, trainY, validX, validY = input_data
        print('\nTraining {0} models'.format(member))
        print('Training data shape {0}'.format(np.shape(trainX)))
        print('Training label data shape {0}\n'.format(np.shape(trainY)))
        print('Validation data shape {0}'.format(np.shape(validX)))
        print('Validation label data shape {0}\n'.format(np.shape(validY)))
        model_file = (self.model_path +
            f'/{member}_{self.model_args}_CNN_model.h5')
        print(model_file)
        if not os.path.exists(model_file):
            tf.keras.backend.clear_session()
            model = models.Sequential()
            input_shape = np.shape(trainX[0])
            model.add(layers.GaussianNoise(0.01, input_shape=input_shape))
            for filters in [32, 64, 128]:
                model.add(layers.Conv2D(filters, (3, 3), padding='same'))
                model.add(layers.Conv2D(filters, (3, 3), padding='same'))
                model.add(layers.BatchNormalization())
                model.add(layers.LeakyReLU(alpha=0.3))
                model.add(layers.MaxPooling2D())
            model.add(layers.Flatten())
            model.add(layers.Dense(256))
            model.add(layers.LeakyReLU(alpha=0.3))
            model.add(layers.Dense(4, activation='softmax'))
            model.compile(optimizer='adam', loss='categorical_crossentropy',
                metrics=[tf.keras.metrics.AUC()])
            print(model.summary())
            n_epochs = 10
            bs = 256
            # Augment the training patches and fit the CNN, weighting
            # classes by the pre-computed class percentages.
            aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,
                width_shift_range=0.2, height_shift_range=0.2,
                fill_mode='nearest')
            train_generator = aug.flow(trainX, trainY, batch_size=bs)
            conv_hist = model.fit(train_generator, steps_per_epoch=len(
                trainX) // bs, epochs=n_epochs, verbose=1, class_weight=
                self.class_percentages)
            model.save(model_file)
            print(f'Writing out {model_file}')
        else:
            model = tf.keras.models.load_model(model_file)
            print(f'\nOpening {model_file}\n')
        del trainY, trainX
        threshold_file = (self.model_path +
            f'/{member}_{self.model_args}_CNN_model_threshold.h5')
        if os.path.exists(threshold_file):
            del validX, validY
            return
        self.validate_CNN(model, validX, validY, threshold_file)
        return

    def validate_CNN(self, model, validX, validY, threshold_file):
        print()
        cnn_preds = model.predict(validX)
        sev_hail = cnn_preds[:, 2]
        sig_hail = cnn_preds[:, 3]
        sev_prob_preds = sev_hail + sig_hail
        print('Max probability', np.nanmax(sev_prob_preds))
        true_preds = np.where(validY >= 2, 1, 0)
        del validX, validY
        df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=[
            'Size Threshold'])
        auc_score = []
        thresholds = np.arange(0.1, 1.01, 0.02)
        for t in thresholds:
            threshold_preds = np.where(sev_prob_preds >= t, 1, 0)
            auc_score.append(roc_auc_score(true_preds, threshold_preds))
        print(auc_score)
        df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]
        print(df_best_score)
        df_best_score.to_csv(threshold_file)
        print(f'Writing out {threshold_file}')
return def predict_model(self, member, patch_map_conversion_indices, total_map_shape, subset_map_shape, date, patch_radius, forecast_grid_path, lon_grid, lat_grid): """ Function that opens a pre-trained convolutional neural net (cnn). and predicts hail probability forecasts for a single ensemble member. Args: Right now only includes severe hail prediction, not sig-severe """ tf.keras.backend.clear_session() model_file = (self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5') DL_model = tf.keras.models.load_model(model_file, compile=False) if self.model_type == 'CNN': threshold_file = (self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5') if not os.path.exists(threshold_file): print('No thresholds found') return prob_thresh = 0 print(prob_thresh) total_count = 0 forecast_data = self.dldataeng.read_files('forecast', member, date, [None], [None]) if forecast_data is None: print('No forecast data found') return standard_forecast_data = np.array([self.dldataeng.standardize_data( member, forecast_data[hour]) for hour in np.arange( forecast_data.shape[0])]) del forecast_data total_grid = np.empty((standard_forecast_data.shape[0], total_map_shape[0] * total_map_shape[1])) * np.nan for hour in np.arange(standard_forecast_data.shape[0]): print(hour) DL_prediction = np.array(DL_model.predict( standard_forecast_data[hour])) if self.model_type == 'CNN': severe_proba_indices = np.where(cnn_preds[:, 2] + cnn_preds [:, 3] >= prob_thresh)[0] severe_patches = np.zeros(subset_map_shape) if len(severe_proba_indices) < 1: continue severe_patches[severe_proba_indices] = np.full(( patch_radius, patch_radius), 1) total_grid[hour, map_conversion_inds] = severe_patches.ravel() print(hour, len(severe_proba_indices), np.nanmax(cnn_preds[ :, 2] + cnn_preds[:, 3])) total_count += len(severe_proba_indices) print('Total severe probs:', total_count) print() elif 'UNET' in self.model_type: for patch in np.arange(standard_forecast_data.shape[1]): patch_indices = patch_map_conversion_indices[patch] overlap_pt = 4 if DL_prediction.ndim > 4: hourly_patch_data = DL_prediction[-1, patch, overlap_pt:-overlap_pt, overlap_pt:-overlap_pt, 0 ].ravel() else: hourly_patch_data = DL_prediction[patch, overlap_pt :-overlap_pt, overlap_pt:-overlap_pt, 0].ravel() total_grid[hour, patch_indices] = hourly_patch_data del DL_prediction del standard_forecast_data output_data = total_grid.reshape((total_grid.shape[0],) + total_map_shape) date_outpath = forecast_grid_path + f'{date[0][:-5]}/' if not os.path.exists(date_outpath): os.makedirs(date_outpath) gridded_out_file = (date_outpath + f'{member}_{date[0]}_forecast_grid.h5') print(f'Writing out {gridded_out_file}') with h5py.File(gridded_out_file, 'w') as hf: hf.create_dataset('data', data=output_data, compression='gzip', compression_opts=6) return <|reserved_special_token_0|> def down_block(x, filters, kernel_size=(3, 3)): c = layers.Conv2D(filters, kernel_size, padding='same')(x) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) p = layers.MaxPooling2D((2, 2))(c) return c, p <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class DLModeler(object): def __init__(self, model_path, hf_path, num_examples, class_percentages, predictors, model_args, model_type): self.model_path = model_path self.hf_path = hf_path self.num_examples = num_examples self.class_percentages = 
class_percentages self.model_args = model_args self.model_type = model_type long_predictors = [] for predictor in predictors: if '_' in predictor: predictor_name = predictor.split('_')[0].upper( ) + predictor.split('_')[-1] elif ' ' in predictor: predictor_name = ''.join([v[0].upper() for v in predictor. split()]) else: predictor_name = predictor long_predictors.append(predictor_name) self.predictors = np.array(long_predictors) self.dldataeng = DLDataEngineering(self.model_path, self.hf_path, self.num_examples, self.class_percentages, self.predictors, self.model_args) return def train_models(self, member, train_dates, valid_dates): """ Function that reads and extracts pre-processed 2d member data from an ensemble to train a convolutional neural net (cnn) or UNET. The model data is standardized before being input to the cnn, with the observation data in the shape (# examples, # classes). Args: member (str): ensemble member data that trains a DL model """ train_data, train_label = self.dldataeng.extract_training_data(member, train_dates, self.model_type) valid_data, valid_label = [], [] if self.model_type == 'CNN': onehot_encoder = OneHotEncoder(sparse=False, categories='auto') encoded_label = onehot_encoder.fit_transform(train_label. reshape(-1, 1)) self.train_CNN(member, train_data, encoded_label, valid_data, valid_label) elif 'UNET' in self.model_type: self.train_UNET(member, train_data, train_label, valid_data, valid_label) return def train_UNET(self, member, trainX, trainY, validX, validY): model_file = (self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5') """ if os.path.exists(model_file): del trainX,trainY,validX,validY unet = tf.keras.models.load_model(model_file,compile=False) print(f' Opening {model_file} ') #self.validate_UNET(model,validX,validY,threshold_file) return """ print('\nTraining {0} models'.format(member)) print('Training data shape {0}'.format(np.shape(trainX))) print('Training label data shape {0}\n'.format(np.shape(trainY))) model_obj_params = {'input_size': np.shape(trainX[0]), 'n_labels': 1, 'stack_num_down': 2, 'stack_num_up': 1, 'activation': 'LeakyReLU', 'output_activation': 'ReLU', 'batch_norm': False, 'pool': True, 'unpool': False, 'name': f'{self.model_type}'} if self.model_type == 'UNET': model_obj_params['filter_num'] = [16, 32, 64, 128] unet_model_obj = models.unet_2d compile_params = {'loss': 'mean_squared_error'} else: compile_params = {'loss': ['mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error'], 'loss_weights': [0.25, 0.25, 0.25, 0.25, 1.0]} if self.model_type == 'UNET2plus': plus_model_params = {'filter_num': [16, 32, 64, 128, 256], 'deep_supervision': True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_plus_2d elif self.model_type == 'UNET3plus': plus_model_params = {'filter_num_downi': [16, 32, 64, 128, 256], 'filter_num_skip': 'auto', 'filter_num_aggregate': 'auto', 'deep_supervision': True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_3plus_2d try: unet_model = unet_model_obj(**model_obj_params) except: print(f'{self.model_type} Model type not found.') return unet_model.compile(**compile_params, optimizer=tf.keras.optimizers. 
            Adam(lr=0.0001))
        print(unet_model.summary())
        aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,
            width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest')
        n_epochs = 15
        bs = 256
        conv_hist = unet_model.fit(aug.flow(trainX, trainY, batch_size=bs),
            steps_per_epoch=len(trainX) / bs, epochs=n_epochs, verbose=1)
        """
        pred_s = trainX[0].reshape(1,input_shape[0],
        input_shape[1],input_shape[2])
        prediction = unet.predict(pred_s)[0,:,:,:]
        print(prediction.shape)
        plt.imshow(prediction)
        plt.colorbar()
        plt.show()
        return
        """
        unet_model.save(model_file)
        print(f'Writing out {model_file}')
        tf.keras.backend.clear_session()
        return

    def train_CNN(self, member, input_data):
        """
        Function to train a convolutional neural net (CNN) for random
        training data and associated labels.

        Args:
            member (str): Ensemble member
            trainX (tuple): Tuple of (train data, train labels,
                validation data, validation labels)
        """
        trainX, trainY, validX, validY = input_data
        print('\nTraining {0} models'.format(member))
        print('Training data shape {0}'.format(np.shape(trainX)))
        print('Training label data shape {0}\n'.format(np.shape(trainY)))
        print('Validation data shape {0}'.format(np.shape(validX)))
        print('Validation label data shape {0}\n'.format(np.shape(validY)))
        model_file = (self.model_path +
            f'/{member}_{self.model_args}_CNN_model.h5')
        print(model_file)
        if not os.path.exists(model_file):
            tf.keras.backend.clear_session()
            model = models.Sequential()
            input_shape = np.shape(trainX[0])
            model.add(layers.GaussianNoise(0.01, input_shape=input_shape))
            for filters in [32, 64, 128]:
                model.add(layers.Conv2D(filters, (3, 3), padding='same'))
                model.add(layers.Conv2D(filters, (3, 3), padding='same'))
                model.add(layers.BatchNormalization())
                model.add(layers.LeakyReLU(alpha=0.3))
                model.add(layers.MaxPooling2D())
            model.add(layers.Flatten())
            model.add(layers.Dense(256))
            model.add(layers.LeakyReLU(alpha=0.3))
            model.add(layers.Dense(4, activation='softmax'))
            model.compile(optimizer='adam', loss='categorical_crossentropy',
                metrics=[tf.keras.metrics.AUC()])
            print(model.summary())
            n_epochs = 10
            bs = 256
            aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,
                width_shift_range=0.2, height_shift_range=0.2,
                fill_mode='nearest')
            train_generator = aug.flow(trainX, trainY, batch_size=bs)
            conv_hist = model.fit(train_generator, steps_per_epoch=len(
                trainX) // bs, epochs=n_epochs, verbose=1, class_weight=
                self.class_percentages)
            model.save(model_file)
            print(f'Writing out {model_file}')
        else:
            model = tf.keras.models.load_model(model_file)
            print(f'\nOpening {model_file}\n')
        del trainY, trainX
        threshold_file = (self.model_path +
            f'/{member}_{self.model_args}_CNN_model_threshold.h5')
        if os.path.exists(threshold_file):
            del validX, validY
            return
        self.validate_CNN(model, validX, validY, threshold_file)
        return

    def validate_CNN(self, model, validX, validY, threshold_file):
        print()
        cnn_preds = model.predict(validX)
        sev_hail = cnn_preds[:, 2]
        sig_hail = cnn_preds[:, 3]
        sev_prob_preds = sev_hail + sig_hail
        print('Max probability', np.nanmax(sev_prob_preds))
        true_preds = np.where(validY >= 2, 1, 0)
        del validX, validY
        df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=[
            'Size Threshold'])
        auc_score = []
        thresholds = np.arange(0.1, 1.01, 0.02)
        for t in thresholds:
            threshold_preds = np.where(sev_prob_preds >= t, 1, 0)
            auc_score.append(roc_auc_score(true_preds, threshold_preds))
        print(auc_score)
        df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]
        print(df_best_score)
        df_best_score.to_csv(threshold_file)
        print(f'Writing out {threshold_file}')
return def predict_model(self, member, patch_map_conversion_indices, total_map_shape, subset_map_shape, date, patch_radius, forecast_grid_path, lon_grid, lat_grid): """ Function that opens a pre-trained convolutional neural net (cnn). and predicts hail probability forecasts for a single ensemble member. Args: Right now only includes severe hail prediction, not sig-severe """ tf.keras.backend.clear_session() model_file = (self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5') DL_model = tf.keras.models.load_model(model_file, compile=False) if self.model_type == 'CNN': threshold_file = (self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5') if not os.path.exists(threshold_file): print('No thresholds found') return prob_thresh = 0 print(prob_thresh) total_count = 0 forecast_data = self.dldataeng.read_files('forecast', member, date, [None], [None]) if forecast_data is None: print('No forecast data found') return standard_forecast_data = np.array([self.dldataeng.standardize_data( member, forecast_data[hour]) for hour in np.arange( forecast_data.shape[0])]) del forecast_data total_grid = np.empty((standard_forecast_data.shape[0], total_map_shape[0] * total_map_shape[1])) * np.nan for hour in np.arange(standard_forecast_data.shape[0]): print(hour) DL_prediction = np.array(DL_model.predict( standard_forecast_data[hour])) if self.model_type == 'CNN': severe_proba_indices = np.where(cnn_preds[:, 2] + cnn_preds [:, 3] >= prob_thresh)[0] severe_patches = np.zeros(subset_map_shape) if len(severe_proba_indices) < 1: continue severe_patches[severe_proba_indices] = np.full(( patch_radius, patch_radius), 1) total_grid[hour, map_conversion_inds] = severe_patches.ravel() print(hour, len(severe_proba_indices), np.nanmax(cnn_preds[ :, 2] + cnn_preds[:, 3])) total_count += len(severe_proba_indices) print('Total severe probs:', total_count) print() elif 'UNET' in self.model_type: for patch in np.arange(standard_forecast_data.shape[1]): patch_indices = patch_map_conversion_indices[patch] overlap_pt = 4 if DL_prediction.ndim > 4: hourly_patch_data = DL_prediction[-1, patch, overlap_pt:-overlap_pt, overlap_pt:-overlap_pt, 0 ].ravel() else: hourly_patch_data = DL_prediction[patch, overlap_pt :-overlap_pt, overlap_pt:-overlap_pt, 0].ravel() total_grid[hour, patch_indices] = hourly_patch_data del DL_prediction del standard_forecast_data output_data = total_grid.reshape((total_grid.shape[0],) + total_map_shape) date_outpath = forecast_grid_path + f'{date[0][:-5]}/' if not os.path.exists(date_outpath): os.makedirs(date_outpath) gridded_out_file = (date_outpath + f'{member}_{date[0]}_forecast_grid.h5') print(f'Writing out {gridded_out_file}') with h5py.File(gridded_out_file, 'w') as hf: hf.create_dataset('data', data=output_data, compression='gzip', compression_opts=6) return <|reserved_special_token_0|> def down_block(x, filters, kernel_size=(3, 3)): c = layers.Conv2D(filters, kernel_size, padding='same')(x) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) p = layers.MaxPooling2D((2, 2))(c) return c, p <|reserved_special_token_0|> def bottleneck(x, filters, kernel_size=(3, 3)): c = layers.Conv2D(filters, kernel_size, padding='same')(x) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) return c 
<|reserved_special_token_1|> <|reserved_special_token_0|> class DLModeler(object): def __init__(self, model_path, hf_path, num_examples, class_percentages, predictors, model_args, model_type): self.model_path = model_path self.hf_path = hf_path self.num_examples = num_examples self.class_percentages = class_percentages self.model_args = model_args self.model_type = model_type long_predictors = [] for predictor in predictors: if '_' in predictor: predictor_name = predictor.split('_')[0].upper( ) + predictor.split('_')[-1] elif ' ' in predictor: predictor_name = ''.join([v[0].upper() for v in predictor. split()]) else: predictor_name = predictor long_predictors.append(predictor_name) self.predictors = np.array(long_predictors) self.dldataeng = DLDataEngineering(self.model_path, self.hf_path, self.num_examples, self.class_percentages, self.predictors, self.model_args) return def train_models(self, member, train_dates, valid_dates): """ Function that reads and extracts pre-processed 2d member data from an ensemble to train a convolutional neural net (cnn) or UNET. The model data is standardized before being input to the cnn, with the observation data in the shape (# examples, # classes). Args: member (str): ensemble member data that trains a DL model """ train_data, train_label = self.dldataeng.extract_training_data(member, train_dates, self.model_type) valid_data, valid_label = [], [] if self.model_type == 'CNN': onehot_encoder = OneHotEncoder(sparse=False, categories='auto') encoded_label = onehot_encoder.fit_transform(train_label. reshape(-1, 1)) self.train_CNN(member, train_data, encoded_label, valid_data, valid_label) elif 'UNET' in self.model_type: self.train_UNET(member, train_data, train_label, valid_data, valid_label) return def train_UNET(self, member, trainX, trainY, validX, validY): model_file = (self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5') """ if os.path.exists(model_file): del trainX,trainY,validX,validY unet = tf.keras.models.load_model(model_file,compile=False) print(f' Opening {model_file} ') #self.validate_UNET(model,validX,validY,threshold_file) return """ print('\nTraining {0} models'.format(member)) print('Training data shape {0}'.format(np.shape(trainX))) print('Training label data shape {0}\n'.format(np.shape(trainY))) model_obj_params = {'input_size': np.shape(trainX[0]), 'n_labels': 1, 'stack_num_down': 2, 'stack_num_up': 1, 'activation': 'LeakyReLU', 'output_activation': 'ReLU', 'batch_norm': False, 'pool': True, 'unpool': False, 'name': f'{self.model_type}'} if self.model_type == 'UNET': model_obj_params['filter_num'] = [16, 32, 64, 128] unet_model_obj = models.unet_2d compile_params = {'loss': 'mean_squared_error'} else: compile_params = {'loss': ['mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error'], 'loss_weights': [0.25, 0.25, 0.25, 0.25, 1.0]} if self.model_type == 'UNET2plus': plus_model_params = {'filter_num': [16, 32, 64, 128, 256], 'deep_supervision': True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_plus_2d elif self.model_type == 'UNET3plus': plus_model_params = {'filter_num_downi': [16, 32, 64, 128, 256], 'filter_num_skip': 'auto', 'filter_num_aggregate': 'auto', 'deep_supervision': True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_3plus_2d try: unet_model = unet_model_obj(**model_obj_params) except: print(f'{self.model_type} Model type not found.') return unet_model.compile(**compile_params, 
            optimizer=tf.keras.optimizers.Adam(lr=0.0001))
        print(unet_model.summary())
        aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,
            width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest')
        n_epochs = 15
        bs = 256
        conv_hist = unet_model.fit(aug.flow(trainX, trainY, batch_size=bs),
            steps_per_epoch=len(trainX) / bs, epochs=n_epochs, verbose=1)
        """
        pred_s = trainX[0].reshape(1,input_shape[0],
        input_shape[1],input_shape[2])
        prediction = unet.predict(pred_s)[0,:,:,:]
        print(prediction.shape)
        plt.imshow(prediction)
        plt.colorbar()
        plt.show()
        return
        """
        unet_model.save(model_file)
        print(f'Writing out {model_file}')
        tf.keras.backend.clear_session()
        return

    def train_CNN(self, member, input_data):
        """
        Function to train a convolutional neural net (CNN) for random
        training data and associated labels.

        Args:
            member (str): Ensemble member
            trainX (tuple): Tuple of (train data, train labels,
                validation data, validation labels)
        """
        trainX, trainY, validX, validY = input_data
        print('\nTraining {0} models'.format(member))
        print('Training data shape {0}'.format(np.shape(trainX)))
        print('Training label data shape {0}\n'.format(np.shape(trainY)))
        print('Validation data shape {0}'.format(np.shape(validX)))
        print('Validation label data shape {0}\n'.format(np.shape(validY)))
        model_file = (self.model_path +
            f'/{member}_{self.model_args}_CNN_model.h5')
        print(model_file)
        if not os.path.exists(model_file):
            tf.keras.backend.clear_session()
            model = models.Sequential()
            input_shape = np.shape(trainX[0])
            model.add(layers.GaussianNoise(0.01, input_shape=input_shape))
            for filters in [32, 64, 128]:
                model.add(layers.Conv2D(filters, (3, 3), padding='same'))
                model.add(layers.Conv2D(filters, (3, 3), padding='same'))
                model.add(layers.BatchNormalization())
                model.add(layers.LeakyReLU(alpha=0.3))
                model.add(layers.MaxPooling2D())
            model.add(layers.Flatten())
            model.add(layers.Dense(256))
            model.add(layers.LeakyReLU(alpha=0.3))
            model.add(layers.Dense(4, activation='softmax'))
            model.compile(optimizer='adam', loss='categorical_crossentropy',
                metrics=[tf.keras.metrics.AUC()])
            print(model.summary())
            n_epochs = 10
            bs = 256
            aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15,
                width_shift_range=0.2, height_shift_range=0.2,
                fill_mode='nearest')
            train_generator = aug.flow(trainX, trainY, batch_size=bs)
            conv_hist = model.fit(train_generator, steps_per_epoch=len(
                trainX) // bs, epochs=n_epochs, verbose=1, class_weight=
                self.class_percentages)
            model.save(model_file)
            print(f'Writing out {model_file}')
        else:
            model = tf.keras.models.load_model(model_file)
            print(f'\nOpening {model_file}\n')
        del trainY, trainX
        threshold_file = (self.model_path +
            f'/{member}_{self.model_args}_CNN_model_threshold.h5')
        if os.path.exists(threshold_file):
            del validX, validY
            return
        self.validate_CNN(model, validX, validY, threshold_file)
        return

    def validate_CNN(self, model, validX, validY, threshold_file):
        print()
        cnn_preds = model.predict(validX)
        sev_hail = cnn_preds[:, 2]
        sig_hail = cnn_preds[:, 3]
        sev_prob_preds = sev_hail + sig_hail
        print('Max probability', np.nanmax(sev_prob_preds))
        true_preds = np.where(validY >= 2, 1, 0)
        del validX, validY
        df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=[
            'Size Threshold'])
        auc_score = []
        thresholds = np.arange(0.1, 1.01, 0.02)
        for t in thresholds:
            threshold_preds = np.where(sev_prob_preds >= t, 1, 0)
            auc_score.append(roc_auc_score(true_preds, threshold_preds))
        print(auc_score)
        df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]
        print(df_best_score)
        df_best_score.to_csv(threshold_file)
print(f'Writing out {threshold_file}') return def predict_model(self, member, patch_map_conversion_indices, total_map_shape, subset_map_shape, date, patch_radius, forecast_grid_path, lon_grid, lat_grid): """ Function that opens a pre-trained convolutional neural net (cnn). and predicts hail probability forecasts for a single ensemble member. Args: Right now only includes severe hail prediction, not sig-severe """ tf.keras.backend.clear_session() model_file = (self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5') DL_model = tf.keras.models.load_model(model_file, compile=False) if self.model_type == 'CNN': threshold_file = (self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5') if not os.path.exists(threshold_file): print('No thresholds found') return prob_thresh = 0 print(prob_thresh) total_count = 0 forecast_data = self.dldataeng.read_files('forecast', member, date, [None], [None]) if forecast_data is None: print('No forecast data found') return standard_forecast_data = np.array([self.dldataeng.standardize_data( member, forecast_data[hour]) for hour in np.arange( forecast_data.shape[0])]) del forecast_data total_grid = np.empty((standard_forecast_data.shape[0], total_map_shape[0] * total_map_shape[1])) * np.nan for hour in np.arange(standard_forecast_data.shape[0]): print(hour) DL_prediction = np.array(DL_model.predict( standard_forecast_data[hour])) if self.model_type == 'CNN': severe_proba_indices = np.where(cnn_preds[:, 2] + cnn_preds [:, 3] >= prob_thresh)[0] severe_patches = np.zeros(subset_map_shape) if len(severe_proba_indices) < 1: continue severe_patches[severe_proba_indices] = np.full(( patch_radius, patch_radius), 1) total_grid[hour, map_conversion_inds] = severe_patches.ravel() print(hour, len(severe_proba_indices), np.nanmax(cnn_preds[ :, 2] + cnn_preds[:, 3])) total_count += len(severe_proba_indices) print('Total severe probs:', total_count) print() elif 'UNET' in self.model_type: for patch in np.arange(standard_forecast_data.shape[1]): patch_indices = patch_map_conversion_indices[patch] overlap_pt = 4 if DL_prediction.ndim > 4: hourly_patch_data = DL_prediction[-1, patch, overlap_pt:-overlap_pt, overlap_pt:-overlap_pt, 0 ].ravel() else: hourly_patch_data = DL_prediction[patch, overlap_pt :-overlap_pt, overlap_pt:-overlap_pt, 0].ravel() total_grid[hour, patch_indices] = hourly_patch_data del DL_prediction del standard_forecast_data output_data = total_grid.reshape((total_grid.shape[0],) + total_map_shape) date_outpath = forecast_grid_path + f'{date[0][:-5]}/' if not os.path.exists(date_outpath): os.makedirs(date_outpath) gridded_out_file = (date_outpath + f'{member}_{date[0]}_forecast_grid.h5') print(f'Writing out {gridded_out_file}') with h5py.File(gridded_out_file, 'w') as hf: hf.create_dataset('data', data=output_data, compression='gzip', compression_opts=6) return <|reserved_special_token_0|> def down_block(x, filters, kernel_size=(3, 3)): c = layers.Conv2D(filters, kernel_size, padding='same')(x) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) p = layers.MaxPooling2D((2, 2))(c) return c, p def up_block(x, skip, filters, kernel_size=(3, 3)): up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x) concat = layers.Concatenate()([up, skip]) c = layers.Conv2D(filters, kernel_size, padding='same')(concat) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = 
layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) return c def bottleneck(x, filters, kernel_size=(3, 3)): c = layers.Conv2D(filters, kernel_size, padding='same')(x) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) return c <|reserved_special_token_1|> from processing.DLDataEngineering import DLDataEngineering from sklearn.preprocessing import OneHotEncoder import pandas as pd import numpy as np import h5py import os from scipy.ndimage import gaussian_filter import tensorflow as tf from tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply from tensorflow.keras.backend import max from tensorflow.keras.preprocessing.image import ImageDataGenerator from sklearn.metrics import f1_score, roc_auc_score import matplotlib.pyplot as plt import cartopy.feature as cf import cartopy.crs as ccrs import cartopy from keras_unet_collection import models, base, utils class DLModeler(object): def __init__(self, model_path, hf_path, num_examples, class_percentages, predictors, model_args, model_type): self.model_path = model_path self.hf_path = hf_path self.num_examples = num_examples self.class_percentages = class_percentages self.model_args = model_args self.model_type = model_type long_predictors = [] for predictor in predictors: if '_' in predictor: predictor_name = predictor.split('_')[0].upper( ) + predictor.split('_')[-1] elif ' ' in predictor: predictor_name = ''.join([v[0].upper() for v in predictor. split()]) else: predictor_name = predictor long_predictors.append(predictor_name) self.predictors = np.array(long_predictors) self.dldataeng = DLDataEngineering(self.model_path, self.hf_path, self.num_examples, self.class_percentages, self.predictors, self.model_args) return def train_models(self, member, train_dates, valid_dates): """ Function that reads and extracts pre-processed 2d member data from an ensemble to train a convolutional neural net (cnn) or UNET. The model data is standardized before being input to the cnn, with the observation data in the shape (# examples, # classes). Args: member (str): ensemble member data that trains a DL model """ train_data, train_label = self.dldataeng.extract_training_data(member, train_dates, self.model_type) valid_data, valid_label = [], [] if self.model_type == 'CNN': onehot_encoder = OneHotEncoder(sparse=False, categories='auto') encoded_label = onehot_encoder.fit_transform(train_label. 
reshape(-1, 1)) self.train_CNN(member, train_data, encoded_label, valid_data, valid_label) elif 'UNET' in self.model_type: self.train_UNET(member, train_data, train_label, valid_data, valid_label) return def train_UNET(self, member, trainX, trainY, validX, validY): model_file = (self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5') """ if os.path.exists(model_file): del trainX,trainY,validX,validY unet = tf.keras.models.load_model(model_file,compile=False) print(f' Opening {model_file} ') #self.validate_UNET(model,validX,validY,threshold_file) return """ print('\nTraining {0} models'.format(member)) print('Training data shape {0}'.format(np.shape(trainX))) print('Training label data shape {0}\n'.format(np.shape(trainY))) model_obj_params = {'input_size': np.shape(trainX[0]), 'n_labels': 1, 'stack_num_down': 2, 'stack_num_up': 1, 'activation': 'LeakyReLU', 'output_activation': 'ReLU', 'batch_norm': False, 'pool': True, 'unpool': False, 'name': f'{self.model_type}'} if self.model_type == 'UNET': model_obj_params['filter_num'] = [16, 32, 64, 128] unet_model_obj = models.unet_2d compile_params = {'loss': 'mean_squared_error'} else: compile_params = {'loss': ['mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error', 'mean_squared_error'], 'loss_weights': [0.25, 0.25, 0.25, 0.25, 1.0]} if self.model_type == 'UNET2plus': plus_model_params = {'filter_num': [16, 32, 64, 128, 256], 'deep_supervision': True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_plus_2d elif self.model_type == 'UNET3plus': plus_model_params = {'filter_num_downi': [16, 32, 64, 128, 256], 'filter_num_skip': 'auto', 'filter_num_aggregate': 'auto', 'deep_supervision': True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_3plus_2d try: unet_model = unet_model_obj(**model_obj_params) except: print(f'{self.model_type} Model type not found.') return unet_model.compile(**compile_params, optimizer=tf.keras.optimizers. Adam(lr=0.0001)) print(unet_model.summary()) aug = ImageDataGenerator(rotation_range=10, zoom_range=0.15, width_shift_range=0.2, height_shift_range=0.2, fill_mode='nearest') n_epochs = 15 bs = 256 conv_hist = unet_model.fit(aug.flow(trainX, trainY, batch_size=bs), steps_per_epoch=len(trainX) / bs, epochs=n_epochs, verbose=1) """ pred_s = trainX[0].reshape(1,input_shape[0], input_shape[1],input_shape[2]) prediction = unet.predict(pred_s)[0,:,:,:] print(prediction.shape) plt.imshow(prediction) plt.colorbar() plt.show() return """ unet_model.save(model_file) print(f'Writing out {model_file}') tf.keras.backend.clear_session() return def train_CNN(self, member, input_data): """ Function to train a convolutional neural net (CNN) for random training data and associated labels. 
Args: member (str): Ensemble member trainX (tuple): Tuple of (train data, train labels, validation data, validation labels) """ trainX, trainY, validX, validY = input_data print('\nTraining {0} models'.format(member)) print('Training data shape {0}'.format(np.shape(trainX))) print('Training label data shape {0}\n'.format(np.shape(trainY))) print('Validation data shape {0}'.format(np.shape(validX))) print('Validation label data shape {0}\n'.format(np.shape(validY))) model_file = (self.model_path + f'/{member}_{self.model_args}_CNN_model.h5') print(model_file) if not os.path.exists(model_file): tf.keras.backend.clear_session() model = models.Sequential() input_shape = np.shape(trainX[0]) model.add(layers.GaussianNoise(0.01, input_shape=input_shape)) for filters in [32, 64, 128]: model.add(layers.Conv2D(filters, (3, 3), padding='same')) model.add(layers.Conv2D(filters, (3, 3), padding='same')) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU(alpha=0.3)) model.add(layers.MaxPooling2D()) model.add(layers.Flatten()) model.add(layers.Dense(256)) model.add(layers.LeakyReLU(alpha=0.3)) model.add(layers.Dense(4, activation='softmax')) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[tf.keras.metrics.AUC()]) print(model.summary()) n_epochs = 10 bs = 256 aug = imagedatagenerator(rotation_range=10, zoom_range=0.15, width_shift_range=0.2, height_shift_range=0.2, fill_mode= 'nearest') train_generator = aug.flow(trainx, trainy, batch_size=bs) conv_hist = model.fit(train_generator, steps_per_epoch=len( trainx) // bs, epochs=n_epochs, verbose=1, class_weight= self.class_percentages) model.save(model_file) print(f'Writing out {model_file}') else: model = tf.keras.models.load_model(model_file) print(f'\nOpening {model_file}\n') del trainY, trainX threshold_file = (self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5') if os.path.exists(threshold_file): del validX, validY return self.validate_CNN(model, validX, validY, threshold_file) return def validate_CNN(self, model, validX, validY, threshold_file): print() cnn_preds = model.predict(validX) sev_hail = cnn_preds[:, 2] sig_hail = cnn_preds[:, 3] sev_prob_preds = sev_hail + sig_hail print('Max probability', np.nanmax(sev_prob_preds)) true_preds = np.where(validY >= 2, 1, 0) del validX, validY df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=[ 'Size Threshold']) auc_score = [] thresholds = np.arange(0.1, 1.01, 0.02) for t in thresholds: threshold_preds = np.where(sev_prob_preds >= t, 1, 0) auc_score.append(roc_auc_score(true_preds, threshold_preds)) print(auc_score) df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)] print(df_best_score) df_best_score.to_csv(threshold_file) print(f'Writing out {threshold_file}') return def predict_model(self, member, patch_map_conversion_indices, total_map_shape, subset_map_shape, date, patch_radius, forecast_grid_path, lon_grid, lat_grid): """ Function that opens a pre-trained convolutional neural net (cnn). and predicts hail probability forecasts for a single ensemble member. 
Args: Right now only includes severe hail prediction, not sig-severe """ tf.keras.backend.clear_session() model_file = (self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5') DL_model = tf.keras.models.load_model(model_file, compile=False) if self.model_type == 'CNN': threshold_file = (self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5') if not os.path.exists(threshold_file): print('No thresholds found') return prob_thresh = 0 print(prob_thresh) total_count = 0 forecast_data = self.dldataeng.read_files('forecast', member, date, [None], [None]) if forecast_data is None: print('No forecast data found') return standard_forecast_data = np.array([self.dldataeng.standardize_data( member, forecast_data[hour]) for hour in np.arange( forecast_data.shape[0])]) del forecast_data total_grid = np.empty((standard_forecast_data.shape[0], total_map_shape[0] * total_map_shape[1])) * np.nan for hour in np.arange(standard_forecast_data.shape[0]): print(hour) DL_prediction = np.array(DL_model.predict( standard_forecast_data[hour])) if self.model_type == 'CNN': severe_proba_indices = np.where(cnn_preds[:, 2] + cnn_preds [:, 3] >= prob_thresh)[0] severe_patches = np.zeros(subset_map_shape) if len(severe_proba_indices) < 1: continue severe_patches[severe_proba_indices] = np.full(( patch_radius, patch_radius), 1) total_grid[hour, map_conversion_inds] = severe_patches.ravel() print(hour, len(severe_proba_indices), np.nanmax(cnn_preds[ :, 2] + cnn_preds[:, 3])) total_count += len(severe_proba_indices) print('Total severe probs:', total_count) print() elif 'UNET' in self.model_type: for patch in np.arange(standard_forecast_data.shape[1]): patch_indices = patch_map_conversion_indices[patch] overlap_pt = 4 if DL_prediction.ndim > 4: hourly_patch_data = DL_prediction[-1, patch, overlap_pt:-overlap_pt, overlap_pt:-overlap_pt, 0 ].ravel() else: hourly_patch_data = DL_prediction[patch, overlap_pt :-overlap_pt, overlap_pt:-overlap_pt, 0].ravel() total_grid[hour, patch_indices] = hourly_patch_data del DL_prediction del standard_forecast_data output_data = total_grid.reshape((total_grid.shape[0],) + total_map_shape) date_outpath = forecast_grid_path + f'{date[0][:-5]}/' if not os.path.exists(date_outpath): os.makedirs(date_outpath) gridded_out_file = (date_outpath + f'{member}_{date[0]}_forecast_grid.h5') print(f'Writing out {gridded_out_file}') with h5py.File(gridded_out_file, 'w') as hf: hf.create_dataset('data', data=output_data, compression='gzip', compression_opts=6) return def dice_loss(y_true, y_pred): y_true = tf.cast(y_true, tf.float32) y_pred = tf.math.sigmoid(y_pred) numerator = 2 * tf.reduce_sum(y_true * y_pred) denominator = tf.reduce_sum(y_true + y_pred) return 1 - numerator / denominator <|reserved_special_token_0|> def down_block(x, filters, kernel_size=(3, 3)): c = layers.Conv2D(filters, kernel_size, padding='same')(x) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) p = layers.MaxPooling2D((2, 2))(c) return c, p def up_block(x, skip, filters, kernel_size=(3, 3)): up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x) concat = layers.Concatenate()([up, skip]) c = layers.Conv2D(filters, kernel_size, padding='same')(concat) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = 
layers.BatchNormalization()(c) return c def bottleneck(x, filters, kernel_size=(3, 3)): c = layers.Conv2D(filters, kernel_size, padding='same')(x) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) return c <|reserved_special_token_1|> from processing.DLDataEngineering import DLDataEngineering from sklearn.preprocessing import OneHotEncoder import pandas as pd import numpy as np import h5py import os from scipy.ndimage import gaussian_filter #Deep learning packages import tensorflow as tf #from tensorflow import keras from tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply from tensorflow.keras.backend import max from tensorflow.keras.preprocessing.image import ImageDataGenerator #from tensorflow import keras from sklearn.metrics import f1_score,roc_auc_score import matplotlib.pyplot as plt import cartopy.feature as cf import cartopy.crs as ccrs import cartopy from keras_unet_collection import models, base, utils class DLModeler(object): def __init__(self,model_path,hf_path,num_examples, class_percentages,predictors,model_args, model_type): self.model_path = model_path self.hf_path = hf_path self.num_examples = num_examples self.class_percentages = class_percentages self.model_args = model_args self.model_type = model_type long_predictors = [] #Shorten predictor names for predictor in predictors: if "_" in predictor: predictor_name = predictor.split('_')[0].upper() + predictor.split('_')[-1] elif " " in predictor: predictor_name = ''.join([v[0].upper() for v in predictor.split()]) else: predictor_name = predictor long_predictors.append(predictor_name) self.predictors = np.array(long_predictors) #Class to read data and standardize self.dldataeng = DLDataEngineering(self.model_path,self.hf_path, self.num_examples,self.class_percentages,self.predictors, self.model_args) return def train_models(self,member,train_dates,valid_dates): """ Function that reads and extracts pre-processed 2d member data from an ensemble to train a convolutional neural net (cnn) or UNET. The model data is standardized before being input to the cnn, with the observation data in the shape (# examples, # classes). Args: member (str): ensemble member data that trains a DL model """ train_data, train_label = self.dldataeng.extract_training_data(member, train_dates,self.model_type) #valid_data, valid_label = self.dldataeng.extract_validation_data(member,valid_dates,self.model_type) valid_data, valid_label = [],[] if self.model_type == 'CNN': onehot_encoder = OneHotEncoder(sparse=False,categories='auto') encoded_label = onehot_encoder.fit_transform(train_label.reshape(-1, 1)) self.train_CNN(member,train_data,encoded_label,valid_data,valid_label) elif 'UNET' in self.model_type: #train_label[train_label >= 50.] = 50. 
#log_train_label = np.log((train_label+1.0)) self.train_UNET(member,train_data,train_label,valid_data,valid_label) return def train_UNET(self,member,trainX,trainY,validX,validY): model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5' ''' if os.path.exists(model_file): del trainX,trainY,validX,validY unet = tf.keras.models.load_model(model_file,compile=False) print(f'\nOpening {model_file}\n') #self.validate_UNET(model,validX,validY,threshold_file) return ''' print('\nTraining {0} models'.format(member)) print('Training data shape {0}'.format(np.shape(trainX))) print('Training label data shape {0}\n'.format(np.shape(trainY))) #print('Validation data shape {0}'.format(np.shape(validX))) #print('Validation label data shape {0}\n'.format(np.shape(validY))) model_obj_params = {'input_size':np.shape(trainX[0]),'n_labels':1, 'stack_num_down':2, 'stack_num_up':1, 'activation':'LeakyReLU', 'output_activation':'ReLU', 'batch_norm':False, 'pool':True, 'unpool':False, 'name':f'{self.model_type}'} if self.model_type == 'UNET': model_obj_params['filter_num'] = [16, 32, 64, 128]# 256] unet_model_obj = models.unet_2d compile_params = {'loss': 'mean_squared_error'} else: compile_params = {'loss': ['mean_squared_error', 'mean_squared_error','mean_squared_error', 'mean_squared_error','mean_squared_error'], 'loss_weights':[0.25, 0.25, 0.25, 0.25, 1.0]} if self.model_type == 'UNET2plus': plus_model_params = {'filter_num':[16, 32, 64, 128, 256], 'deep_supervision':True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_plus_2d elif self.model_type == 'UNET3plus': plus_model_params = {'filter_num_downi':[16, 32, 64, 128, 256], 'filter_num_skip':'auto', 'filter_num_aggregate':'auto', 'deep_supervision':True} model_obj_params.update(plus_model_params) unet_model_obj = models.unet_3plus_2d try: unet_model = unet_model_obj(**model_obj_params) except: print(f"{self.model_type} Model type not found.") return unet_model.compile(**compile_params,optimizer=tf.keras.optimizers.Adam(lr=1e-4)) print(unet_model.summary()) #Augment data aug = ImageDataGenerator( rotation_range=10,zoom_range=0.15, width_shift_range=0.2,height_shift_range=0.2, fill_mode="nearest") #Fit UNET n_epochs = 15 bs = 256 conv_hist = unet_model.fit( aug.flow(trainX,trainY,batch_size=bs), steps_per_epoch=len(trainX)/bs, epochs=n_epochs,verbose=1) ''' pred_s = trainX[0].reshape(1,input_shape[0], input_shape[1],input_shape[2]) prediction = unet.predict(pred_s)[0,:,:,:] print(prediction.shape) plt.imshow(prediction) plt.colorbar() plt.show() return ''' #Save trained model unet_model.save(model_file) print(f'Writing out {model_file}') #Clear graphs tf.keras.backend.clear_session() #self.validate_UNET(model,validX,validY,threshold_file) return def train_CNN(self,member,input_data): """ Function to train a convolutional neural net (CNN) for random training data and associated labels. 
Args: member (str): Ensemble member trainX (tuple): Tuple of (train data, train labels, validation data, validation labels) """ trainX,trainY,validX,validY = input_data print('\nTraining {0} models'.format(member)) print('Training data shape {0}'.format(np.shape(trainX))) print('Training label data shape {0}\n'.format(np.shape(trainY))) print('Validation data shape {0}'.format(np.shape(validX))) print('Validation label data shape {0}\n'.format(np.shape(validY))) model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5' print(model_file) if not os.path.exists(model_file): # Clear graphs tf.keras.backend.clear_session() #Initiliaze Convolutional Neural Net (CNN) model = models.Sequential() input_shape = np.shape(trainX[0]) #First layer: input shape (y,x,# variables) #Add noise model.add(layers.GaussianNoise(0.01, input_shape=(input_shape))) for filters in [32,64,128]: model.add(layers.Conv2D(filters, (3,3),padding='same')) model.add(layers.Conv2D(filters, (3,3),padding='same')) model.add(layers.BatchNormalization()) model.add(layers.LeakyReLU(alpha=0.3)) model.add(layers.MaxPooling2D()) #Flatten the last convolutional layer model.add(layers.Flatten()) model.add(layers.Dense(256)) model.add(layers.LeakyReLU(alpha=0.3)) model.add(layers.Dense(4,activation='softmax')) #Compile neural net model.compile(optimizer='adam',loss='categorical_crossentropy', metrics=[tf.keras.metrics.AUC()]) print(model.summary()) #fit neural net n_epochs = 10 bs = 256 #augment data aug = imagedatagenerator( rotation_range=10,zoom_range=0.15, width_shift_range=0.2,height_shift_range=0.2, fill_mode="nearest") train_generator = aug.flow(trainx,trainy,batch_size=bs) conv_hist = model.fit( train_generator,steps_per_epoch=len(trainx) // bs, epochs=n_epochs,verbose=1,class_weight=self.class_percentages) #save trained model model.save(model_file) print(f'Writing out {model_file}') else: model = tf.keras.models.load_model(model_file) print(f'\nOpening {model_file}\n') del trainY,trainX threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5' if os.path.exists(threshold_file): del validX,validY return self.validate_CNN(model,validX,validY,threshold_file) return def validate_CNN(self,model,validX,validY,threshold_file): print() #Predict on validation data cnn_preds = model.predict(validX) sev_hail = cnn_preds[:,2] sig_hail = cnn_preds[:,3] #combine the severe hail and sig severe hail classes sev_prob_preds = sev_hail+sig_hail print('Max probability',np.nanmax(sev_prob_preds)) #classify labels as severe hail or no hail true_preds = np.where(validY >= 2, 1, 0) del validX, validY df_best_score = pd.DataFrame(np.zeros((1,1)),columns=['Size Threshold']) #Find threshold with the highest validation AUC score auc_score = [] thresholds = np.arange(0.1,1.01,0.02) for t in thresholds: threshold_preds = np.where(sev_prob_preds >= t,1,0) auc_score.append(roc_auc_score(true_preds, threshold_preds)) print(auc_score) #output threshold with highest AUC df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)] print(df_best_score) df_best_score.to_csv(threshold_file) print(f'Writing out {threshold_file}') return def predict_model(self,member,patch_map_conversion_indices, total_map_shape,subset_map_shape,date,patch_radius,forecast_grid_path,#): lon_grid,lat_grid): """ Function that opens a pre-trained convolutional neural net (cnn). and predicts hail probability forecasts for a single ensemble member. 
Args: Right now only includes severe hail prediction, not sig-severe """ ################## # Load in any saved DL model files ################## #Clear any saved DL graphs tf.keras.backend.clear_session() #Load DL model model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5' DL_model = tf.keras.models.load_model(model_file,compile=False) if self.model_type == 'CNN': #Use minimum prob threshold chosen with validation data threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5' if not os.path.exists(threshold_file): print('No thresholds found') return prob_thresh = 0 #pd.read_csv(threshold_file).loc[0,'size_threshold']+0.05 print(prob_thresh) total_count = 0 ################## #Extract forecast data (#hours, #patches, nx, ny, #variables) ################## forecast_data = self.dldataeng.read_files('forecast',member,date,[None],[None]) if forecast_data is None: print('No forecast data found') return ################## # Standardize hourly data ################## standard_forecast_data = np.array([self.dldataeng.standardize_data(member,forecast_data[hour]) for hour in np.arange(forecast_data.shape[0])]) del forecast_data ################## # Produce gridded hourly hail forecast ################## total_grid = np.empty( (standard_forecast_data.shape[0], total_map_shape[0]*total_map_shape[1]) )*np.nan for hour in np.arange(standard_forecast_data.shape[0]): print(hour) #Predict probability of severe hail DL_prediction = np.array(DL_model.predict(standard_forecast_data[hour])) ###### # Will need to fix CNN code to reflect the conversion inds are in #patches x (patch_radius*patch_radius) instead of (patches*radius*radius) ##### if self.model_type == 'CNN': severe_proba_indices = np.where( (cnn_preds[:,2]+cnn_preds[:,3]) >= prob_thresh)[0] severe_patches = np.zeros(subset_map_shape) #If no hourly severe hail predicted, continue if len(severe_proba_indices) <1 : continue severe_patches[severe_proba_indices] = np.full((patch_radius,patch_radius), 1) total_grid[hour,map_conversion_inds] = severe_patches.ravel() print(hour,len(severe_proba_indices),np.nanmax((cnn_preds[:,2]+cnn_preds[:,3]))) total_count += len(severe_proba_indices) print('Total severe probs:',total_count) print() elif 'UNET' in self.model_type: for patch in np.arange(standard_forecast_data.shape[1]): patch_indices = patch_map_conversion_indices[patch] #Gets rid of overlapping edges overlap_pt = 4 # If unet3+ then the last output tensor is the correct one if DL_prediction.ndim > 4: hourly_patch_data = DL_prediction[-1,patch,overlap_pt:-overlap_pt, overlap_pt:-overlap_pt,0].ravel() else: hourly_patch_data = DL_prediction[patch,overlap_pt:-overlap_pt, overlap_pt:-overlap_pt,0].ravel() total_grid[hour,patch_indices] = hourly_patch_data del DL_prediction del standard_forecast_data output_data=total_grid.reshape((total_grid.shape[0],)+total_map_shape) date_outpath = forecast_grid_path + f'{date[0][:-5]}/' #Output gridded forecasts if not os.path.exists(date_outpath): os.makedirs(date_outpath) gridded_out_file = date_outpath + f'{member}_{date[0]}_forecast_grid.h5' print(f'Writing out {gridded_out_file}') with h5py.File(gridded_out_file, 'w') as hf: hf.create_dataset("data",data=output_data, compression='gzip',compression_opts=6) return def dice_loss(y_true, y_pred): y_true = tf.cast(y_true, tf.float32) y_pred = tf.math.sigmoid(y_pred) numerator = 2 * tf.reduce_sum(y_true * y_pred) denominator = tf.reduce_sum(y_true + y_pred) return 1 - numerator / denominator ''' From: 
https://idiotdeveloper.com/unet-segmentation-in-tensorflow/ ''' def down_block(x, filters, kernel_size=(3, 3)): c = layers.Conv2D(filters, kernel_size, padding='same')(x) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) p = layers.MaxPooling2D((2,2))(c) return c, p def up_block(x, skip, filters, kernel_size=(3, 3)): up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x) concat = layers.Concatenate()([up, skip]) c = layers.Conv2D(filters, kernel_size, padding='same')(concat) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) return c def bottleneck(x, filters, kernel_size=(3, 3)): c = layers.Conv2D(filters, kernel_size, padding='same')(x) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) c = layers.Conv2D(filters, kernel_size, padding='same')(c) c = layers.LeakyReLU(alpha=0.2)(c) c = layers.BatchNormalization()(c) return c
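# Minimal sketch (not part of the original file) showing how the down_block,
# bottleneck and up_block helpers above could be assembled into a small UNET
# with the Keras functional API. Assumptions: the `layers` module the helpers
# reference is tensorflow.keras.layers (the file never imports it explicitly,
# so this import also makes the helpers resolvable), and the (32, 32, 8) patch
# shape and filter counts are illustrative only.
from tensorflow.keras import layers, Model


def build_small_unet(input_shape=(32, 32, 8), filters=(16, 32, 64)):
    inputs = layers.Input(shape=input_shape)
    x = inputs
    skips = []
    # Encoder: each down_block returns (pre-pool features, pooled features)
    for f in filters:
        c, x = down_block(x, f)
        skips.append(c)
    # Lowest-resolution bottleneck
    x = bottleneck(x, filters[-1] * 2)
    # Decoder: upsample and concatenate the matching encoder skip connection
    for f, skip in zip(reversed(filters), reversed(skips)):
        x = up_block(x, skip, f)
    # Single-channel regression head, mirroring the n_labels=1 / ReLU output
    # used for the UNET configurations earlier in this file
    outputs = layers.Conv2D(1, (1, 1), activation='relu')(x)
    return Model(inputs, outputs, name='small_unet')


# Example: build_small_unet().summary() prints the layer graph; the input
# spatial size must be divisible by 2**len(filters) (here 8) so the upsampled
# tensors line up with their skip connections.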
print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n print('Validation data shape {0}'.format(np.shape(validX)))\n print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model.h5')\n print(model_file)\n if not os.path.exists(model_file):\n tf.keras.backend.clear_session()\n model = models.Sequential()\n input_shape = np.shape(trainX[0])\n model.add(layers.GaussianNoise(0.01, input_shape=input_shape))\n for filters in [32, 64, 128]:\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.Conv2D(filters, (3, 3), padding='same'))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.MaxPooling2D())\n model.add(layers.Flatten())\n model.add(layers.Dense(256))\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.Dense(4, activation='softmax'))\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=[tf.keras.metrics.AUC()])\n print(model.summary())\n n_epochs = 10\n bs = 256\n aug = imagedatagenerator(rotation_range=10, zoom_range=0.15,\n width_shift_range=0.2, height_shift_range=0.2, fill_mode=\n 'nearest')\n train_generator = aug.flow(trainx, trainy, batch_size=bs)\n conv_hist = model.fit(train_generator, steps_per_epoch=len(\n trainx) // bs, epochs=n_epochs, verbose=1, class_weight=\n self.class_percentages)\n model.save(model_file)\n print(f'Writing out {model_file}')\n else:\n model = tf.keras.models.load_model(model_file)\n print(f'\\nOpening {model_file}\\n')\n del trainY, trainX\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if os.path.exists(threshold_file):\n del validX, validY\n return\n self.validate_CNN(model, validX, validY, threshold_file)\n return\n\n def validate_CNN(self, model, validX, validY, threshold_file):\n print()\n cnn_preds = model.predict(validX)\n sev_hail = cnn_preds[:, 2]\n sig_hail = cnn_preds[:, 3]\n sev_prob_preds = sev_hail + sig_hail\n print('Max probability', np.nanmax(sev_prob_preds))\n true_preds = np.where(validY >= 2, 1, 0)\n del validX, validY\n df_best_score = pd.DataFrame(np.zeros((1, 1)), columns=[\n 'Size Threshold'])\n auc_score = []\n thresholds = np.arange(0.1, 1.01, 0.02)\n for t in thresholds:\n threshold_preds = np.where(sev_prob_preds >= t, 1, 0)\n auc_score.append(roc_auc_score(true_preds, threshold_preds))\n print(auc_score)\n df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]\n print(df_best_score)\n df_best_score.to_csv(threshold_file)\n print(f'Writing out {threshold_file}')\n return\n\n def predict_model(self, member, patch_map_conversion_indices,\n total_map_shape, subset_map_shape, date, patch_radius,\n forecast_grid_path, lon_grid, lat_grid):\n \"\"\"\n Function that opens a pre-trained convolutional neural net (cnn). 
\n and predicts hail probability forecasts for a single ensemble member.\n \n Args:\n Right now only includes severe hail prediction, not sig-severe\n \"\"\"\n tf.keras.backend.clear_session()\n model_file = (self.model_path +\n f'/{member}_{self.model_args}_{self.model_type}.h5')\n DL_model = tf.keras.models.load_model(model_file, compile=False)\n if self.model_type == 'CNN':\n threshold_file = (self.model_path +\n f'/{member}_{self.model_args}_CNN_model_threshold.h5')\n if not os.path.exists(threshold_file):\n print('No thresholds found')\n return\n prob_thresh = 0\n print(prob_thresh)\n total_count = 0\n forecast_data = self.dldataeng.read_files('forecast', member, date,\n [None], [None])\n if forecast_data is None:\n print('No forecast data found')\n return\n standard_forecast_data = np.array([self.dldataeng.standardize_data(\n member, forecast_data[hour]) for hour in np.arange(\n forecast_data.shape[0])])\n del forecast_data\n total_grid = np.empty((standard_forecast_data.shape[0], \n total_map_shape[0] * total_map_shape[1])) * np.nan\n for hour in np.arange(standard_forecast_data.shape[0]):\n print(hour)\n DL_prediction = np.array(DL_model.predict(\n standard_forecast_data[hour]))\n if self.model_type == 'CNN':\n severe_proba_indices = np.where(cnn_preds[:, 2] + cnn_preds\n [:, 3] >= prob_thresh)[0]\n severe_patches = np.zeros(subset_map_shape)\n if len(severe_proba_indices) < 1:\n continue\n severe_patches[severe_proba_indices] = np.full((\n patch_radius, patch_radius), 1)\n total_grid[hour, map_conversion_inds] = severe_patches.ravel()\n print(hour, len(severe_proba_indices), np.nanmax(cnn_preds[\n :, 2] + cnn_preds[:, 3]))\n total_count += len(severe_proba_indices)\n print('Total severe probs:', total_count)\n print()\n elif 'UNET' in self.model_type:\n for patch in np.arange(standard_forecast_data.shape[1]):\n patch_indices = patch_map_conversion_indices[patch]\n overlap_pt = 4\n if DL_prediction.ndim > 4:\n hourly_patch_data = DL_prediction[-1, patch,\n overlap_pt:-overlap_pt, overlap_pt:-overlap_pt, 0\n ].ravel()\n else:\n hourly_patch_data = DL_prediction[patch, overlap_pt\n :-overlap_pt, overlap_pt:-overlap_pt, 0].ravel()\n total_grid[hour, patch_indices] = hourly_patch_data\n del DL_prediction\n del standard_forecast_data\n output_data = total_grid.reshape((total_grid.shape[0],) +\n total_map_shape)\n date_outpath = forecast_grid_path + f'{date[0][:-5]}/'\n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n gridded_out_file = (date_outpath +\n f'{member}_{date[0]}_forecast_grid.h5')\n print(f'Writing out {gridded_out_file}')\n with h5py.File(gridded_out_file, 'w') as hf:\n hf.create_dataset('data', data=output_data, compression='gzip',\n compression_opts=6)\n return\n\n\ndef dice_loss(y_true, y_pred):\n y_true = tf.cast(y_true, tf.float32)\n y_pred = tf.math.sigmoid(y_pred)\n numerator = 2 * tf.reduce_sum(y_true * y_pred)\n denominator = tf.reduce_sum(y_true + y_pred)\n return 1 - numerator / denominator\n\n\n<mask token>\n\n\ndef down_block(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n p = layers.MaxPooling2D((2, 2))(c)\n return c, p\n\n\ndef up_block(x, skip, filters, kernel_size=(3, 3)):\n up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)\n concat = layers.Concatenate()([up, skip])\n c 
= layers.Conv2D(filters, kernel_size, padding='same')(concat)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n\n\ndef bottleneck(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n", "step-5": "from processing.DLDataEngineering import DLDataEngineering\nfrom sklearn.preprocessing import OneHotEncoder\nimport pandas as pd\nimport numpy as np\nimport h5py\nimport os\n\nfrom scipy.ndimage import gaussian_filter\n \n#Deep learning packages\nimport tensorflow as tf\n#from tensorflow import keras\nfrom tensorflow.keras.layers import Input, Conv2D, Dropout, Activation, UpSampling2D, GlobalMaxPooling2D, multiply\nfrom tensorflow.keras.backend import max\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\n#from tensorflow import keras \nfrom sklearn.metrics import f1_score,roc_auc_score\n\nimport matplotlib.pyplot as plt\nimport cartopy.feature as cf \nimport cartopy.crs as ccrs\nimport cartopy\n\nfrom keras_unet_collection import models, base, utils\n\nclass DLModeler(object):\n def __init__(self,model_path,hf_path,num_examples,\n class_percentages,predictors,model_args,\n model_type):\n \n self.model_path = model_path\n self.hf_path = hf_path\n self.num_examples = num_examples\n self.class_percentages = class_percentages\n self.model_args = model_args \n self.model_type = model_type\n \n long_predictors = []\n #Shorten predictor names\n \n for predictor in predictors:\n if \"_\" in predictor: \n predictor_name = predictor.split('_')[0].upper() + predictor.split('_')[-1]\n elif \" \" in predictor: \n predictor_name = ''.join([v[0].upper() for v in predictor.split()])\n else: predictor_name = predictor\n long_predictors.append(predictor_name)\n \n self.predictors = np.array(long_predictors)\n \n #Class to read data and standardize\n self.dldataeng = DLDataEngineering(self.model_path,self.hf_path, \n self.num_examples,self.class_percentages,self.predictors,\n self.model_args)\n \n \n return\n \n\n def train_models(self,member,train_dates,valid_dates):\n \"\"\"\n Function that reads and extracts pre-processed 2d member data \n from an ensemble to train a convolutional neural net (cnn) or \n UNET. \n The model data is standardized before being input to the cnn, \n with the observation data in the shape (# examples, # classes). \n\n Args:\n member (str): ensemble member data that trains a DL model\n \"\"\"\n train_data, train_label = self.dldataeng.extract_training_data(member,\n train_dates,self.model_type)\n \n #valid_data, valid_label = self.dldataeng.extract_validation_data(member,valid_dates,self.model_type)\n valid_data, valid_label = [],[]\n \n if self.model_type == 'CNN':\n onehot_encoder = OneHotEncoder(sparse=False,categories='auto')\n encoded_label = onehot_encoder.fit_transform(train_label.reshape(-1, 1))\n self.train_CNN(member,train_data,encoded_label,valid_data,valid_label)\n\n elif 'UNET' in self.model_type:\n #train_label[train_label >= 50.] = 50. 
\n #log_train_label = np.log((train_label+1.0))\n self.train_UNET(member,train_data,train_label,valid_data,valid_label)\n \n return \n\n def train_UNET(self,member,trainX,trainY,validX,validY):\n \n model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'\n \n '''\n if os.path.exists(model_file):\n del trainX,trainY,validX,validY\n unet = tf.keras.models.load_model(model_file,compile=False)\n print(f'\\nOpening {model_file}\\n')\n #self.validate_UNET(model,validX,validY,threshold_file)\n return \n '''\n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n #print('Validation data shape {0}'.format(np.shape(validX)))\n #print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n \n model_obj_params = {'input_size':np.shape(trainX[0]),'n_labels':1, \n 'stack_num_down':2, 'stack_num_up':1, 'activation':'LeakyReLU', \n 'output_activation':'ReLU', 'batch_norm':False, 'pool':True, \n 'unpool':False, 'name':f'{self.model_type}'}\n \n if self.model_type == 'UNET':\n model_obj_params['filter_num'] = [16, 32, 64, 128]# 256]\n unet_model_obj = models.unet_2d\n compile_params = {'loss': 'mean_squared_error'}\n \n else:\n compile_params = {'loss': ['mean_squared_error',\n 'mean_squared_error','mean_squared_error',\n 'mean_squared_error','mean_squared_error'],\n 'loss_weights':[0.25, 0.25, 0.25, 0.25, 1.0]}\n if self.model_type == 'UNET2plus': \n plus_model_params = {'filter_num':[16, 32, 64, 128, 256],\n 'deep_supervision':True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = models.unet_plus_2d\n\n elif self.model_type == 'UNET3plus': \n plus_model_params = {'filter_num_downi':[16, 32, 64, 128, 256],\n 'filter_num_skip':'auto', 'filter_num_aggregate':'auto',\n 'deep_supervision':True}\n model_obj_params.update(plus_model_params)\n unet_model_obj = models.unet_3plus_2d\n \n try: unet_model = unet_model_obj(**model_obj_params)\n except: \n print(f\"{self.model_type} Model type not found.\")\n return\n \n unet_model.compile(**compile_params,optimizer=tf.keras.optimizers.Adam(lr=1e-4))\n print(unet_model.summary())\n \n #Augment data\n aug = ImageDataGenerator(\n rotation_range=10,zoom_range=0.15,\n width_shift_range=0.2,height_shift_range=0.2,\n fill_mode=\"nearest\")\n #Fit UNET\n n_epochs = 15\n bs = 256\n \n conv_hist = unet_model.fit(\n aug.flow(trainX,trainY,batch_size=bs),\n steps_per_epoch=len(trainX)/bs,\n epochs=n_epochs,verbose=1) \n '''\n pred_s = trainX[0].reshape(1,input_shape[0],\n input_shape[1],input_shape[2])\n\n prediction = unet.predict(pred_s)[0,:,:,:]\n print(prediction.shape)\n plt.imshow(prediction)\n plt.colorbar()\n plt.show()\n return\n '''\n #Save trained model\n unet_model.save(model_file)\n print(f'Writing out {model_file}')\n \n #Clear graphs\n tf.keras.backend.clear_session()\n \n #self.validate_UNET(model,validX,validY,threshold_file)\n return \n \n \n def train_CNN(self,member,input_data): \n \"\"\"\n Function to train a convolutional neural net (CNN) for random \n training data and associated labels.\n\n Args:\n member (str): Ensemble member \n trainX (tuple): Tuple of (train data, train labels, \n validation data, validation labels) \n \"\"\"\n trainX,trainY,validX,validY = input_data\n \n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n print('Validation data shape 
{0}'.format(np.shape(validX)))\n print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n \n \n model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'\n print(model_file)\n if not os.path.exists(model_file):\n # Clear graphs\n tf.keras.backend.clear_session()\n \n #Initiliaze Convolutional Neural Net (CNN)\n model = models.Sequential()\n input_shape = np.shape(trainX[0])\n \n #First layer: input shape (y,x,# variables) \n #Add noise\n model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))\n for filters in [32,64,128]:\n model.add(layers.Conv2D(filters, (3,3),padding='same'))\n model.add(layers.Conv2D(filters, (3,3),padding='same'))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.MaxPooling2D())\n \n #Flatten the last convolutional layer \n model.add(layers.Flatten())\n model.add(layers.Dense(256))\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.Dense(4,activation='softmax'))\n #Compile neural net\n model.compile(optimizer='adam',loss='categorical_crossentropy',\n metrics=[tf.keras.metrics.AUC()])\n print(model.summary())\n #fit neural net\n n_epochs = 10\n bs = 256\n\n #augment data\n aug = imagedatagenerator(\n rotation_range=10,zoom_range=0.15,\n width_shift_range=0.2,height_shift_range=0.2,\n fill_mode=\"nearest\")\n \n train_generator = aug.flow(trainx,trainy,batch_size=bs)\n conv_hist = model.fit(\n train_generator,steps_per_epoch=len(trainx) // bs,\n epochs=n_epochs,verbose=1,class_weight=self.class_percentages)\n #save trained model\n model.save(model_file)\n print(f'Writing out {model_file}')\n else:\n model = tf.keras.models.load_model(model_file)\n print(f'\\nOpening {model_file}\\n')\n\n del trainY,trainX\n \n threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'\n if os.path.exists(threshold_file): \n del validX,validY\n return\n \n self.validate_CNN(model,validX,validY,threshold_file)\n return \n\n def validate_CNN(self,model,validX,validY,threshold_file): \n print()\n #Predict on validation data\n cnn_preds = model.predict(validX)\n sev_hail = cnn_preds[:,2]\n sig_hail = cnn_preds[:,3]\n #combine the severe hail and sig severe hail classes\n sev_prob_preds = sev_hail+sig_hail\n print('Max probability',np.nanmax(sev_prob_preds))\n #classify labels as severe hail or no hail\n true_preds = np.where(validY >= 2, 1, 0)\n del validX, validY\n \n df_best_score = pd.DataFrame(np.zeros((1,1)),columns=['Size Threshold'])\n #Find threshold with the highest validation AUC score \n auc_score = []\n thresholds = np.arange(0.1,1.01,0.02)\n for t in thresholds:\n threshold_preds = np.where(sev_prob_preds >= t,1,0)\n auc_score.append(roc_auc_score(true_preds, threshold_preds))\n \n print(auc_score)\n #output threshold with highest AUC \n df_best_score['Size Threshold'] = thresholds[np.argmax(auc_score)]\n print(df_best_score)\n df_best_score.to_csv(threshold_file)\n print(f'Writing out {threshold_file}')\n return \n \n \n def predict_model(self,member,patch_map_conversion_indices,\n total_map_shape,subset_map_shape,date,patch_radius,forecast_grid_path,#):\n lon_grid,lat_grid):\n \"\"\"\n Function that opens a pre-trained convolutional neural net (cnn). 
\n and predicts hail probability forecasts for a single ensemble member.\n \n Args:\n Right now only includes severe hail prediction, not sig-severe\n \"\"\"\n \n ################## \n # Load in any saved DL model files\n ################## \n \n #Clear any saved DL graphs\n tf.keras.backend.clear_session()\n \n #Load DL model\n model_file = self.model_path + f'/{member}_{self.model_args}_{self.model_type}.h5'\n DL_model = tf.keras.models.load_model(model_file,compile=False) \n \n if self.model_type == 'CNN':\n #Use minimum prob threshold chosen with validation data\n threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'\n if not os.path.exists(threshold_file):\n print('No thresholds found')\n return \n prob_thresh = 0 #pd.read_csv(threshold_file).loc[0,'size_threshold']+0.05\n print(prob_thresh) \n total_count = 0\n \n ################## \n #Extract forecast data (#hours, #patches, nx, ny, #variables)\n ################## \n \n forecast_data = self.dldataeng.read_files('forecast',member,date,[None],[None])\n \n if forecast_data is None: \n print('No forecast data found')\n return\n \n ################## \n # Standardize hourly data\n ################## \n \n standard_forecast_data = np.array([self.dldataeng.standardize_data(member,forecast_data[hour]) \n for hour in np.arange(forecast_data.shape[0])])\n \n del forecast_data\n ################## \n # Produce gridded hourly hail forecast \n ################## \n\n total_grid = np.empty( (standard_forecast_data.shape[0],\n total_map_shape[0]*total_map_shape[1]) )*np.nan\n\n for hour in np.arange(standard_forecast_data.shape[0]):\n print(hour)\n #Predict probability of severe hail\n DL_prediction = np.array(DL_model.predict(standard_forecast_data[hour]))\n ######\n # Will need to fix CNN code to reflect the conversion inds are in \n #patches x (patch_radius*patch_radius) instead of (patches*radius*radius)\n #####\n if self.model_type == 'CNN':\n severe_proba_indices = np.where( (cnn_preds[:,2]+cnn_preds[:,3]) >= prob_thresh)[0]\n severe_patches = np.zeros(subset_map_shape)\n #If no hourly severe hail predicted, continue\n if len(severe_proba_indices) <1 : continue\n severe_patches[severe_proba_indices] = np.full((patch_radius,patch_radius), 1)\n total_grid[hour,map_conversion_inds] = severe_patches.ravel()\n print(hour,len(severe_proba_indices),np.nanmax((cnn_preds[:,2]+cnn_preds[:,3])))\n total_count += len(severe_proba_indices)\n print('Total severe probs:',total_count)\n print()\n elif 'UNET' in self.model_type:\n for patch in np.arange(standard_forecast_data.shape[1]):\n patch_indices = patch_map_conversion_indices[patch]\n #Gets rid of overlapping edges\n overlap_pt = 4\n # If unet3+ then the last output tensor is the correct one\n if DL_prediction.ndim > 4:\n hourly_patch_data = DL_prediction[-1,patch,overlap_pt:-overlap_pt,\n overlap_pt:-overlap_pt,0].ravel()\n else:\n hourly_patch_data = DL_prediction[patch,overlap_pt:-overlap_pt,\n overlap_pt:-overlap_pt,0].ravel()\n total_grid[hour,patch_indices] = hourly_patch_data\n del DL_prediction\n del standard_forecast_data\n output_data=total_grid.reshape((total_grid.shape[0],)+total_map_shape)\n \n date_outpath = forecast_grid_path + f'{date[0][:-5]}/'\n \n #Output gridded forecasts\n if not os.path.exists(date_outpath): os.makedirs(date_outpath)\n gridded_out_file = date_outpath + f'{member}_{date[0]}_forecast_grid.h5'\n print(f'Writing out {gridded_out_file}')\n with h5py.File(gridded_out_file, 'w') as hf: \n 
hf.create_dataset(\"data\",data=output_data,\n compression='gzip',compression_opts=6)\n \n return\n\ndef dice_loss(y_true, y_pred):\n y_true = tf.cast(y_true, tf.float32)\n y_pred = tf.math.sigmoid(y_pred)\n numerator = 2 * tf.reduce_sum(y_true * y_pred)\n denominator = tf.reduce_sum(y_true + y_pred)\n return 1 - numerator / denominator\n\n'''\nFrom: https://idiotdeveloper.com/unet-segmentation-in-tensorflow/\n''' \n\ndef down_block(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n p = layers.MaxPooling2D((2,2))(c)\n return c, p\n\ndef up_block(x, skip, filters, kernel_size=(3, 3)):\n up = layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(x)\n concat = layers.Concatenate()([up, skip])\n c = layers.Conv2D(filters, kernel_size, padding='same')(concat)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n\ndef bottleneck(x, filters, kernel_size=(3, 3)):\n c = layers.Conv2D(filters, kernel_size, padding='same')(x)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n c = layers.Conv2D(filters, kernel_size, padding='same')(c)\n c = layers.LeakyReLU(alpha=0.2)(c)\n c = layers.BatchNormalization()(c)\n return c\n", "step-ids": [ 8, 9, 10, 12, 13 ] }
[ 8, 9, 10, 12, 13 ]
from datetime import date


def diff_in_date(first, second):
    value = str(second - first)
    if value.__contains__(','):
        generated_sum = value.split(',')
        return generated_sum[0]
    else:
        return value


first_date = date(2014, 7, 2)
second_date = date(2014, 7, 11)
current_date = date.today()
val = diff_in_date(first_date, second_date)
print(val)
newVal = diff_in_date(second_date, current_date)
print(newVal)
normal
{ "blob_id": "9b6d30a40bafa0e9e4760843d6a2f750f0f88a57", "index": 6106, "step-1": "<mask token>\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\n<mask token>\nprint(val)\n<mask token>\nprint(newVal)\n", "step-3": "<mask token>\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\nfirst_date = date(2014, 7, 2)\nsecond_date = date(2014, 7, 11)\ncurrent_date = date.today()\nval = diff_in_date(first_date, second_date)\nprint(val)\nnewVal = diff_in_date(second_date, current_date)\nprint(newVal)\n", "step-4": "from datetime import date\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\nfirst_date = date(2014, 7, 2)\nsecond_date = date(2014, 7, 11)\ncurrent_date = date.today()\nval = diff_in_date(first_date, second_date)\nprint(val)\nnewVal = diff_in_date(second_date, current_date)\nprint(newVal)\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
<|reserved_special_token_0|>


class NormalizeImageDict(object):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>

    def __call__(self, sample):
        for key in self.image_keys:
            if self.normalizeRange:
                sample[key] /= 255.0
            sample[key] = self.normalize(sample[key])
        return sample
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class NormalizeImageDict(object):
    <|reserved_special_token_0|>

    def __init__(self, image_keys, normalizeRange=True):
        self.image_keys = image_keys
        self.normalizeRange = normalizeRange
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])

    def __call__(self, sample):
        for key in self.image_keys:
            if self.normalizeRange:
                sample[key] /= 255.0
            sample[key] = self.normalize(sample[key])
        return sample
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class NormalizeImageDict(object):
    """
    Normalize image in dictionary
    normalize range is True, the image is divided by 255
    """

    def __init__(self, image_keys, normalizeRange=True):
        self.image_keys = image_keys
        self.normalizeRange = normalizeRange
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])

    def __call__(self, sample):
        for key in self.image_keys:
            if self.normalizeRange:
                sample[key] /= 255.0
            sample[key] = self.normalize(sample[key])
        return sample
<|reserved_special_token_1|>
import torch
from torchvision import transforms
from torch.autograd import Variable


class NormalizeImageDict(object):
    """
    Normalize image in dictionary
    normalize range is True, the image is divided by 255
    """

    def __init__(self, image_keys, normalizeRange=True):
        self.image_keys = image_keys
        self.normalizeRange = normalizeRange
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225])

    def __call__(self, sample):
        for key in self.image_keys:
            if self.normalizeRange:
                sample[key] /= 255.0
            sample[key] = self.normalize(sample[key])
        return sample
flexible
{ "blob_id": "4293ad0b2a4a352d6bdc4b860448c4a3b14ca629", "index": 8648, "step-1": "<mask token>\n\n\nclass NormalizeImageDict(object):\n <mask token>\n <mask token>\n\n def __call__(self, sample):\n for key in self.image_keys:\n if self.normalizeRange:\n sample[key] /= 255.0\n sample[key] = self.normalize(sample[key])\n return sample\n", "step-2": "<mask token>\n\n\nclass NormalizeImageDict(object):\n <mask token>\n\n def __init__(self, image_keys, normalizeRange=True):\n self.image_keys = image_keys\n self.normalizeRange = normalizeRange\n self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n def __call__(self, sample):\n for key in self.image_keys:\n if self.normalizeRange:\n sample[key] /= 255.0\n sample[key] = self.normalize(sample[key])\n return sample\n", "step-3": "<mask token>\n\n\nclass NormalizeImageDict(object):\n \"\"\"\n Normalize image in dictionary\n normalize range is True, the image is divided by 255\n \"\"\"\n\n def __init__(self, image_keys, normalizeRange=True):\n self.image_keys = image_keys\n self.normalizeRange = normalizeRange\n self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n def __call__(self, sample):\n for key in self.image_keys:\n if self.normalizeRange:\n sample[key] /= 255.0\n sample[key] = self.normalize(sample[key])\n return sample\n", "step-4": "import torch\nfrom torchvision import transforms\nfrom torch.autograd import Variable\n\n\nclass NormalizeImageDict(object):\n \"\"\"\n Normalize image in dictionary\n normalize range is True, the image is divided by 255\n \"\"\"\n\n def __init__(self, image_keys, normalizeRange=True):\n self.image_keys = image_keys\n self.normalizeRange = normalizeRange\n self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n def __call__(self, sample):\n for key in self.image_keys:\n if self.normalizeRange:\n sample[key] /= 255.0\n sample[key] = self.normalize(sample[key])\n return sample\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
from torch.utils.data.sampler import Sampler
import torch
import random


class SwitchingBatchSampler(Sampler):

    def __init__(self, data_source, batch_size, drop_last=False):
        self.data_source = data_source
        self.batch_size = batch_size
        self.drop_last = drop_last

        # Divide the indices into two indices groups
        self.data_len = len(self.data_source)
        count = 0
        for i in range(self.data_len):
            if self.data_source.imgs[i][1] == 1:
                break
            else:
                count += 1

        print("Total Images: %d [Class 0: %d, Class 1: %d]\n" % (self.data_len, count, (self.data_len - count)))

        self.first_size = count

        if random.uniform(0, 1) > 0.5:
            self.turn = 0
        else:
            self.turn = 1

    def __iter__(self):
        # Initialize both iters
        second_size = self.data_len - self.first_size
        self.first_iter = iter(torch.randperm(self.first_size))
        self.second_iter = iter(torch.randperm(second_size) + self.first_size)

        # Counting variables
        i = 0
        count_first = 0   # Counts how many imgs of first iter has been returned
        count_second = 0  # Counts second iter
        batch = []

        # Until no data left, keep iterating
        while count_first + count_second < self.data_len:
            # Fill the batch
            if self.turn == 0:
                if count_first == self.first_size:
                    self.turn = 1
                    if len(batch) > 0 and not self.drop_last:
                        yield batch
                    batch = []
                else:
                    batch.append(next(self.first_iter))
                    count_first += 1
                    i += 1
            else:
                if count_second == (self.data_len - self.first_size):
                    self.turn = 0
                    if len(batch) > 0 and not self.drop_last:
                        yield batch
                    batch = []
                else:
                    batch.append(next(self.second_iter))
                    count_second += 1
                    i += 1
            # Yield the batch and switch the turn randomly
            if i != 0 and i % self.batch_size == 0:
                yield batch
                batch = []
                if count_first != self.first_size and count_second != second_size and random.uniform(0, 1) > 0.5:
                    self.turn = (self.turn + 1) % 2

        # If drop_last is False, return the rest
        if len(batch) > 0 and not self.drop_last:
            yield batch

    def __len__(self):
        if self.drop_last:
            return (self.first_size // self.batch_size) + ((self.data_len - self.first_size) // self.batch_size)
        else:
            return ((self.first_size + self.batch_size - 1) // self.batch_size) + ((self.data_len - self.first_size + self.batch_size - 1) // self.batch_size)
normal
{ "blob_id": "6b7bc40ba842ff565e7141fb1d51def99d9ab96a", "index": 1124, "step-1": "<mask token>\n\n\nclass SwitchingBatchSampler(Sampler):\n <mask token>\n\n def __iter__(self):\n second_size = self.data_len - self.first_size\n self.first_iter = iter(torch.randperm(self.first_size))\n self.second_iter = iter(torch.randperm(second_size) + self.first_size)\n i = 0\n count_first = 0\n count_second = 0\n batch = []\n while count_first + count_second < self.data_len:\n if self.turn == 0:\n if count_first == self.first_size:\n self.turn = 1\n if len(batch) > 0 and not self.drop_last:\n yield batch\n batch = []\n else:\n batch.append(next(self.first_iter))\n count_first += 1\n i += 1\n elif count_second == self.data_len - self.first_size:\n self.turn = 0\n if len(batch) > 0 and not self.drop_last:\n yield batch\n batch = []\n else:\n batch.append(next(self.second_iter))\n count_second += 1\n i += 1\n if i != 0 and i % self.batch_size == 0:\n yield batch\n batch = []\n if (count_first != self.first_size and count_second !=\n second_size and random.uniform(0, 1) > 0.5):\n self.turn = (self.turn + 1) % 2\n if len(batch) > 0 and not self.drop_last:\n yield batch\n <mask token>\n", "step-2": "<mask token>\n\n\nclass SwitchingBatchSampler(Sampler):\n <mask token>\n\n def __iter__(self):\n second_size = self.data_len - self.first_size\n self.first_iter = iter(torch.randperm(self.first_size))\n self.second_iter = iter(torch.randperm(second_size) + self.first_size)\n i = 0\n count_first = 0\n count_second = 0\n batch = []\n while count_first + count_second < self.data_len:\n if self.turn == 0:\n if count_first == self.first_size:\n self.turn = 1\n if len(batch) > 0 and not self.drop_last:\n yield batch\n batch = []\n else:\n batch.append(next(self.first_iter))\n count_first += 1\n i += 1\n elif count_second == self.data_len - self.first_size:\n self.turn = 0\n if len(batch) > 0 and not self.drop_last:\n yield batch\n batch = []\n else:\n batch.append(next(self.second_iter))\n count_second += 1\n i += 1\n if i != 0 and i % self.batch_size == 0:\n yield batch\n batch = []\n if (count_first != self.first_size and count_second !=\n second_size and random.uniform(0, 1) > 0.5):\n self.turn = (self.turn + 1) % 2\n if len(batch) > 0 and not self.drop_last:\n yield batch\n\n def __len__(self):\n if self.drop_last:\n return self.first_size // self.batch_size\n +((self.data_len - self.first_size) // self.batch_size)\n else:\n return (self.first_size + self.batch_size - 1) // self.batch_size\n +((self.data_len - self.first_size + self.batch_size - 1) //\n self.batch_size)\n", "step-3": "<mask token>\n\n\nclass SwitchingBatchSampler(Sampler):\n\n def __init__(self, data_source, batch_size, drop_last=False):\n self.data_source = data_source\n self.batch_size = batch_size\n self.drop_last = drop_last\n self.data_len = len(self.data_source)\n count = 0\n for i in range(self.data_len):\n if self.data_source.imgs[i][1] == 1:\n break\n else:\n count += 1\n print('Total Images: %d [Class 0: %d, Class 1: %d]\\n' % (self.\n data_len, count, self.data_len - count))\n self.first_size = count\n if random.uniform(0, 1) > 0.5:\n self.turn = 0\n else:\n self.turn = 1\n\n def __iter__(self):\n second_size = self.data_len - self.first_size\n self.first_iter = iter(torch.randperm(self.first_size))\n self.second_iter = iter(torch.randperm(second_size) + self.first_size)\n i = 0\n count_first = 0\n count_second = 0\n batch = []\n while count_first + count_second < self.data_len:\n if self.turn == 0:\n if count_first == self.first_size:\n 
self.turn = 1\n if len(batch) > 0 and not self.drop_last:\n yield batch\n batch = []\n else:\n batch.append(next(self.first_iter))\n count_first += 1\n i += 1\n elif count_second == self.data_len - self.first_size:\n self.turn = 0\n if len(batch) > 0 and not self.drop_last:\n yield batch\n batch = []\n else:\n batch.append(next(self.second_iter))\n count_second += 1\n i += 1\n if i != 0 and i % self.batch_size == 0:\n yield batch\n batch = []\n if (count_first != self.first_size and count_second !=\n second_size and random.uniform(0, 1) > 0.5):\n self.turn = (self.turn + 1) % 2\n if len(batch) > 0 and not self.drop_last:\n yield batch\n\n def __len__(self):\n if self.drop_last:\n return self.first_size // self.batch_size\n +((self.data_len - self.first_size) // self.batch_size)\n else:\n return (self.first_size + self.batch_size - 1) // self.batch_size\n +((self.data_len - self.first_size + self.batch_size - 1) //\n self.batch_size)\n", "step-4": "from torch.utils.data.sampler import Sampler\nimport torch\nimport random\n\n\nclass SwitchingBatchSampler(Sampler):\n\n def __init__(self, data_source, batch_size, drop_last=False):\n self.data_source = data_source\n self.batch_size = batch_size\n self.drop_last = drop_last\n self.data_len = len(self.data_source)\n count = 0\n for i in range(self.data_len):\n if self.data_source.imgs[i][1] == 1:\n break\n else:\n count += 1\n print('Total Images: %d [Class 0: %d, Class 1: %d]\\n' % (self.\n data_len, count, self.data_len - count))\n self.first_size = count\n if random.uniform(0, 1) > 0.5:\n self.turn = 0\n else:\n self.turn = 1\n\n def __iter__(self):\n second_size = self.data_len - self.first_size\n self.first_iter = iter(torch.randperm(self.first_size))\n self.second_iter = iter(torch.randperm(second_size) + self.first_size)\n i = 0\n count_first = 0\n count_second = 0\n batch = []\n while count_first + count_second < self.data_len:\n if self.turn == 0:\n if count_first == self.first_size:\n self.turn = 1\n if len(batch) > 0 and not self.drop_last:\n yield batch\n batch = []\n else:\n batch.append(next(self.first_iter))\n count_first += 1\n i += 1\n elif count_second == self.data_len - self.first_size:\n self.turn = 0\n if len(batch) > 0 and not self.drop_last:\n yield batch\n batch = []\n else:\n batch.append(next(self.second_iter))\n count_second += 1\n i += 1\n if i != 0 and i % self.batch_size == 0:\n yield batch\n batch = []\n if (count_first != self.first_size and count_second !=\n second_size and random.uniform(0, 1) > 0.5):\n self.turn = (self.turn + 1) % 2\n if len(batch) > 0 and not self.drop_last:\n yield batch\n\n def __len__(self):\n if self.drop_last:\n return self.first_size // self.batch_size\n +((self.data_len - self.first_size) // self.batch_size)\n else:\n return (self.first_size + self.batch_size - 1) // self.batch_size\n +((self.data_len - self.first_size + self.batch_size - 1) //\n self.batch_size)\n", "step-5": "from torch.utils.data.sampler import Sampler\nimport torch\nimport random\n\nclass SwitchingBatchSampler(Sampler):\n\n\tdef __init__(self, data_source, batch_size, drop_last=False):\n\t\tself.data_source = data_source\n\t\tself.batch_size = batch_size\n\t\tself.drop_last = drop_last\n\n\t\t# Divide the indices into two indices groups\n\t\tself.data_len = len(self.data_source)\n\t\tcount = 0\n\t\tfor i in range(self.data_len):\n\t\t\tif self.data_source.imgs[i][1] == 1:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcount += 1\n\n\t\tprint(\"Total Images: %d [Class 0: %d, Class 1: %d]\\n\"%(self.data_len, count, 
(self.data_len-count)))\n\n\t\tself.first_size = count\n\n\t\tif random.uniform(0, 1) > 0.5:\n\t\t\tself.turn = 0\n\t\telse:\n\t\t\tself.turn = 1\n\n\n\tdef __iter__(self):\n\t\t# Initialize both iters\n\t\tsecond_size = self.data_len - self.first_size\n\t\tself.first_iter = iter(torch.randperm(self.first_size))\n\t\tself.second_iter = iter(torch.randperm(second_size) + self.first_size)\n\n\t\t# Counting variables\n\t\ti = 0\n\t\tcount_first = 0 # Counts how many imgs of first iter has been returned\n\t\tcount_second = 0 # Counts second iter\t\t\n\t\tbatch = []\n\n\t\t# Until no data left, keep iterating\n\t\twhile count_first+count_second < self.data_len:\n\t\t\t# Fill the batch\n\t\t\tif self.turn == 0:\n\t\t\t\tif count_first == self.first_size:\n\t\t\t\t\tself.turn = 1\n\t\t\t\t\tif len(batch) > 0 and not self.drop_last:\n\t\t\t\t\t\tyield batch\n\t\t\t\t\tbatch = [] \t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tbatch.append(next(self.first_iter))\n\t\t\t\t\tcount_first += 1\n\t\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tif count_second == (self.data_len-self.first_size):\n\t\t\t\t\tself.turn = 0\n\t\t\t\t\tif len(batch) > 0 and not self.drop_last:\n\t\t\t\t\t\tyield batch\n\t\t\t\t\tbatch = [] \t\n\t\t\t\telse:\n\t\t\t\t\tbatch.append(next(self.second_iter))\n\t\t\t\t\tcount_second += 1\n\t\t\t\t\ti += 1\n\t\t\t# Yield the batch and switch the turn randomly\n\t\t\tif i != 0 and i % self.batch_size == 0:\n\t\t\t\tyield batch\n\t\t\t\tbatch = []\n\t\t\t\tif count_first != self.first_size and count_second != second_size and random.uniform(0, 1) > 0.5:\n\t\t\t\t\tself.turn = (self.turn + 1) % 2\n\n\t\t# If drop_last is False, return the rest\n\t\tif len(batch) > 0 and not self.drop_last:\n\t\t\tyield batch\n\n\n\tdef __len__(self):\n\t\tif self.drop_last:\n\t\t\treturn (self.first_size // self.batch_size)\n\t\t\t+ ((self.data_len - self.first_size) // self.batch_size)\n\t\telse:\n\t\t\treturn ((self.first_size + self.batch_size - 1) // self.batch_size)\n\t\t\t+ ((self.data_len - self.first_size + self.batch_size - 1) // self.batch_size)", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# -*- coding: utf-8 -*-
'''
=======================================================================
AutoTest Team Source File.
Copyright(C), Changyou.com
-----------------------------------------------------------------------
Created: 2017/3/2 by ChengLongLong
-----------------------------------------------------------------------
Description:
-----------------------------------------------------------------------
History:
2017/3/2
=======================================================================
'''
normal
{ "blob_id": "38f7c529cd0a8d85de266c6a932e6c8342aee273", "index": 4969, "step-1": "<mask token>\n", "step-2": "# -*- coding: utf-8 -*-\n'''\n=======================================================================\nAutoTest Team Source File.\nCopyright(C), Changyou.com\n-----------------------------------------------------------------------\nCreated: 2017/3/2 by ChengLongLong\n-----------------------------------------------------------------------\nDescription: \n-----------------------------------------------------------------------\nHistory: \n2017/3/2 \n=======================================================================\n'''", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
import time

from helpers.handler import port_handler
from helpers.functions import fetch_all


class ascii_handler(port_handler):
    """
    Serve ASCII server list
    """

    def handle_data(self):
        """
        Show a nicely formatted server list and immediately close connection
        """
        self.ls.log.info("Sending ascii server list to %s" % self.ip)

        self.cleanup()
        servers = fetch_all(
            "SELECT * FROM servers WHERE max > 0 ORDER BY prefer DESC, private ASC, (players = max) ASC, players DESC, created ASC")

        asciilist = ""

        server_count = 0
        for server in servers:
            try:
                entry = server['ip'] + ':' + str(server['port']) + ' '  # ip:port
                entry += 'local ' if server['remote'] == 0 else 'mirror '  # 'local' or 'mirror'
                entry += 'public ' if server['private'] == 0 else 'private '  # 'public' or 'private'
                entry += server['mode'] + ' '  # game mode
                entry += server['version'][:6].ljust(6, ' ') + ' '  # version
                entry += str(int(time.time()) - int(server['created'])) + ' '  # uptime in seconds
                entry += '[' + str(server['players']) + '/' + str(server['max']) + '] '  # [players/max]
                entry += server['name'] + "\r\n"  # server name
                asciilist += entry
                server_count += 1
            except TypeError:
                continue

        self.msg(asciilist)
        self.end()
normal
{ "blob_id": "cbf93eb96f40ff0aedc4b8d9238669da72934b27", "index": 2400, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ascii_handler(port_handler):\n <mask token>\n\n def handle_data(self):\n \"\"\"\n Show a nicely formatted server list and immediately close connection\n \"\"\"\n self.ls.log.info('Sending ascii server list to %s' % self.ip)\n self.cleanup()\n servers = fetch_all(\n 'SELECT * FROM servers WHERE max > 0 ORDER BY prefer DESC, private ASC, (players = max) ASC, players DESC, created ASC'\n )\n asciilist = ''\n server_count = 0\n for server in servers:\n try:\n entry = server['ip'] + ':' + str(server['port']) + ' '\n entry += 'local ' if server['remote'] == 0 else 'mirror '\n entry += 'public ' if server['private'] == 0 else 'private '\n entry += server['mode'] + ' '\n entry += server['version'][:6].ljust(6, ' ') + ' '\n entry += str(int(time.time()) - int(server['created'])) + ' '\n entry += '[' + str(server['players']) + '/' + str(server['max']\n ) + '] '\n entry += server['name'] + '\\r\\n'\n asciilist += entry\n server_count += 1\n except TypeError:\n continue\n self.msg(asciilist)\n self.end()\n", "step-3": "<mask token>\n\n\nclass ascii_handler(port_handler):\n \"\"\"\n Serve ASCII server list\n \"\"\"\n\n def handle_data(self):\n \"\"\"\n Show a nicely formatted server list and immediately close connection\n \"\"\"\n self.ls.log.info('Sending ascii server list to %s' % self.ip)\n self.cleanup()\n servers = fetch_all(\n 'SELECT * FROM servers WHERE max > 0 ORDER BY prefer DESC, private ASC, (players = max) ASC, players DESC, created ASC'\n )\n asciilist = ''\n server_count = 0\n for server in servers:\n try:\n entry = server['ip'] + ':' + str(server['port']) + ' '\n entry += 'local ' if server['remote'] == 0 else 'mirror '\n entry += 'public ' if server['private'] == 0 else 'private '\n entry += server['mode'] + ' '\n entry += server['version'][:6].ljust(6, ' ') + ' '\n entry += str(int(time.time()) - int(server['created'])) + ' '\n entry += '[' + str(server['players']) + '/' + str(server['max']\n ) + '] '\n entry += server['name'] + '\\r\\n'\n asciilist += entry\n server_count += 1\n except TypeError:\n continue\n self.msg(asciilist)\n self.end()\n", "step-4": "import time\nfrom helpers.handler import port_handler\nfrom helpers.functions import fetch_all\n\n\nclass ascii_handler(port_handler):\n \"\"\"\n Serve ASCII server list\n \"\"\"\n\n def handle_data(self):\n \"\"\"\n Show a nicely formatted server list and immediately close connection\n \"\"\"\n self.ls.log.info('Sending ascii server list to %s' % self.ip)\n self.cleanup()\n servers = fetch_all(\n 'SELECT * FROM servers WHERE max > 0 ORDER BY prefer DESC, private ASC, (players = max) ASC, players DESC, created ASC'\n )\n asciilist = ''\n server_count = 0\n for server in servers:\n try:\n entry = server['ip'] + ':' + str(server['port']) + ' '\n entry += 'local ' if server['remote'] == 0 else 'mirror '\n entry += 'public ' if server['private'] == 0 else 'private '\n entry += server['mode'] + ' '\n entry += server['version'][:6].ljust(6, ' ') + ' '\n entry += str(int(time.time()) - int(server['created'])) + ' '\n entry += '[' + str(server['players']) + '/' + str(server['max']\n ) + '] '\n entry += server['name'] + '\\r\\n'\n asciilist += entry\n server_count += 1\n except TypeError:\n continue\n self.msg(asciilist)\n self.end()\n", "step-5": "import time\n\nfrom helpers.handler import port_handler\nfrom helpers.functions import fetch_all\n\n\nclass ascii_handler(port_handler):\n \"\"\"\n Serve ASCII 
server list\n \"\"\"\n\n def handle_data(self):\n \"\"\"\n Show a nicely formatted server list and immediately close connection\n \"\"\"\n self.ls.log.info(\"Sending ascii server list to %s\" % self.ip)\n\n self.cleanup()\n servers = fetch_all(\n \"SELECT * FROM servers WHERE max > 0 ORDER BY prefer DESC, private ASC, (players = max) ASC, players DESC, created ASC\")\n\n asciilist = \"\"\n\n server_count = 0\n for server in servers:\n try:\n entry = server['ip'] + ':' + str(server['port']) + ' ' # ip:port\n entry += 'local ' if server['remote'] == 0 else 'mirror ' # 'local' or 'mirror'\n entry += 'public ' if server['private'] == 0 else 'private ' # 'public' or 'private'\n entry += server['mode'] + ' ' # game mode\n entry += server['version'][:6].ljust(6, ' ') + ' ' # version\n entry += str(int(time.time()) - int(server['created'])) + ' ' # uptime in seconds\n entry += '[' + str(server['players']) + '/' + str(server['max']) + '] ' # [players/max]\n entry += server['name'] + \"\\r\\n\" # server name\n asciilist += entry\n server_count += 1\n except TypeError:\n continue\n\n self.msg(asciilist)\n self.end()\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
class NlpUtility:
    <|reserved_special_token_0|>

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)
<|reserved_special_token_1|>
class NlpUtility:
    <|reserved_special_token_0|>

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)

    def get_verbs(self, tokens):
        verbs = []
        for word, pos in tokens:
            if pos == 'VB':
                nouns.push(word)
    <|reserved_special_token_0|>

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)
<|reserved_special_token_1|>
class NlpUtility:
    <|reserved_special_token_0|>

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)

    def get_verbs(self, tokens):
        verbs = []
        for word, pos in tokens:
            if pos == 'VB':
                nouns.push(word)

    def get_adjectives(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)
<|reserved_special_token_1|>
class NlpUtility:
    """
    Utility methods to get particular parts of speech from a token set
    """

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)

    def get_verbs(self, tokens):
        verbs = []
        for word, pos in tokens:
            if pos == 'VB':
                nouns.push(word)

    def get_adjectives(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == 'NN':
                nouns.push(word)
<|reserved_special_token_1|>
class NlpUtility():
    """
    Utility methods to get particular parts of speech from a token set
    """
    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == "NN":
                nouns.push(word)

    def get_verbs(self, tokens):
        verbs = []
        for word, pos in tokens:
            if pos == "VB":
                nouns.push(word)

    def get_adjectives(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == "NN":
                nouns.push(word)

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == "NN":
                nouns.push(word)

    def get_nouns(self, tokens):
        nouns = []
        for word, pos in tokens:
            if pos == "NN":
                nouns.push(word)
flexible
{ "blob_id": "c6502ea2b32ad90c76b6dfaf3ee3218d029eba15", "index": 56, "step-1": "class NlpUtility:\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n <mask token>\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n", "step-2": "class NlpUtility:\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n", "step-3": "class NlpUtility:\n <mask token>\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n\n def get_adjectives(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n", "step-4": "class NlpUtility:\n \"\"\"\n\t\tUtility methods to get particular parts of speech from a token set\n\t\"\"\"\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_verbs(self, tokens):\n verbs = []\n for word, pos in tokens:\n if pos == 'VB':\n nouns.push(word)\n\n def get_adjectives(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n\n def get_nouns(self, tokens):\n nouns = []\n for word, pos in tokens:\n if pos == 'NN':\n nouns.push(word)\n", "step-5": "class NlpUtility():\n\t\"\"\"\n\t\tUtility methods to get particular parts of speech from a token set\n\t\"\"\"\n\tdef get_nouns(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_verbs(self, tokens):\n\t\tverbs = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"VB\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_adjectives(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_nouns(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n\n\tdef get_nouns(self, tokens):\n\t\tnouns = []\n\t\tfor word, pos in tokens:\n\t\t\tif pos == \"NN\":\n\t\t\t\tnouns.push(word)\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
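The NlpUtility sample above carries the same defects in every step: Python lists have no push method, none of the getters return the list they build, get_verbs appends to an undefined nouns variable, and get_adjectives filters on the noun tag 'NN' instead of an adjective tag. A corrected sketch, assuming Penn-Treebank-style (word, tag) pairs such as those produced by nltk.pos_tag (the tagger choice is an assumption, not part of the sample):

class NlpUtility:
    """
    Utility methods to get particular parts of speech from a token set
    """

    def get_nouns(self, tokens):
        # singular common nouns are tagged 'NN'; use append-style collection and return the result
        return [word for word, pos in tokens if pos == 'NN']

    def get_verbs(self, tokens):
        # base-form verbs are tagged 'VB'
        return [word for word, pos in tokens if pos == 'VB']

    def get_adjectives(self, tokens):
        # adjectives are tagged 'JJ', not 'NN'
        return [word for word, pos in tokens if pos == 'JJ']

# e.g. NlpUtility().get_nouns([('snake', 'NN'), ('moves', 'VBZ')]) returns ['snake']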
''' Implement GreedyMotifSearch http://rosalind.info/problems/ba2d/ Given: Integers k and t, followed by a collection of strings Dna. Return: A collection of strings BestMotifs resulting from running GreedyMotifSearch(Dna, k, t). If at any step you find more than one Profile-most probable k-mer in a given string, use the one occurring first. ''' import pandas as pd from ba1g import hamming_distance from ba2c import profile_most_probable filename = 'rosalind_ba2d.txt' BASES = ['A', 'C', 'G', 'T'] def greedy_motif_search(dnas, k, t): # took ~4 min to run on test dataset but seems to be the correct algorithm # based on pseudocode (and other peoples' submissions) best_motifs = [dna[:k] for dna in dnas] best_score = score_motifs(best_motifs) for i in range(len(dnas[0]) - k + 1): print(i) motifs = [dnas[0][i:i+k]] for j in range(1, t): motifs.append(profile_most_probable(dnas[j], k, form_profile(motifs))) score = score_motifs(motifs) if score < best_score: best_motifs = motifs best_score = score return best_motifs def form_profile(motifs): profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES) for motif in motifs: for i, base in enumerate(motif): profile.loc[base, i] += 1 return profile / len(motifs) def score_motifs(motifs): # couldn't figure out what 'score' from pseudocode meant :( # had to reference someone else's code: # https://github.com/NathanielLovin/Rosalind/blob/master/BA2D.py profile = form_profile(motifs) # neat df function generates the consensus string consensus = ''.join(profile.idxmax()) return sum(hamming_distance(motif, consensus) for motif in motifs) def main(): with open(filename) as f: k, t = list(map(int, f.readline().strip().split())) dnas = [line.strip() for line in f.readlines()] for motif in greedy_motif_search(dnas, k, t): print(motif) if __name__ == '__main__': main()
normal
{ "blob_id": "ed7fa6e6f30eb06400cb38128617967a597f6c04", "index": 2450, "step-1": "<mask token>\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\ndef main():\n with open(filename) as f:\n k, t = list(map(int, f.readline().strip().split()))\n dnas = [line.strip() for line in f.readlines()]\n for motif in greedy_motif_search(dnas, k, t):\n print(motif)\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\nfilename = 'rosalind_ba2d.txt'\nBASES = ['A', 'C', 'G', 'T']\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\ndef main():\n with open(filename) as f:\n k, t = list(map(int, f.readline().strip().split()))\n dnas = [line.strip() for line in f.readlines()]\n for motif in greedy_motif_search(dnas, k, t):\n print(motif)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "<mask token>\nimport pandas as pd\nfrom ba1g import hamming_distance\nfrom ba2c import profile_most_probable\nfilename = 'rosalind_ba2d.txt'\nBASES = ['A', 'C', 'G', 'T']\n\n\ndef greedy_motif_search(dnas, k, t):\n best_motifs = [dna[:k] for dna in dnas]\n best_score = score_motifs(best_motifs)\n for i in range(len(dnas[0]) - k + 1):\n 
print(i)\n motifs = [dnas[0][i:i + k]]\n for j in range(1, t):\n motifs.append(profile_most_probable(dnas[j], k, form_profile(\n motifs)))\n score = score_motifs(motifs)\n if score < best_score:\n best_motifs = motifs\n best_score = score\n return best_motifs\n\n\ndef form_profile(motifs):\n profile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n for motif in motifs:\n for i, base in enumerate(motif):\n profile.loc[base, i] += 1\n return profile / len(motifs)\n\n\ndef score_motifs(motifs):\n profile = form_profile(motifs)\n consensus = ''.join(profile.idxmax())\n return sum(hamming_distance(motif, consensus) for motif in motifs)\n\n\ndef main():\n with open(filename) as f:\n k, t = list(map(int, f.readline().strip().split()))\n dnas = [line.strip() for line in f.readlines()]\n for motif in greedy_motif_search(dnas, k, t):\n print(motif)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "'''\nImplement GreedyMotifSearch\nhttp://rosalind.info/problems/ba2d/\n\nGiven: Integers k and t, followed by a collection of strings Dna.\n\nReturn: A collection of strings BestMotifs resulting from running GreedyMotifSearch(Dna, k, t). If at any step you find more than one Profile-most probable k-mer in a given string, use the one occurring first.\n'''\nimport pandas as pd\n\nfrom ba1g import hamming_distance\nfrom ba2c import profile_most_probable\n\nfilename = 'rosalind_ba2d.txt'\nBASES = ['A', 'C', 'G', 'T']\n\ndef greedy_motif_search(dnas, k, t):\n\t# took ~4 min to run on test dataset but seems to be the correct algorithm\n\t# based on pseudocode (and other peoples' submissions)\n\tbest_motifs = [dna[:k] for dna in dnas]\n\tbest_score = score_motifs(best_motifs)\n\tfor i in range(len(dnas[0]) - k + 1):\n\t\tprint(i)\n\t\tmotifs = [dnas[0][i:i+k]]\n\t\tfor j in range(1, t):\n\t\t\tmotifs.append(profile_most_probable(dnas[j], k, form_profile(motifs)))\n\t\tscore = score_motifs(motifs)\n\t\tif score < best_score:\n\t\t\tbest_motifs = motifs\n\t\t\tbest_score = score\n\treturn best_motifs\n\ndef form_profile(motifs):\n\tprofile = pd.DataFrame(0, columns=range(len(motifs[0])), index=BASES)\n\tfor motif in motifs:\n\t\tfor i, base in enumerate(motif):\n\t\t\tprofile.loc[base, i] += 1\n\treturn profile / len(motifs)\n\ndef score_motifs(motifs):\n\t# couldn't figure out what 'score' from pseudocode meant :(\n\t# had to reference someone else's code:\n\t# https://github.com/NathanielLovin/Rosalind/blob/master/BA2D.py\n\tprofile = form_profile(motifs)\n\t# neat df function generates the consensus string\n\tconsensus = ''.join(profile.idxmax())\n\treturn sum(hamming_distance(motif, consensus) for motif in motifs)\n\ndef main():\n\twith open(filename) as f:\n\t\tk, t = list(map(int, f.readline().strip().split()))\n\t\tdnas = [line.strip() for line in f.readlines()]\n\tfor motif in greedy_motif_search(dnas, k, t):\n\t\tprint(motif)\n\nif __name__ == '__main__':\n\tmain()\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
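The GreedyMotifSearch record above imports hamming_distance from ba1g and profile_most_probable from ba2c, neither of which is included in this sample. A minimal sketch of both, assuming the profile argument is the pandas DataFrame built by form_profile (bases 'A'/'C'/'G'/'T' as the index, integer positions as columns); strict comparison keeps the first k-mer on ties, matching the problem statement:

def hamming_distance(s, t):
    # number of positions at which two equal-length strings differ
    return sum(a != b for a, b in zip(s, t))


def profile_most_probable(dna, k, profile):
    # return the k-mer of dna with the highest probability under the profile matrix
    best_kmer, best_prob = dna[:k], -1.0
    for i in range(len(dna) - k + 1):
        kmer = dna[i:i + k]
        prob = 1.0
        for j, base in enumerate(kmer):
            prob *= profile.loc[base, j]
        if prob > best_prob:  # strict '>' means the first occurrence wins on ties
            best_kmer, best_prob = kmer, prob
    return best_kmer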
# joiner = '+' # seq = ["Sushil","Bahadur","KC"] # txt = joiner.join(seq) # txt # txt = " Sam " # ljus = txt.ljust(7,"*") # ljus # txtstrip = txt.strip().strip('S') # txtstrip # txt = "This is my world." # txtSplit = txt.split(maxsplit=1) # txtSplit # name = input("Enter your full name") # name = name.strip() # txt = name.split() # print("First Name:",txt[0]) # print("Last Name:",txt[1]) # txt = "Amet sint ipsum aliquip ea velit minim.\n \ # Consequat esse do laboris nisi proident nisi tempor magna.\n \ # Occaecat occaecat id qui veniam deserunt ullamco laborum consequat sint ullamco.\n \ # Eu Lorem nisi mollit pariatur commodo minim eu reprehenderit magna ipsum consequat." # print(txt) # newData = txt.splitlines() # newData # Sequence #2. List list1 = ["Sam", "Rocky", 1989, 1890] print(type(list1)) print(list1[0]) print(list1[len(list1)-1]) list1[0] = 6781 print(list1) list4 = list1[2:4] list4 list4 = list1[::-1] list4 list4 = list1[::2] list4 list4 = list1[2:0:-1] list4 list4 = list1+['Hello',2] list4 list4 = list1*2 list4 list1.append("Sam") list1 list1.remove(6781) list1 del list1[2] list1
normal
{ "blob_id": "32b22cccac75c87b8638c76c0c6d27db0de4d750", "index": 8480, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(type(list1))\nprint(list1[0])\nprint(list1[len(list1) - 1])\n<mask token>\nprint(list1)\n<mask token>\nlist4\n<mask token>\nlist4\n<mask token>\nlist4\n<mask token>\nlist4\n<mask token>\nlist4\n<mask token>\nlist4\nlist1.append('Sam')\nlist1\nlist1.remove(6781)\nlist1\ndel list1[2]\nlist1\n", "step-3": "list1 = ['Sam', 'Rocky', 1989, 1890]\nprint(type(list1))\nprint(list1[0])\nprint(list1[len(list1) - 1])\nlist1[0] = 6781\nprint(list1)\nlist4 = list1[2:4]\nlist4\nlist4 = list1[::-1]\nlist4\nlist4 = list1[::2]\nlist4\nlist4 = list1[2:0:-1]\nlist4\nlist4 = list1 + ['Hello', 2]\nlist4\nlist4 = list1 * 2\nlist4\nlist1.append('Sam')\nlist1\nlist1.remove(6781)\nlist1\ndel list1[2]\nlist1\n", "step-4": "# joiner = '+'\n# seq = [\"Sushil\",\"Bahadur\",\"KC\"]\n# txt = joiner.join(seq)\n# txt\n# txt = \" Sam \"\n# ljus = txt.ljust(7,\"*\")\n# ljus\n# txtstrip = txt.strip().strip('S')\n# txtstrip\n# txt = \"This is my world.\"\n# txtSplit = txt.split(maxsplit=1)\n# txtSplit\n\n# name = input(\"Enter your full name\")\n# name = name.strip()\n# txt = name.split()\n# print(\"First Name:\",txt[0])\n# print(\"Last Name:\",txt[1])\n\n\n# txt = \"Amet sint ipsum aliquip ea velit minim.\\n \\\n# Consequat esse do laboris nisi proident nisi tempor magna.\\n \\\n# Occaecat occaecat id qui veniam deserunt ullamco laborum consequat sint ullamco.\\n \\\n# Eu Lorem nisi mollit pariatur commodo minim eu reprehenderit magna ipsum consequat.\"\n# print(txt)\n# newData = txt.splitlines()\n# newData\n\n# Sequence\n#2. List\nlist1 = [\"Sam\", \"Rocky\", 1989, 1890]\nprint(type(list1))\nprint(list1[0])\nprint(list1[len(list1)-1])\nlist1[0] = 6781\nprint(list1)\nlist4 = list1[2:4]\nlist4\nlist4 = list1[::-1]\nlist4\nlist4 = list1[::2]\nlist4\nlist4 = list1[2:0:-1]\nlist4\nlist4 = list1+['Hello',2]\nlist4\nlist4 = list1*2\nlist4\nlist1.append(\"Sam\")\nlist1\nlist1.remove(6781)\nlist1\ndel list1[2]\nlist1\n\n\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
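The list exercises above read more easily with the intermediate values written out; every result below can be checked in a Python 3 interpreter:

list1 = ['Sam', 'Rocky', 1989, 1890]
list1[0] = 6781          # list1 is now [6781, 'Rocky', 1989, 1890]
list1[2:4]               # [1989, 1890]
list1[::-1]              # [1890, 1989, 'Rocky', 6781]
list1[::2]               # [6781, 1989]
list1[2:0:-1]            # [1989, 'Rocky']
list1 + ['Hello', 2]     # [6781, 'Rocky', 1989, 1890, 'Hello', 2]
list1 * 2                # [6781, 'Rocky', 1989, 1890, 6781, 'Rocky', 1989, 1890]
list1.append('Sam')      # list1 is now [6781, 'Rocky', 1989, 1890, 'Sam']
list1.remove(6781)       # list1 is now ['Rocky', 1989, 1890, 'Sam']
del list1[2]             # list1 is now ['Rocky', 1989, 'Sam']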
import pygame from .Coin import Coin from .Snake import Snake, Block from .Bomb import Bomb from .Rocket import Rocket from pygame.math import Vector2 cell_size = 16 cell_number = 30 sprite_cell = pygame.image.load("Assets/Cell.png") bg = pygame.image.load("Assets/BG.png") bg2 = pygame.image.load("Assets/BG2.png") class GAME(): def __init__(self, mode) -> None: self.playing = 0 self.mode = mode # Classic mode # Colorfull mode with assets etc self.coin = Coin(self.mode) self.moving_coin = pygame.sprite.Group() self.moving_coin.add(self.coin) self.snake = Snake(self.mode) self.bombs = [Bomb(self.mode)] self.rockets = [] self.condition = 4 self.crowd = 2 self.count = 0 self.anim_pos = [Vector2(-1,-1), Vector2(-1,-1), Vector2(-1,-1)] self.game_timer = 0 self.game_over = False # self.acc = 0.1 # self.difficulty = 0 def refresh(self, mode): self.__init__(mode) return 1, 1 def update(self): self.snake.move_snake() self.check_collision() self.check_fail() self.rem_rockets() def rem_rockets(self): for rocket in self.rockets: if not rocket.out_of_frame(): self.rockets.remove(rocket) def check_timer(self): if self.count >= self.crowd: self.game_timer += 1 if self.game_timer > 50: self.game_timer = 0 self.rockets.append(Rocket(self.mode)) def draw_elements(self, screen): if self.mode == 0: screen.blit(bg, (0, 0)) elif self.mode == 1: screen.fill((155, 199, 167)) self.coin.draw_coin(screen) self.snake.draw_snake(screen) self.check_timer() if self.count >= self.condition: self.bombs.insert(0, Bomb(self.mode)) self.condition = self.condition * 2 for rocket in self.rockets: rocket.draw_rocket(screen) for bomb in self.bombs: bomb.draw_bomb(screen) def check_position(self): for bomb in self.bombs: if self.coin.position != bomb.position: self.coin.randomize() else: self.check_position() def check_collision(self): if self.coin.position == self.snake.body[0]: self.count += 1 self.check_position() self.snake.add_block() for rocket in self.rockets: for i, block in enumerate(self.snake.body[:-1]): if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect): self.snake.remove_block(i) self.anim_pos[0] = Vector2(block.x, block.y) for bomb in self.bombs: if bomb.bomb_rect.colliderect(rocket.small_rect): self.anim_pos[1] = bomb.position if len(self.bombs) > 1 : self.bombs.remove(bomb) else: bomb.randomize() if rocket.rocket_rect.colliderect(self.coin.coin_rect): self.anim_pos[2] = Vector2(self.coin.x, self.coin.y) self.coin.randomize() def check_fail(self): if not 0 <= self.snake.body[0].x < cell_number or not 0 <= self.snake.body[0].y < cell_number: self.game_over = 1 for block in self.snake.body[1:] : if block == self.snake.body[0]: self.game_over = 1 for rocket in self.rockets: if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x, self.snake.body[0].y).rect): self.game_over = 1 for bomb in self.bombs: if bomb.position == self.snake.body[0]: self.game_over = 1
normal
{ "blob_id": "2b14607aa2527f5da57284917d06ea60e89f784c", "index": 1659, "step-1": "<mask token>\n\n\nclass GAME:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n <mask token>\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect\n ):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1:\n self.bombs.remove(bomb)\n else:\n bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n def check_fail(self):\n if not 0 <= self.snake.body[0\n ].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n for block in self.snake.body[1:]:\n if block == self.snake.body[0]:\n self.game_over = 1\n for rocket in self.rockets:\n if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,\n self.snake.body[0].y).rect):\n self.game_over = 1\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1\n", "step-2": "<mask token>\n\n\nclass GAME:\n <mask token>\n <mask token>\n <mask token>\n\n def rem_rockets(self):\n for rocket in self.rockets:\n if not rocket.out_of_frame():\n self.rockets.remove(rocket)\n\n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n\n def draw_elements(self, screen):\n if self.mode == 0:\n screen.blit(bg, (0, 0))\n elif self.mode == 1:\n screen.fill((155, 199, 167))\n self.coin.draw_coin(screen)\n self.snake.draw_snake(screen)\n self.check_timer()\n if self.count >= self.condition:\n self.bombs.insert(0, Bomb(self.mode))\n self.condition = self.condition * 2\n for rocket in self.rockets:\n rocket.draw_rocket(screen)\n for bomb in self.bombs:\n bomb.draw_bomb(screen)\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect\n ):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1:\n self.bombs.remove(bomb)\n else:\n bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n def check_fail(self):\n if not 0 <= self.snake.body[0\n ].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n for block in self.snake.body[1:]:\n if 
block == self.snake.body[0]:\n self.game_over = 1\n for rocket in self.rockets:\n if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,\n self.snake.body[0].y).rect):\n self.game_over = 1\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1\n", "step-3": "<mask token>\n\n\nclass GAME:\n\n def __init__(self, mode) ->None:\n self.playing = 0\n self.mode = mode\n self.coin = Coin(self.mode)\n self.moving_coin = pygame.sprite.Group()\n self.moving_coin.add(self.coin)\n self.snake = Snake(self.mode)\n self.bombs = [Bomb(self.mode)]\n self.rockets = []\n self.condition = 4\n self.crowd = 2\n self.count = 0\n self.anim_pos = [Vector2(-1, -1), Vector2(-1, -1), Vector2(-1, -1)]\n self.game_timer = 0\n self.game_over = False\n\n def refresh(self, mode):\n self.__init__(mode)\n return 1, 1\n <mask token>\n\n def rem_rockets(self):\n for rocket in self.rockets:\n if not rocket.out_of_frame():\n self.rockets.remove(rocket)\n\n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n\n def draw_elements(self, screen):\n if self.mode == 0:\n screen.blit(bg, (0, 0))\n elif self.mode == 1:\n screen.fill((155, 199, 167))\n self.coin.draw_coin(screen)\n self.snake.draw_snake(screen)\n self.check_timer()\n if self.count >= self.condition:\n self.bombs.insert(0, Bomb(self.mode))\n self.condition = self.condition * 2\n for rocket in self.rockets:\n rocket.draw_rocket(screen)\n for bomb in self.bombs:\n bomb.draw_bomb(screen)\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect\n ):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1:\n self.bombs.remove(bomb)\n else:\n bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n def check_fail(self):\n if not 0 <= self.snake.body[0\n ].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n for block in self.snake.body[1:]:\n if block == self.snake.body[0]:\n self.game_over = 1\n for rocket in self.rockets:\n if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,\n self.snake.body[0].y).rect):\n self.game_over = 1\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1\n", "step-4": "<mask token>\n\n\nclass GAME:\n\n def __init__(self, mode) ->None:\n self.playing = 0\n self.mode = mode\n self.coin = Coin(self.mode)\n self.moving_coin = pygame.sprite.Group()\n self.moving_coin.add(self.coin)\n self.snake = Snake(self.mode)\n self.bombs = [Bomb(self.mode)]\n self.rockets = []\n self.condition = 4\n self.crowd = 2\n self.count = 0\n self.anim_pos = [Vector2(-1, -1), Vector2(-1, -1), Vector2(-1, -1)]\n self.game_timer = 0\n self.game_over = False\n\n def refresh(self, mode):\n self.__init__(mode)\n return 1, 1\n\n def update(self):\n self.snake.move_snake()\n self.check_collision()\n self.check_fail()\n 
self.rem_rockets()\n\n def rem_rockets(self):\n for rocket in self.rockets:\n if not rocket.out_of_frame():\n self.rockets.remove(rocket)\n\n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n\n def draw_elements(self, screen):\n if self.mode == 0:\n screen.blit(bg, (0, 0))\n elif self.mode == 1:\n screen.fill((155, 199, 167))\n self.coin.draw_coin(screen)\n self.snake.draw_snake(screen)\n self.check_timer()\n if self.count >= self.condition:\n self.bombs.insert(0, Bomb(self.mode))\n self.condition = self.condition * 2\n for rocket in self.rockets:\n rocket.draw_rocket(screen)\n for bomb in self.bombs:\n bomb.draw_bomb(screen)\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect\n ):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1:\n self.bombs.remove(bomb)\n else:\n bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n def check_fail(self):\n if not 0 <= self.snake.body[0\n ].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n for block in self.snake.body[1:]:\n if block == self.snake.body[0]:\n self.game_over = 1\n for rocket in self.rockets:\n if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x,\n self.snake.body[0].y).rect):\n self.game_over = 1\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1\n", "step-5": "import pygame\nfrom .Coin import Coin\nfrom .Snake import Snake, Block\nfrom .Bomb import Bomb\nfrom .Rocket import Rocket\nfrom pygame.math import Vector2\n\ncell_size = 16\ncell_number = 30\n\nsprite_cell = pygame.image.load(\"Assets/Cell.png\")\nbg = pygame.image.load(\"Assets/BG.png\")\nbg2 = pygame.image.load(\"Assets/BG2.png\")\n\nclass GAME():\n def __init__(self, mode) -> None:\n self.playing = 0\n\n self.mode = mode\n # Classic mode \n # Colorfull mode with assets etc\n\n self.coin = Coin(self.mode)\n\n self.moving_coin = pygame.sprite.Group()\n self.moving_coin.add(self.coin)\n\n self.snake = Snake(self.mode)\n self.bombs = [Bomb(self.mode)]\n self.rockets = []\n\n self.condition = 4\n self.crowd = 2\n self.count = 0\n\n self.anim_pos = [Vector2(-1,-1), Vector2(-1,-1), Vector2(-1,-1)]\n\n self.game_timer = 0\n\n self.game_over = False\n\n # self.acc = 0.1\n # self.difficulty = 0\n\n def refresh(self, mode):\n self.__init__(mode)\n return 1, 1\n\n def update(self):\n self.snake.move_snake()\n self.check_collision()\n self.check_fail()\n self.rem_rockets()\n \n def rem_rockets(self):\n for rocket in self.rockets:\n if not rocket.out_of_frame():\n self.rockets.remove(rocket)\n \n def check_timer(self):\n if self.count >= self.crowd:\n self.game_timer += 1\n if self.game_timer > 50:\n self.game_timer = 0\n self.rockets.append(Rocket(self.mode))\n\n def draw_elements(self, screen):\n if self.mode == 0:\n screen.blit(bg, (0, 0))\n 
elif self.mode == 1:\n screen.fill((155, 199, 167))\n self.coin.draw_coin(screen)\n self.snake.draw_snake(screen)\n self.check_timer()\n\n if self.count >= self.condition:\n self.bombs.insert(0, Bomb(self.mode))\n self.condition = self.condition * 2\n\n for rocket in self.rockets:\n rocket.draw_rocket(screen)\n\n for bomb in self.bombs:\n bomb.draw_bomb(screen)\n\n def check_position(self):\n for bomb in self.bombs:\n if self.coin.position != bomb.position:\n self.coin.randomize()\n else:\n self.check_position()\n\n def check_collision(self):\n if self.coin.position == self.snake.body[0]:\n self.count += 1\n self.check_position()\n self.snake.add_block()\n\n for rocket in self.rockets:\n for i, block in enumerate(self.snake.body[:-1]):\n if rocket.rocket_rect.colliderect(Block(block.x, block.y).rect):\n self.snake.remove_block(i)\n self.anim_pos[0] = Vector2(block.x, block.y)\n \n for bomb in self.bombs:\n if bomb.bomb_rect.colliderect(rocket.small_rect):\n self.anim_pos[1] = bomb.position\n if len(self.bombs) > 1 :\n self.bombs.remove(bomb)\n else:\n bomb.randomize()\n if rocket.rocket_rect.colliderect(self.coin.coin_rect):\n self.anim_pos[2] = Vector2(self.coin.x, self.coin.y)\n self.coin.randomize()\n\n\n def check_fail(self):\n if not 0 <= self.snake.body[0].x < cell_number or not 0 <= self.snake.body[0].y < cell_number:\n self.game_over = 1\n \n for block in self.snake.body[1:] :\n if block == self.snake.body[0]:\n self.game_over = 1\n\n for rocket in self.rockets:\n if rocket.rocket_rect.colliderect(Block(self.snake.body[0].x, self.snake.body[0].y).rect):\n self.game_over = 1\n\n for bomb in self.bombs:\n if bomb.position == self.snake.body[0]:\n self.game_over = 1", "step-ids": [ 5, 7, 9, 10, 13 ] }
[ 5, 7, 9, 10, 13 ]
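The GAME class above only models the snake game; Coin, Snake, Block, Bomb and Rocket live in sibling modules that are not part of this record, and no window or event loop is defined here. A minimal driver sketch, assuming a package path matching the relative imports (the import path and the 150 ms tick are assumptions; keyboard handling is omitted because the Snake class is not shown):

import pygame

pygame.init()
# the module above loads its sprite images at import time, so initialise pygame before importing it
from Game.Game import GAME  # hypothetical package path for the class above

screen = pygame.display.set_mode((16 * 30, 16 * 30))  # cell_size * cell_number
clock = pygame.time.Clock()
game = GAME(mode=0)

SCREEN_UPDATE = pygame.USEREVENT
pygame.time.set_timer(SCREEN_UPDATE, 150)  # advance the game every 150 ms

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == SCREEN_UPDATE and not game.game_over:
            game.update()  # move the snake, check collisions and fail states
    game.draw_elements(screen)  # draws background, coin, snake, bombs and rockets
    pygame.display.update()
    clock.tick(60)

pygame.quit()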
<|reserved_special_token_0|> <|reserved_special_token_1|> ba0563.pngMap = [ '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000001100110111111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000000000000000000000000000000000000000' , 
'00000000000000000000000000000000000000000000000000000000000000000000111111111111111111110000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111100000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111100000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000111101011100000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , 
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' , '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' ] <|reserved_special_token_1|> ba0563.pngMap = [ '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', 
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000001100110111111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000111111111111111111110000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000', 
'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111100000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111100000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000111101011100000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000', ]
flexible
{ "blob_id": "dab1adcd185092fc425b5d87150f27e7b67bff6c", "index": 151, "step-1": "<mask token>\n", "step-2": "ba0563.pngMap = [\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000001100110111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000000000000000000000000000'\n ,\n 
'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000111111111111111111110000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111100000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111100000000000000000000000000000000000000'\n ,\n 
'00000000000000000000000000000000000000000000000000000000000000000000000000000000111101011100000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ]\n", "step-3": "ba0563.pngMap = 
[\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000011111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000001100110111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000001111111111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000111111111111111111111110000000000000000000000000000000000000000',\n'000000000000000000000000000000000000000000000000000000000000000000001111111111111111111100000
00000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000011111111111111111110000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000111111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000111111111111100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111110000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111100000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111110000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000001111111111100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000111101011100000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n]\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def test_template():
    assert True
<|reserved_special_token_1|>
import pytest


def test_template():
    assert True
flexible
{ "blob_id": "e7fa84dbc037253c7f852aa618e6ea88d1fda909", "index": 1939, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef test_template():\n assert True\n", "step-3": "import pytest\n\n\ndef test_template():\n assert True\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
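The record above is a minimal pytest template whose only test always passes. As a purely illustrative sketch of how such a template is usually fleshed out, the snippet below adds a parametrized test; the `add` function and its test data are hypothetical placeholders of mine, not part of the original record.

import pytest


def add(a, b):
    # placeholder standing in for real project code
    return a + b


@pytest.mark.parametrize('a, b, expected', [(1, 2, 3), (-1, 1, 0), (0, 0, 0)])
def test_add(a, b, expected):
    # each parameter tuple runs as its own test case under `pytest -q`
    assert add(a, b) == expected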
import sys
byte = int(sys.argv[1])
qlty = float(sys.argv[2])
n = 0
while True:
    o = sys.stdin.read(byte)
    if qlty>(qlty*n)%1:
        oo = o
        sys.stdout.write(o)
    else:
        sys.stdout.write(oo)
    if not o:
        break
    n=n+1
normal
{ "blob_id": "70845ab4aab80d988a5c01d0b4fb76e63b800527", "index": 6484, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile True:\n o = sys.stdin.read(byte)\n if qlty > qlty * n % 1:\n oo = o\n sys.stdout.write(o)\n else:\n sys.stdout.write(oo)\n if not o:\n break\n n = n + 1\n", "step-3": "<mask token>\nbyte = int(sys.argv[1])\nqlty = float(sys.argv[2])\nn = 0\nwhile True:\n o = sys.stdin.read(byte)\n if qlty > qlty * n % 1:\n oo = o\n sys.stdout.write(o)\n else:\n sys.stdout.write(oo)\n if not o:\n break\n n = n + 1\n", "step-4": "import sys\nbyte = int(sys.argv[1])\nqlty = float(sys.argv[2])\nn = 0\nwhile True:\n o = sys.stdin.read(byte)\n if qlty > qlty * n % 1:\n oo = o\n sys.stdout.write(o)\n else:\n sys.stdout.write(oo)\n if not o:\n break\n n = n + 1\n", "step-5": "import sys\r\nbyte = int(sys.argv[1])\r\nqlty = float(sys.argv[2])\r\nn = 0\r\nwhile True:\r\n o = sys.stdin.read(byte)\r\n if qlty>(qlty*n)%1:\r\n oo = o\r\n sys.stdout.write(o)\r\n else:\r\n sys.stdout.write(oo)\r\n if not o:\r\n break\r\n n=n+1", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
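The script above copies fixed-size byte blocks from stdin to stdout and, whenever the quality factor says to drop a block, writes the previously kept block again instead, so the output length is preserved while the effective "frame rate" is reduced. Below is a minimal sketch of the same logic rewritten as a function over file-like objects so it can be exercised in memory; the name `decimate` and the test data are assumptions of mine, not taken from the record.

import io


def decimate(src, dst, block, qlty):
    # same control flow as the record above: keep a block when
    # qlty > (qlty * n) % 1, otherwise repeat the last kept block
    n = 0
    last = b''  # initialised here; the original relies on the first block always being kept
    while True:
        o = src.read(block)
        if qlty > (qlty * n) % 1:
            last = o
            dst.write(o)
        else:
            dst.write(last)
        if not o:
            break
        n = n + 1


src = io.BytesIO(b'ABCDEFGH')
dst = io.BytesIO()
decimate(src, dst, 2, 0.5)
print(dst.getvalue())  # b'ABABEFEF': blocks 'CD' and 'GH' are replaced by repeats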
<|reserved_special_token_0|> class cGAN: def __init__(self, input_dim1, input_dim2, input_dim3, latent_size): self.input_dim1 = input_dim1 self.input_dim2 = input_dim2 self.input_dim3 = input_dim3 self.latent_size = latent_size def discriminator(self): input_shape = self.input_dim1, self.input_dim2, self.input_dim3 input_cond = Input(shape=input_shape) input_x = Input(shape=input_shape) merge = Concatenate()([input_x, input_cond]) out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Flatten()(out) out = Dropout(0.5)(out) y = Dense(1, activation='sigmoid')(out) model = Model([input_x, input_cond], y) opt = Adam(lr=0.0002) model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[ 'accuracy']) return model def generator(self): image_dim = self.input_dim1 latent_shape = self.latent_size cond_shape = image_dim, image_dim, self.input_dim3 input_latent = Input(shape=(latent_shape,)) num_nodes = image_dim * image_dim latent = Dense(num_nodes)(input_latent) latent = LeakyReLU(alpha=0.2)(latent) latent = Reshape((image_dim, image_dim, 1))(latent) input_cond = Input(shape=cond_shape) cond = input_cond merge = Concatenate()([latent, cond]) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same' )(out) model = Model([input_latent, input_cond], x) return model def combined(self, g_model, d_model): d_model.trainable = False input_latent, input_cond = g_model.input x = g_model.output y = d_model([x, input_cond]) model = Model([input_latent, input_cond], y) opt = Adam(lr=0.0002, beta_1=0.5) model.compile(loss='binary_crossentropy', optimizer=opt) return model def generate_real_samples(self, focused, defocused, n_samples): idx = randint(0, focused.shape[0], n_samples) x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :] y_real = ones((n_samples, 1)) return [x_real, input_cond], y_real def generate_latent(self, latent_size, n_samples): total_latent = randn(latent_size * n_samples) input_z = total_latent.reshape(n_samples, latent_size) return input_z def generate_fake_samples(self, generator, defocused, latent_dim, n_samples ): idx = randint(0, defocused.shape[0], n_samples) input_cond = defocused[idx, :, :, :] input_z = self.generate_latent(latent_dim, n_samples) x_fake = generator.predict([input_z, input_cond]) y_fake = zeros((n_samples, 1)) return [x_fake, input_cond], y_fake <|reserved_special_token_0|> def train(self, g_model, d_model, gan_model, real, input_cond, latent_dim, n_epochs, n_batch, save): bat_per_epo = int(real.shape[0] / n_batch) half_batch = int(n_batch / 2) g_loss = np.zeros(n_epochs) d_loss_real = np.zeros(n_epochs) d_loss_fake = np.zeros(n_epochs) for i in range(n_epochs): start = timeit.default_timer() print('================== Epoch %d ==================\n' % (i + 1)) for j in range(bat_per_epo): [x_real, input_cond_real], y_real = self.generate_real_samples( real, input_cond, half_batch) d_loss_real[i], _ = d_model.train_on_batch([x_real, input_cond_real], y_real) [x_fake, input_cond_fake], y_fake = self.generate_fake_samples( g_model, input_cond, latent_dim, half_batch) d_loss_fake[i], _ = d_model.train_on_batch([x_fake, 
input_cond_fake], y_fake) [z_input, input_cond_gan], y_gan = self.generate_gan_input( input_cond, latent_dim, n_batch) g_loss[i] = gan_model.train_on_batch([z_input, input_cond_gan], y_gan) print('Completed: %.f' % np.divide((j + 1) * 100, bat_per_epo) + '%') print( 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f' % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\n') stop = timeit.default_timer() print('Time: %.2f min' % ((stop - start) / 60)) g_model.save('./models/cgan_' + save + '.h5') loss = np.array([d_loss_real, d_loss_fake, g_loss]) np.save('./models/cgan_loss_' + save, loss) def generate_fakes_givenOne(self, generator, focused, defocused, latent_dim, n_samples): idx = randint(0, defocused.shape[0], 1) x_real = focused[idx, :, :, :] input_cond = defocused[idx, :, :, :] input_cond = np.repeat(input_cond, n_samples, axis=0) input_z = self.generate_latent(latent_dim, n_samples) x_fake = generator.predict([input_z, input_cond]) return x_real, x_fake, input_cond[0, :, :, :] <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class cGAN: def __init__(self, input_dim1, input_dim2, input_dim3, latent_size): self.input_dim1 = input_dim1 self.input_dim2 = input_dim2 self.input_dim3 = input_dim3 self.latent_size = latent_size def discriminator(self): input_shape = self.input_dim1, self.input_dim2, self.input_dim3 input_cond = Input(shape=input_shape) input_x = Input(shape=input_shape) merge = Concatenate()([input_x, input_cond]) out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Flatten()(out) out = Dropout(0.5)(out) y = Dense(1, activation='sigmoid')(out) model = Model([input_x, input_cond], y) opt = Adam(lr=0.0002) model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[ 'accuracy']) return model def generator(self): image_dim = self.input_dim1 latent_shape = self.latent_size cond_shape = image_dim, image_dim, self.input_dim3 input_latent = Input(shape=(latent_shape,)) num_nodes = image_dim * image_dim latent = Dense(num_nodes)(input_latent) latent = LeakyReLU(alpha=0.2)(latent) latent = Reshape((image_dim, image_dim, 1))(latent) input_cond = Input(shape=cond_shape) cond = input_cond merge = Concatenate()([latent, cond]) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same' )(out) model = Model([input_latent, input_cond], x) return model def combined(self, g_model, d_model): d_model.trainable = False input_latent, input_cond = g_model.input x = g_model.output y = d_model([x, input_cond]) model = Model([input_latent, input_cond], y) opt = Adam(lr=0.0002, beta_1=0.5) model.compile(loss='binary_crossentropy', optimizer=opt) return model def generate_real_samples(self, focused, defocused, n_samples): idx = randint(0, focused.shape[0], n_samples) x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :] y_real = ones((n_samples, 1)) return [x_real, input_cond], y_real def generate_latent(self, latent_size, n_samples): total_latent = randn(latent_size * n_samples) input_z = total_latent.reshape(n_samples, latent_size) return input_z def generate_fake_samples(self, generator, 
defocused, latent_dim, n_samples ): idx = randint(0, defocused.shape[0], n_samples) input_cond = defocused[idx, :, :, :] input_z = self.generate_latent(latent_dim, n_samples) x_fake = generator.predict([input_z, input_cond]) y_fake = zeros((n_samples, 1)) return [x_fake, input_cond], y_fake <|reserved_special_token_0|> def train(self, g_model, d_model, gan_model, real, input_cond, latent_dim, n_epochs, n_batch, save): bat_per_epo = int(real.shape[0] / n_batch) half_batch = int(n_batch / 2) g_loss = np.zeros(n_epochs) d_loss_real = np.zeros(n_epochs) d_loss_fake = np.zeros(n_epochs) for i in range(n_epochs): start = timeit.default_timer() print('================== Epoch %d ==================\n' % (i + 1)) for j in range(bat_per_epo): [x_real, input_cond_real], y_real = self.generate_real_samples( real, input_cond, half_batch) d_loss_real[i], _ = d_model.train_on_batch([x_real, input_cond_real], y_real) [x_fake, input_cond_fake], y_fake = self.generate_fake_samples( g_model, input_cond, latent_dim, half_batch) d_loss_fake[i], _ = d_model.train_on_batch([x_fake, input_cond_fake], y_fake) [z_input, input_cond_gan], y_gan = self.generate_gan_input( input_cond, latent_dim, n_batch) g_loss[i] = gan_model.train_on_batch([z_input, input_cond_gan], y_gan) print('Completed: %.f' % np.divide((j + 1) * 100, bat_per_epo) + '%') print( 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f' % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\n') stop = timeit.default_timer() print('Time: %.2f min' % ((stop - start) / 60)) g_model.save('./models/cgan_' + save + '.h5') loss = np.array([d_loss_real, d_loss_fake, g_loss]) np.save('./models/cgan_loss_' + save, loss) def generate_fakes_givenOne(self, generator, focused, defocused, latent_dim, n_samples): idx = randint(0, defocused.shape[0], 1) x_real = focused[idx, :, :, :] input_cond = defocused[idx, :, :, :] input_cond = np.repeat(input_cond, n_samples, axis=0) input_z = self.generate_latent(latent_dim, n_samples) x_fake = generator.predict([input_z, input_cond]) return x_real, x_fake, input_cond[0, :, :, :] def generate_fakes_givenMany(self, generator, focused, defocused, latent_dim, n_examples): n_samples = n_examples - 2 x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape [2], focused.shape[3])) input_cond_many = np.zeros((n_examples, focused.shape[1], focused. 
shape[2], focused.shape[3])) x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1], focused.shape[2], focused.shape[3])) for i in range(n_examples): x_real_many[i, :, :, :], x_fake_many[i, :, :, :, : ], input_cond_many[i, :, :, :] = self.generate_fakes_givenOne( generator, focused, defocused, latent_dim, n_samples) return x_real_many, x_fake_many, input_cond_many <|reserved_special_token_1|> <|reserved_special_token_0|> class cGAN: def __init__(self, input_dim1, input_dim2, input_dim3, latent_size): self.input_dim1 = input_dim1 self.input_dim2 = input_dim2 self.input_dim3 = input_dim3 self.latent_size = latent_size def discriminator(self): input_shape = self.input_dim1, self.input_dim2, self.input_dim3 input_cond = Input(shape=input_shape) input_x = Input(shape=input_shape) merge = Concatenate()([input_x, input_cond]) out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Flatten()(out) out = Dropout(0.5)(out) y = Dense(1, activation='sigmoid')(out) model = Model([input_x, input_cond], y) opt = Adam(lr=0.0002) model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[ 'accuracy']) return model def generator(self): image_dim = self.input_dim1 latent_shape = self.latent_size cond_shape = image_dim, image_dim, self.input_dim3 input_latent = Input(shape=(latent_shape,)) num_nodes = image_dim * image_dim latent = Dense(num_nodes)(input_latent) latent = LeakyReLU(alpha=0.2)(latent) latent = Reshape((image_dim, image_dim, 1))(latent) input_cond = Input(shape=cond_shape) cond = input_cond merge = Concatenate()([latent, cond]) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same' )(out) model = Model([input_latent, input_cond], x) return model def combined(self, g_model, d_model): d_model.trainable = False input_latent, input_cond = g_model.input x = g_model.output y = d_model([x, input_cond]) model = Model([input_latent, input_cond], y) opt = Adam(lr=0.0002, beta_1=0.5) model.compile(loss='binary_crossentropy', optimizer=opt) return model def generate_real_samples(self, focused, defocused, n_samples): idx = randint(0, focused.shape[0], n_samples) x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :] y_real = ones((n_samples, 1)) return [x_real, input_cond], y_real def generate_latent(self, latent_size, n_samples): total_latent = randn(latent_size * n_samples) input_z = total_latent.reshape(n_samples, latent_size) return input_z def generate_fake_samples(self, generator, defocused, latent_dim, n_samples ): idx = randint(0, defocused.shape[0], n_samples) input_cond = defocused[idx, :, :, :] input_z = self.generate_latent(latent_dim, n_samples) x_fake = generator.predict([input_z, input_cond]) y_fake = zeros((n_samples, 1)) return [x_fake, input_cond], y_fake def generate_gan_input(self, defocused, latent_dim, n_samples): idx = randint(0, defocused.shape[0], n_samples) input_cond = defocused[idx, :, :, :] input_z = self.generate_latent(latent_dim, n_samples) y_gan = ones((n_samples, 1)) return [input_z, input_cond], y_gan def train(self, g_model, d_model, gan_model, real, input_cond, latent_dim, n_epochs, n_batch, save): bat_per_epo = 
int(real.shape[0] / n_batch) half_batch = int(n_batch / 2) g_loss = np.zeros(n_epochs) d_loss_real = np.zeros(n_epochs) d_loss_fake = np.zeros(n_epochs) for i in range(n_epochs): start = timeit.default_timer() print('================== Epoch %d ==================\n' % (i + 1)) for j in range(bat_per_epo): [x_real, input_cond_real], y_real = self.generate_real_samples( real, input_cond, half_batch) d_loss_real[i], _ = d_model.train_on_batch([x_real, input_cond_real], y_real) [x_fake, input_cond_fake], y_fake = self.generate_fake_samples( g_model, input_cond, latent_dim, half_batch) d_loss_fake[i], _ = d_model.train_on_batch([x_fake, input_cond_fake], y_fake) [z_input, input_cond_gan], y_gan = self.generate_gan_input( input_cond, latent_dim, n_batch) g_loss[i] = gan_model.train_on_batch([z_input, input_cond_gan], y_gan) print('Completed: %.f' % np.divide((j + 1) * 100, bat_per_epo) + '%') print( 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f' % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\n') stop = timeit.default_timer() print('Time: %.2f min' % ((stop - start) / 60)) g_model.save('./models/cgan_' + save + '.h5') loss = np.array([d_loss_real, d_loss_fake, g_loss]) np.save('./models/cgan_loss_' + save, loss) def generate_fakes_givenOne(self, generator, focused, defocused, latent_dim, n_samples): idx = randint(0, defocused.shape[0], 1) x_real = focused[idx, :, :, :] input_cond = defocused[idx, :, :, :] input_cond = np.repeat(input_cond, n_samples, axis=0) input_z = self.generate_latent(latent_dim, n_samples) x_fake = generator.predict([input_z, input_cond]) return x_real, x_fake, input_cond[0, :, :, :] def generate_fakes_givenMany(self, generator, focused, defocused, latent_dim, n_examples): n_samples = n_examples - 2 x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape [2], focused.shape[3])) input_cond_many = np.zeros((n_examples, focused.shape[1], focused. 
shape[2], focused.shape[3])) x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1], focused.shape[2], focused.shape[3])) for i in range(n_examples): x_real_many[i, :, :, :], x_fake_many[i, :, :, :, : ], input_cond_many[i, :, :, :] = self.generate_fakes_givenOne( generator, focused, defocused, latent_dim, n_samples) return x_real_many, x_fake_many, input_cond_many <|reserved_special_token_1|> <|reserved_special_token_0|> import numpy as np import timeit import matplotlib.pyplot as plt from numpy import expand_dims, zeros, ones from numpy.random import randn, randint from keras.models import load_model from keras.optimizers import Adam from keras.models import Model from keras.layers import Input, Reshape, Flatten, Concatenate from keras.layers import Dense, Conv2D, Conv2DTranspose from keras.layers import Dropout, LeakyReLU class cGAN: def __init__(self, input_dim1, input_dim2, input_dim3, latent_size): self.input_dim1 = input_dim1 self.input_dim2 = input_dim2 self.input_dim3 = input_dim3 self.latent_size = latent_size def discriminator(self): input_shape = self.input_dim1, self.input_dim2, self.input_dim3 input_cond = Input(shape=input_shape) input_x = Input(shape=input_shape) merge = Concatenate()([input_x, input_cond]) out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Flatten()(out) out = Dropout(0.5)(out) y = Dense(1, activation='sigmoid')(out) model = Model([input_x, input_cond], y) opt = Adam(lr=0.0002) model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[ 'accuracy']) return model def generator(self): image_dim = self.input_dim1 latent_shape = self.latent_size cond_shape = image_dim, image_dim, self.input_dim3 input_latent = Input(shape=(latent_shape,)) num_nodes = image_dim * image_dim latent = Dense(num_nodes)(input_latent) latent = LeakyReLU(alpha=0.2)(latent) latent = Reshape((image_dim, image_dim, 1))(latent) input_cond = Input(shape=cond_shape) cond = input_cond merge = Concatenate()([latent, cond]) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same' )(out) model = Model([input_latent, input_cond], x) return model def combined(self, g_model, d_model): d_model.trainable = False input_latent, input_cond = g_model.input x = g_model.output y = d_model([x, input_cond]) model = Model([input_latent, input_cond], y) opt = Adam(lr=0.0002, beta_1=0.5) model.compile(loss='binary_crossentropy', optimizer=opt) return model def generate_real_samples(self, focused, defocused, n_samples): idx = randint(0, focused.shape[0], n_samples) x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :] y_real = ones((n_samples, 1)) return [x_real, input_cond], y_real def generate_latent(self, latent_size, n_samples): total_latent = randn(latent_size * n_samples) input_z = total_latent.reshape(n_samples, latent_size) return input_z def generate_fake_samples(self, generator, defocused, latent_dim, n_samples ): idx = randint(0, defocused.shape[0], n_samples) input_cond = defocused[idx, :, :, :] input_z = self.generate_latent(latent_dim, n_samples) x_fake = generator.predict([input_z, input_cond]) y_fake = zeros((n_samples, 1)) return 
[x_fake, input_cond], y_fake def generate_gan_input(self, defocused, latent_dim, n_samples): idx = randint(0, defocused.shape[0], n_samples) input_cond = defocused[idx, :, :, :] input_z = self.generate_latent(latent_dim, n_samples) y_gan = ones((n_samples, 1)) return [input_z, input_cond], y_gan def train(self, g_model, d_model, gan_model, real, input_cond, latent_dim, n_epochs, n_batch, save): bat_per_epo = int(real.shape[0] / n_batch) half_batch = int(n_batch / 2) g_loss = np.zeros(n_epochs) d_loss_real = np.zeros(n_epochs) d_loss_fake = np.zeros(n_epochs) for i in range(n_epochs): start = timeit.default_timer() print('================== Epoch %d ==================\n' % (i + 1)) for j in range(bat_per_epo): [x_real, input_cond_real], y_real = self.generate_real_samples( real, input_cond, half_batch) d_loss_real[i], _ = d_model.train_on_batch([x_real, input_cond_real], y_real) [x_fake, input_cond_fake], y_fake = self.generate_fake_samples( g_model, input_cond, latent_dim, half_batch) d_loss_fake[i], _ = d_model.train_on_batch([x_fake, input_cond_fake], y_fake) [z_input, input_cond_gan], y_gan = self.generate_gan_input( input_cond, latent_dim, n_batch) g_loss[i] = gan_model.train_on_batch([z_input, input_cond_gan], y_gan) print('Completed: %.f' % np.divide((j + 1) * 100, bat_per_epo) + '%') print( 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f' % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\n') stop = timeit.default_timer() print('Time: %.2f min' % ((stop - start) / 60)) g_model.save('./models/cgan_' + save + '.h5') loss = np.array([d_loss_real, d_loss_fake, g_loss]) np.save('./models/cgan_loss_' + save, loss) def generate_fakes_givenOne(self, generator, focused, defocused, latent_dim, n_samples): idx = randint(0, defocused.shape[0], 1) x_real = focused[idx, :, :, :] input_cond = defocused[idx, :, :, :] input_cond = np.repeat(input_cond, n_samples, axis=0) input_z = self.generate_latent(latent_dim, n_samples) x_fake = generator.predict([input_z, input_cond]) return x_real, x_fake, input_cond[0, :, :, :] def generate_fakes_givenMany(self, generator, focused, defocused, latent_dim, n_examples): n_samples = n_examples - 2 x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape [2], focused.shape[3])) input_cond_many = np.zeros((n_examples, focused.shape[1], focused. 
shape[2], focused.shape[3])) x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1], focused.shape[2], focused.shape[3])) for i in range(n_examples): x_real_many[i, :, :, :], x_fake_many[i, :, :, :, : ], input_cond_many[i, :, :, :] = self.generate_fakes_givenOne( generator, focused, defocused, latent_dim, n_samples) return x_real_many, x_fake_many, input_cond_many <|reserved_special_token_1|> # -*- coding: utf-8 -*- """ Created on Mon Nov 11 18:50:46 2019 @author: kanfar """ import numpy as np import timeit import matplotlib.pyplot as plt from numpy import expand_dims, zeros, ones from numpy.random import randn, randint from keras.models import load_model from keras.optimizers import Adam from keras.models import Model from keras.layers import Input, Reshape, Flatten, Concatenate from keras.layers import Dense, Conv2D, Conv2DTranspose from keras.layers import Dropout, LeakyReLU class cGAN: def __init__(self, input_dim1, input_dim2, input_dim3, latent_size): self.input_dim1 = input_dim1 self.input_dim2 = input_dim2 self.input_dim3 = input_dim3 self.latent_size = latent_size def discriminator(self): #conditional input input_shape = (self.input_dim1, self.input_dim2, self.input_dim3) input_cond = Input(shape = input_shape) #generator output input_x = Input(shape = input_shape) merge = Concatenate()([input_x, input_cond]) #downsample out = Conv2D(32, (3,3), strides=(2,2), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (3,3), strides=(2,2), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Flatten()(out) out = Dropout(0.5)(out) y = Dense(1, activation='sigmoid')(out) # define model model = Model([input_x, input_cond], y) # compile model opt = Adam(lr=0.0002) #0.0002 and beta_1 0.5 model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy']) return model def generator(self): #losing one pixel, figure out later image_dim = self.input_dim1 latent_shape = self.latent_size cond_shape = (image_dim, image_dim, self.input_dim3) input_latent = Input(shape = (latent_shape,)) num_nodes = image_dim * image_dim latent = Dense(num_nodes)(input_latent) latent = LeakyReLU(alpha=0.2)(latent) latent = Reshape((image_dim,image_dim,1))(latent) input_cond = Input(shape = cond_shape) cond = input_cond merge = Concatenate()([latent,cond]) # upsample to 14x14 out = Conv2D(32, (4,4), strides=(1,1), padding='same')(merge) out = LeakyReLU(alpha=0.2)(out) # upsample to 28x28 out = Conv2D(32, (4,4), strides=(1,1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) out = Conv2D(32, (4,4), strides=(1,1), padding='same')(out) out = LeakyReLU(alpha=0.2)(out) # output x = Conv2D(1, (4,4), strides=(1,1), activation='tanh', padding='same')(out) #something key that I don't understand # define model model = Model([input_latent, input_cond], x) return model def combined(self, g_model, d_model): #model comprised of two models # make weights in the discriminator not trainable d_model.trainable = False # get noise and label inputs from generator model input_latent, input_cond = g_model.input #defining the tensors in a short way: this is saying the input to this model is the same size as input to g_model # get image output from the generator model x = g_model.output #can I do x = g_model([input_latent, input_cond]) instead of the above? # connect image output and label input from generator as inputs to discriminator y = d_model([x, input_cond]) #why this needs to be connected but not the above???? does the first output take model input as default??????? 
test this # define gan model as taking noise and label and outputting a classification model = Model([input_latent, input_cond], y) # compile model opt = Adam(lr=0.0002, beta_1=0.5) model.compile(loss='binary_crossentropy', optimizer=opt) return model def generate_real_samples(self, focused, defocused, n_samples): idx = randint(0, focused.shape[0], n_samples) x_real, input_cond = focused[idx,:,:,:], defocused[idx,:,:,:] y_real = ones((n_samples,1)) return [x_real, input_cond], y_real def generate_latent(self, latent_size, n_samples): #generate points in teh latent space total_latent = randn(latent_size*n_samples) input_z = total_latent.reshape(n_samples, latent_size) return input_z def generate_fake_samples(self, generator, defocused, latent_dim, n_samples): idx = randint(0, defocused.shape[0], n_samples) input_cond = defocused[idx,:,:,:] input_z = self.generate_latent(latent_dim, n_samples) # predict outputs x_fake = generator.predict([input_z, input_cond]) # create class labels y_fake = zeros((n_samples, 1)) return [x_fake, input_cond], y_fake def generate_gan_input(self, defocused, latent_dim, n_samples): #defocused = data[1,:,:,:] #defocused = np.expand_dims(input_cond, axis = -1) idx = randint(0, defocused.shape[0], n_samples) input_cond = defocused[idx,:,:,:] input_z = self.generate_latent(latent_dim, n_samples) # create class labels y_gan = ones((n_samples, 1)) return [input_z, input_cond], y_gan def train(self, g_model, d_model, gan_model, real, input_cond, latent_dim, n_epochs, n_batch, save): bat_per_epo = int(real.shape[0] / n_batch) #check half_batch = int(n_batch / 2) g_loss = np.zeros(n_epochs) d_loss_real = np.zeros(n_epochs) d_loss_fake = np.zeros(n_epochs) # manually enumerate epochs for i in range(n_epochs): start = timeit.default_timer() # enumerate batches over the training set print('================== Epoch %d ==================\n' % (i+1)) for j in range(bat_per_epo): # get randomly selected 'real' samples [x_real, input_cond_real], y_real = self.generate_real_samples(real, input_cond, half_batch) # update discriminator model weights d_loss_real[i], _ = d_model.train_on_batch([x_real, input_cond_real], y_real) # generate 'fake' examples [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(g_model, input_cond, latent_dim, half_batch) # update discriminator model weights d_loss_fake[i], _ = d_model.train_on_batch([x_fake, input_cond_fake], y_fake) # prepare points in latent space as input for the generator [z_input, input_cond_gan], y_gan = self.generate_gan_input(input_cond, latent_dim, n_batch) # update the generator via the discriminator's error g_loss[i] = gan_model.train_on_batch([z_input, input_cond_gan], y_gan) # summarize loss on this batch print('Completed: %.f' % np.divide((j+1)*100,bat_per_epo) +'%') print('Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f' % (i+1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\n') stop = timeit.default_timer() print('Time: %.2f min' % ((stop - start)/60)) # save the generator model g_model.save('./models/cgan_'+ save + '.h5') #save somewhere # save loss history loss = np.array([d_loss_real, d_loss_fake, g_loss]) np.save('./models/cgan_loss_' + save, loss) def generate_fakes_givenOne(self, generator, focused, defocused, latent_dim, n_samples): idx = randint(0, defocused.shape[0], 1) x_real = focused[idx,:,:,:] input_cond = defocused[idx,:,:,:] ##### should last be zero or :? 
input_cond = np.repeat(input_cond, n_samples, axis=0) input_z = self.generate_latent(latent_dim, n_samples) x_fake = generator.predict([input_z, input_cond]) return x_real, x_fake, input_cond[0,:,:,:] def generate_fakes_givenMany(self, generator, focused, defocused, latent_dim, n_examples): n_samples = n_examples-2 x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape[2], focused.shape[3])) input_cond_many = np.zeros((n_examples, focused.shape[1], focused.shape[2], focused.shape[3])) x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1], focused.shape[2], focused.shape[3])) for i in range(n_examples): x_real_many[i,:,:,:], x_fake_many[i,:,:,:,:], input_cond_many[i,:,:,:] = self.generate_fakes_givenOne(generator, focused, defocused, latent_dim, n_samples) return x_real_many, x_fake_many, input_cond_many
flexible
{ "blob_id": "fc6c220f8a3a0e9dd1d6e6e1ca131136db8f8a58", "index": 9155, "step-1": "<mask token>\n\n\nclass cGAN:\n\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n\n def discriminator(self):\n input_shape = self.input_dim1, self.input_dim2, self.input_dim3\n input_cond = Input(shape=input_shape)\n input_x = Input(shape=input_shape)\n merge = Concatenate()([input_x, input_cond])\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n model = Model([input_x, input_cond], y)\n opt = Adam(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\n return model\n\n def generator(self):\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = image_dim, image_dim, self.input_dim3\n input_latent = Input(shape=(latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim, image_dim, 1))(latent)\n input_cond = Input(shape=cond_shape)\n cond = input_cond\n merge = Concatenate()([latent, cond])\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same'\n )(out)\n model = Model([input_latent, input_cond], x)\n return model\n\n def combined(self, g_model, d_model):\n d_model.trainable = False\n input_latent, input_cond = g_model.input\n x = g_model.output\n y = d_model([x, input_cond])\n model = Model([input_latent, input_cond], y)\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, focused.shape[0], n_samples)\n x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :]\n y_real = ones((n_samples, 1))\n return [x_real, input_cond], y_real\n\n def generate_latent(self, latent_size, n_samples):\n total_latent = randn(latent_size * n_samples)\n input_z = total_latent.reshape(n_samples, latent_size)\n return input_z\n\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples\n ):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n <mask token>\n\n def train(self, g_model, d_model, gan_model, real, input_cond,\n latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch)\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n for i in range(n_epochs):\n start = timeit.default_timer()\n print('================== Epoch %d ==================\\n' % (i + 1))\n for j in range(bat_per_epo):\n [x_real, input_cond_real], y_real = self.generate_real_samples(\n real, input_cond, half_batch)\n 
d_loss_real[i], _ = d_model.train_on_batch([x_real,\n input_cond_real], y_real)\n [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(\n g_model, input_cond, latent_dim, half_batch)\n d_loss_fake[i], _ = d_model.train_on_batch([x_fake,\n input_cond_fake], y_fake)\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(\n input_cond, latent_dim, n_batch)\n g_loss[i] = gan_model.train_on_batch([z_input,\n input_cond_gan], y_gan)\n print('Completed: %.f' % np.divide((j + 1) * 100,\n bat_per_epo) + '%')\n print(\n 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f'\n % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start) / 60))\n g_model.save('./models/cgan_' + save + '.h5')\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n\n def generate_fakes_givenOne(self, generator, focused, defocused,\n latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], 1)\n x_real = focused[idx, :, :, :]\n input_cond = defocused[idx, :, :, :]\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0, :, :, :]\n <mask token>\n", "step-2": "<mask token>\n\n\nclass cGAN:\n\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n\n def discriminator(self):\n input_shape = self.input_dim1, self.input_dim2, self.input_dim3\n input_cond = Input(shape=input_shape)\n input_x = Input(shape=input_shape)\n merge = Concatenate()([input_x, input_cond])\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n model = Model([input_x, input_cond], y)\n opt = Adam(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\n return model\n\n def generator(self):\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = image_dim, image_dim, self.input_dim3\n input_latent = Input(shape=(latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim, image_dim, 1))(latent)\n input_cond = Input(shape=cond_shape)\n cond = input_cond\n merge = Concatenate()([latent, cond])\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same'\n )(out)\n model = Model([input_latent, input_cond], x)\n return model\n\n def combined(self, g_model, d_model):\n d_model.trainable = False\n input_latent, input_cond = g_model.input\n x = g_model.output\n y = d_model([x, input_cond])\n model = Model([input_latent, input_cond], y)\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, 
focused.shape[0], n_samples)\n x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :]\n y_real = ones((n_samples, 1))\n return [x_real, input_cond], y_real\n\n def generate_latent(self, latent_size, n_samples):\n total_latent = randn(latent_size * n_samples)\n input_z = total_latent.reshape(n_samples, latent_size)\n return input_z\n\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples\n ):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n <mask token>\n\n def train(self, g_model, d_model, gan_model, real, input_cond,\n latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch)\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n for i in range(n_epochs):\n start = timeit.default_timer()\n print('================== Epoch %d ==================\\n' % (i + 1))\n for j in range(bat_per_epo):\n [x_real, input_cond_real], y_real = self.generate_real_samples(\n real, input_cond, half_batch)\n d_loss_real[i], _ = d_model.train_on_batch([x_real,\n input_cond_real], y_real)\n [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(\n g_model, input_cond, latent_dim, half_batch)\n d_loss_fake[i], _ = d_model.train_on_batch([x_fake,\n input_cond_fake], y_fake)\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(\n input_cond, latent_dim, n_batch)\n g_loss[i] = gan_model.train_on_batch([z_input,\n input_cond_gan], y_gan)\n print('Completed: %.f' % np.divide((j + 1) * 100,\n bat_per_epo) + '%')\n print(\n 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f'\n % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start) / 60))\n g_model.save('./models/cgan_' + save + '.h5')\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n\n def generate_fakes_givenOne(self, generator, focused, defocused,\n latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], 1)\n x_real = focused[idx, :, :, :]\n input_cond = defocused[idx, :, :, :]\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0, :, :, :]\n\n def generate_fakes_givenMany(self, generator, focused, defocused,\n latent_dim, n_examples):\n n_samples = n_examples - 2\n x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape\n [2], focused.shape[3]))\n input_cond_many = np.zeros((n_examples, focused.shape[1], focused.\n shape[2], focused.shape[3]))\n x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1],\n focused.shape[2], focused.shape[3]))\n for i in range(n_examples):\n x_real_many[i, :, :, :], x_fake_many[i, :, :, :, :\n ], input_cond_many[i, :, :, :] = self.generate_fakes_givenOne(\n generator, focused, defocused, latent_dim, n_samples)\n return x_real_many, x_fake_many, input_cond_many\n", "step-3": "<mask token>\n\n\nclass cGAN:\n\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n\n def discriminator(self):\n input_shape = 
self.input_dim1, self.input_dim2, self.input_dim3\n input_cond = Input(shape=input_shape)\n input_x = Input(shape=input_shape)\n merge = Concatenate()([input_x, input_cond])\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n model = Model([input_x, input_cond], y)\n opt = Adam(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\n return model\n\n def generator(self):\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = image_dim, image_dim, self.input_dim3\n input_latent = Input(shape=(latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim, image_dim, 1))(latent)\n input_cond = Input(shape=cond_shape)\n cond = input_cond\n merge = Concatenate()([latent, cond])\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same'\n )(out)\n model = Model([input_latent, input_cond], x)\n return model\n\n def combined(self, g_model, d_model):\n d_model.trainable = False\n input_latent, input_cond = g_model.input\n x = g_model.output\n y = d_model([x, input_cond])\n model = Model([input_latent, input_cond], y)\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, focused.shape[0], n_samples)\n x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :]\n y_real = ones((n_samples, 1))\n return [x_real, input_cond], y_real\n\n def generate_latent(self, latent_size, n_samples):\n total_latent = randn(latent_size * n_samples)\n input_z = total_latent.reshape(n_samples, latent_size)\n return input_z\n\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples\n ):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n\n def generate_gan_input(self, defocused, latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n y_gan = ones((n_samples, 1))\n return [input_z, input_cond], y_gan\n\n def train(self, g_model, d_model, gan_model, real, input_cond,\n latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch)\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n for i in range(n_epochs):\n start = timeit.default_timer()\n print('================== Epoch %d ==================\\n' % (i + 1))\n for j in range(bat_per_epo):\n [x_real, input_cond_real], y_real = self.generate_real_samples(\n real, input_cond, half_batch)\n d_loss_real[i], _ = d_model.train_on_batch([x_real,\n input_cond_real], y_real)\n 
[x_fake, input_cond_fake], y_fake = self.generate_fake_samples(\n g_model, input_cond, latent_dim, half_batch)\n d_loss_fake[i], _ = d_model.train_on_batch([x_fake,\n input_cond_fake], y_fake)\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(\n input_cond, latent_dim, n_batch)\n g_loss[i] = gan_model.train_on_batch([z_input,\n input_cond_gan], y_gan)\n print('Completed: %.f' % np.divide((j + 1) * 100,\n bat_per_epo) + '%')\n print(\n 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f'\n % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start) / 60))\n g_model.save('./models/cgan_' + save + '.h5')\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n\n def generate_fakes_givenOne(self, generator, focused, defocused,\n latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], 1)\n x_real = focused[idx, :, :, :]\n input_cond = defocused[idx, :, :, :]\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0, :, :, :]\n\n def generate_fakes_givenMany(self, generator, focused, defocused,\n latent_dim, n_examples):\n n_samples = n_examples - 2\n x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape\n [2], focused.shape[3]))\n input_cond_many = np.zeros((n_examples, focused.shape[1], focused.\n shape[2], focused.shape[3]))\n x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1],\n focused.shape[2], focused.shape[3]))\n for i in range(n_examples):\n x_real_many[i, :, :, :], x_fake_many[i, :, :, :, :\n ], input_cond_many[i, :, :, :] = self.generate_fakes_givenOne(\n generator, focused, defocused, latent_dim, n_samples)\n return x_real_many, x_fake_many, input_cond_many\n", "step-4": "<mask token>\nimport numpy as np\nimport timeit\nimport matplotlib.pyplot as plt\nfrom numpy import expand_dims, zeros, ones\nfrom numpy.random import randn, randint\nfrom keras.models import load_model\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.layers import Input, Reshape, Flatten, Concatenate\nfrom keras.layers import Dense, Conv2D, Conv2DTranspose\nfrom keras.layers import Dropout, LeakyReLU\n\n\nclass cGAN:\n\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n\n def discriminator(self):\n input_shape = self.input_dim1, self.input_dim2, self.input_dim3\n input_cond = Input(shape=input_shape)\n input_x = Input(shape=input_shape)\n merge = Concatenate()([input_x, input_cond])\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3, 3), strides=(2, 2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n model = Model([input_x, input_cond], y)\n opt = Adam(lr=0.0002)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\n 'accuracy'])\n return model\n\n def generator(self):\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = image_dim, image_dim, self.input_dim3\n input_latent = Input(shape=(latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = 
LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim, image_dim, 1))(latent)\n input_cond = Input(shape=cond_shape)\n cond = input_cond\n merge = Concatenate()([latent, cond])\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (4, 4), strides=(1, 1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n x = Conv2D(1, (4, 4), strides=(1, 1), activation='tanh', padding='same'\n )(out)\n model = Model([input_latent, input_cond], x)\n return model\n\n def combined(self, g_model, d_model):\n d_model.trainable = False\n input_latent, input_cond = g_model.input\n x = g_model.output\n y = d_model([x, input_cond])\n model = Model([input_latent, input_cond], y)\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, focused.shape[0], n_samples)\n x_real, input_cond = focused[idx, :, :, :], defocused[idx, :, :, :]\n y_real = ones((n_samples, 1))\n return [x_real, input_cond], y_real\n\n def generate_latent(self, latent_size, n_samples):\n total_latent = randn(latent_size * n_samples)\n input_z = total_latent.reshape(n_samples, latent_size)\n return input_z\n\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples\n ):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n\n def generate_gan_input(self, defocused, latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx, :, :, :]\n input_z = self.generate_latent(latent_dim, n_samples)\n y_gan = ones((n_samples, 1))\n return [input_z, input_cond], y_gan\n\n def train(self, g_model, d_model, gan_model, real, input_cond,\n latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch)\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n for i in range(n_epochs):\n start = timeit.default_timer()\n print('================== Epoch %d ==================\\n' % (i + 1))\n for j in range(bat_per_epo):\n [x_real, input_cond_real], y_real = self.generate_real_samples(\n real, input_cond, half_batch)\n d_loss_real[i], _ = d_model.train_on_batch([x_real,\n input_cond_real], y_real)\n [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(\n g_model, input_cond, latent_dim, half_batch)\n d_loss_fake[i], _ = d_model.train_on_batch([x_fake,\n input_cond_fake], y_fake)\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(\n input_cond, latent_dim, n_batch)\n g_loss[i] = gan_model.train_on_batch([z_input,\n input_cond_gan], y_gan)\n print('Completed: %.f' % np.divide((j + 1) * 100,\n bat_per_epo) + '%')\n print(\n 'Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f'\n % (i + 1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start) / 60))\n g_model.save('./models/cgan_' + save + '.h5')\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n\n def generate_fakes_givenOne(self, generator, focused, defocused,\n latent_dim, n_samples):\n idx = 
randint(0, defocused.shape[0], 1)\n x_real = focused[idx, :, :, :]\n input_cond = defocused[idx, :, :, :]\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0, :, :, :]\n\n def generate_fakes_givenMany(self, generator, focused, defocused,\n latent_dim, n_examples):\n n_samples = n_examples - 2\n x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape\n [2], focused.shape[3]))\n input_cond_many = np.zeros((n_examples, focused.shape[1], focused.\n shape[2], focused.shape[3]))\n x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1],\n focused.shape[2], focused.shape[3]))\n for i in range(n_examples):\n x_real_many[i, :, :, :], x_fake_many[i, :, :, :, :\n ], input_cond_many[i, :, :, :] = self.generate_fakes_givenOne(\n generator, focused, defocused, latent_dim, n_samples)\n return x_real_many, x_fake_many, input_cond_many\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 11 18:50:46 2019\n\n@author: kanfar\n\"\"\"\n\nimport numpy as np\nimport timeit\nimport matplotlib.pyplot as plt\nfrom numpy import expand_dims, zeros, ones\nfrom numpy.random import randn, randint\nfrom keras.models import load_model\nfrom keras.optimizers import Adam\nfrom keras.models import Model\nfrom keras.layers import Input, Reshape, Flatten, Concatenate\nfrom keras.layers import Dense, Conv2D, Conv2DTranspose\nfrom keras.layers import Dropout, LeakyReLU\n\nclass cGAN:\n def __init__(self, input_dim1, input_dim2, input_dim3, latent_size):\n self.input_dim1 = input_dim1\n self.input_dim2 = input_dim2\n self.input_dim3 = input_dim3\n self.latent_size = latent_size\n def discriminator(self):\n #conditional input\n input_shape = (self.input_dim1, self.input_dim2, self.input_dim3)\n input_cond = Input(shape = input_shape)\n #generator output\n input_x = Input(shape = input_shape)\n merge = Concatenate()([input_x, input_cond])\n #downsample\n out = Conv2D(32, (3,3), strides=(2,2), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n out = Conv2D(32, (3,3), strides=(2,2), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n out = Flatten()(out)\n out = Dropout(0.5)(out)\n y = Dense(1, activation='sigmoid')(out)\n # define model\n model = Model([input_x, input_cond], y)\n # compile model\n opt = Adam(lr=0.0002) #0.0002 and beta_1 0.5\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])\n return model\n def generator(self):\n #losing one pixel, figure out later\n image_dim = self.input_dim1\n latent_shape = self.latent_size\n cond_shape = (image_dim, image_dim, self.input_dim3)\n \n input_latent = Input(shape = (latent_shape,))\n num_nodes = image_dim * image_dim\n latent = Dense(num_nodes)(input_latent)\n latent = LeakyReLU(alpha=0.2)(latent)\n latent = Reshape((image_dim,image_dim,1))(latent)\n \n input_cond = Input(shape = cond_shape)\n cond = input_cond\n \n merge = Concatenate()([latent,cond])\n \n # upsample to 14x14\n out = Conv2D(32, (4,4), strides=(1,1), padding='same')(merge)\n out = LeakyReLU(alpha=0.2)(out)\n # upsample to 28x28\n out = Conv2D(32, (4,4), strides=(1,1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n \n out = Conv2D(32, (4,4), strides=(1,1), padding='same')(out)\n out = LeakyReLU(alpha=0.2)(out)\n # output\n x = Conv2D(1, (4,4), strides=(1,1), activation='tanh', padding='same')(out) #something key that I don't understand\n # define model\n model = Model([input_latent, 
input_cond], x)\n return model\n def combined(self, g_model, d_model):\n #model comprised of two models\n # make weights in the discriminator not trainable\n d_model.trainable = False\n # get noise and label inputs from generator model\n input_latent, input_cond = g_model.input #defining the tensors in a short way: this is saying the input to this model is the same size as input to g_model\n # get image output from the generator model\n x = g_model.output\n #can I do x = g_model([input_latent, input_cond]) instead of the above?\n # connect image output and label input from generator as inputs to discriminator\n y = d_model([x, input_cond]) #why this needs to be connected but not the above???? does the first output take model input as default??????? test this\n # define gan model as taking noise and label and outputting a classification\n model = Model([input_latent, input_cond], y)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n model.compile(loss='binary_crossentropy', optimizer=opt)\n return model\n def generate_real_samples(self, focused, defocused, n_samples):\n idx = randint(0, focused.shape[0], n_samples)\n x_real, input_cond = focused[idx,:,:,:], defocused[idx,:,:,:] \n y_real = ones((n_samples,1))\n return [x_real, input_cond], y_real\n \n def generate_latent(self, latent_size, n_samples):\n #generate points in teh latent space\n total_latent = randn(latent_size*n_samples)\n input_z = total_latent.reshape(n_samples, latent_size) \n return input_z\n def generate_fake_samples(self, generator, defocused, latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx,:,:,:]\n input_z = self.generate_latent(latent_dim, n_samples)\n # predict outputs\n x_fake = generator.predict([input_z, input_cond])\n # create class labels\n y_fake = zeros((n_samples, 1))\n return [x_fake, input_cond], y_fake\n def generate_gan_input(self, defocused, latent_dim, n_samples):\n #defocused = data[1,:,:,:]\n #defocused = np.expand_dims(input_cond, axis = -1)\n idx = randint(0, defocused.shape[0], n_samples)\n input_cond = defocused[idx,:,:,:]\n input_z = self.generate_latent(latent_dim, n_samples)\n # create class labels\n y_gan = ones((n_samples, 1))\n return [input_z, input_cond], y_gan\n\n def train(self, g_model, d_model, gan_model, real, input_cond, latent_dim, n_epochs, n_batch, save):\n bat_per_epo = int(real.shape[0] / n_batch) #check\n half_batch = int(n_batch / 2)\n g_loss = np.zeros(n_epochs)\n d_loss_real = np.zeros(n_epochs)\n d_loss_fake = np.zeros(n_epochs)\n # manually enumerate epochs\n for i in range(n_epochs):\n start = timeit.default_timer()\n # enumerate batches over the training set\n print('================== Epoch %d ==================\\n' % (i+1))\n for j in range(bat_per_epo):\n # get randomly selected 'real' samples\n [x_real, input_cond_real], y_real = self.generate_real_samples(real, input_cond, half_batch)\n # update discriminator model weights\n d_loss_real[i], _ = d_model.train_on_batch([x_real, input_cond_real], y_real)\n # generate 'fake' examples\n [x_fake, input_cond_fake], y_fake = self.generate_fake_samples(g_model, input_cond, latent_dim, half_batch)\n # update discriminator model weights\n d_loss_fake[i], _ = d_model.train_on_batch([x_fake, input_cond_fake], y_fake)\n # prepare points in latent space as input for the generator\n [z_input, input_cond_gan], y_gan = self.generate_gan_input(input_cond, latent_dim, n_batch)\n # update the generator via the discriminator's error\n g_loss[i] = 
gan_model.train_on_batch([z_input, input_cond_gan], y_gan)\n # summarize loss on this batch\n print('Completed: %.f' % np.divide((j+1)*100,bat_per_epo) +'%')\n print('Epoch %d:: d_loss_real = %.3f, d_loss_fake = %.3f g_loss = %.3f' %\n (i+1, d_loss_real[i], d_loss_fake[i], g_loss[i]) + '\\n')\n stop = timeit.default_timer()\n print('Time: %.2f min' % ((stop - start)/60)) \n # save the generator model\n g_model.save('./models/cgan_'+ save + '.h5') #save somewhere\n # save loss history\n loss = np.array([d_loss_real, d_loss_fake, g_loss])\n np.save('./models/cgan_loss_' + save, loss)\n def generate_fakes_givenOne(self, generator, focused, defocused, latent_dim, n_samples):\n idx = randint(0, defocused.shape[0], 1)\n x_real = focused[idx,:,:,:]\n input_cond = defocused[idx,:,:,:] ##### should last be zero or :?\n input_cond = np.repeat(input_cond, n_samples, axis=0)\n input_z = self.generate_latent(latent_dim, n_samples)\n x_fake = generator.predict([input_z, input_cond])\n return x_real, x_fake, input_cond[0,:,:,:]\n def generate_fakes_givenMany(self, generator, focused, defocused, latent_dim, n_examples):\n n_samples = n_examples-2\n x_real_many = np.zeros((n_examples, focused.shape[1], focused.shape[2], focused.shape[3]))\n input_cond_many = np.zeros((n_examples, focused.shape[1], focused.shape[2], focused.shape[3]))\n x_fake_many = np.zeros((n_examples, n_samples, focused.shape[1], focused.shape[2], focused.shape[3]))\n \n for i in range(n_examples):\n x_real_many[i,:,:,:], x_fake_many[i,:,:,:,:], input_cond_many[i,:,:,:] = self.generate_fakes_givenOne(generator, focused, defocused, latent_dim, n_samples)\n return x_real_many, x_fake_many, input_cond_many\n \n ", "step-ids": [ 10, 11, 12, 13, 14 ] }
[ 10, 11, 12, 13, 14 ]
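The record above defines a Keras conditional GAN (class cGAN) that learns to generate focused images from paired defocused conditioning images. The sketch below shows how that class could be driven end to end; the module name, array shapes, and training settings are illustrative assumptions, not part of the record.

import numpy as np
from cgan import cGAN  # assumed module name for the class defined in the record above

# Placeholder data: 500 paired 64x64 single-channel images scaled to [-1, 1].
focused = np.random.uniform(-1, 1, size=(500, 64, 64, 1)).astype('float32')
defocused = np.random.uniform(-1, 1, size=(500, 64, 64, 1)).astype('float32')

latent_dim = 100
gan = cGAN(64, 64, 1, latent_dim)

# Build the discriminator, generator, and the stacked generator+discriminator model.
d_model = gan.discriminator()
g_model = gan.generator()
gan_model = gan.combined(g_model, d_model)

# Short illustrative run; train() writes weights and a loss history under ./models/,
# so that directory must exist beforehand.
gan.train(g_model, d_model, gan_model, focused, defocused,
          latent_dim, n_epochs=2, n_batch=32, save='demo')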
<|reserved_special_token_0|> def version_info(): return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__) <|reserved_special_token_0|> def check_general_functions(): print('dansfunctions/functions_general.py') print('Version: %s (%s)' % (fg.__version__, fg.__date__)) print('Methods:') print(fg.list_methods(fg, False)) def check_plotting_functions(): print('dansfunctions/functions_plotting.py') if fp is None: print('Matplotlib may not be available') return print('Version: %s (%s)' % (fp.__version__, fp.__date__)) print('Methods:') print(fg.list_methods(fp, False)) def check_tkinter_functions(): print('dansfunctions/tkgui/basic_widgets.py') if widgets is None: print('tkinter may not be available') return print('Version: %s (%s)' % (widgets.__version__, widgets.__date__)) print('Methods:') print(fg.list_methods(widgets, False)) <|reserved_special_token_1|> <|reserved_special_token_0|> def version_info(): return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__) def module_info(): import sys out = 'Python version %s' % sys.version out += '\n%s' % version_info() out += """ numpy version: %s""" % fg.np.__version__ try: import matplotlib out += '\nmatplotlib version: %s' % matplotlib.__version__ except ImportError: out += '\nmatplotlib version: None' try: import tkinter out += '\n tkinter version: %s' % tkinter.TkVersion except ImportError: out += '\n tkinter version: None' return out def check_general_functions(): print('dansfunctions/functions_general.py') print('Version: %s (%s)' % (fg.__version__, fg.__date__)) print('Methods:') print(fg.list_methods(fg, False)) def check_plotting_functions(): print('dansfunctions/functions_plotting.py') if fp is None: print('Matplotlib may not be available') return print('Version: %s (%s)' % (fp.__version__, fp.__date__)) print('Methods:') print(fg.list_methods(fp, False)) def check_tkinter_functions(): print('dansfunctions/tkgui/basic_widgets.py') if widgets is None: print('tkinter may not be available') return print('Version: %s (%s)' % (widgets.__version__, widgets.__date__)) print('Methods:') print(fg.list_methods(widgets, False)) <|reserved_special_token_1|> <|reserved_special_token_0|> try: import matplotlib matplotlib.use('TkAgg') from . 
import functions_plotting as fp except ImportError: fp = None print('Matplotlib may not be available') try: from .tkgui import basic_widgets as widgets except ImportError: widgets = None print('tkinter may not be available') def version_info(): return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__) def module_info(): import sys out = 'Python version %s' % sys.version out += '\n%s' % version_info() out += """ numpy version: %s""" % fg.np.__version__ try: import matplotlib out += '\nmatplotlib version: %s' % matplotlib.__version__ except ImportError: out += '\nmatplotlib version: None' try: import tkinter out += '\n tkinter version: %s' % tkinter.TkVersion except ImportError: out += '\n tkinter version: None' return out def check_general_functions(): print('dansfunctions/functions_general.py') print('Version: %s (%s)' % (fg.__version__, fg.__date__)) print('Methods:') print(fg.list_methods(fg, False)) def check_plotting_functions(): print('dansfunctions/functions_plotting.py') if fp is None: print('Matplotlib may not be available') return print('Version: %s (%s)' % (fp.__version__, fp.__date__)) print('Methods:') print(fg.list_methods(fp, False)) def check_tkinter_functions(): print('dansfunctions/tkgui/basic_widgets.py') if widgets is None: print('tkinter may not be available') return print('Version: %s (%s)' % (widgets.__version__, widgets.__date__)) print('Methods:') print(fg.list_methods(widgets, False)) <|reserved_special_token_1|> <|reserved_special_token_0|> from . import functions_general as fg try: import matplotlib matplotlib.use('TkAgg') from . import functions_plotting as fp except ImportError: fp = None print('Matplotlib may not be available') try: from .tkgui import basic_widgets as widgets except ImportError: widgets = None print('tkinter may not be available') def version_info(): return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__) def module_info(): import sys out = 'Python version %s' % sys.version out += '\n%s' % version_info() out += """ numpy version: %s""" % fg.np.__version__ try: import matplotlib out += '\nmatplotlib version: %s' % matplotlib.__version__ except ImportError: out += '\nmatplotlib version: None' try: import tkinter out += '\n tkinter version: %s' % tkinter.TkVersion except ImportError: out += '\n tkinter version: None' return out def check_general_functions(): print('dansfunctions/functions_general.py') print('Version: %s (%s)' % (fg.__version__, fg.__date__)) print('Methods:') print(fg.list_methods(fg, False)) def check_plotting_functions(): print('dansfunctions/functions_plotting.py') if fp is None: print('Matplotlib may not be available') return print('Version: %s (%s)' % (fp.__version__, fp.__date__)) print('Methods:') print(fg.list_methods(fp, False)) def check_tkinter_functions(): print('dansfunctions/tkgui/basic_widgets.py') if widgets is None: print('tkinter may not be available') return print('Version: %s (%s)' % (widgets.__version__, widgets.__date__)) print('Methods:') print(fg.list_methods(widgets, False)) <|reserved_special_token_1|> """ dansfunctions - various useful functions in python usage: >>import dansfunctions >>dansfunctions.fg # module of general mathematical, vector and string format functions >>dansfunctions.fp # module of matplotlib shortcuts >>dansfunctions.widgets # module of tkinter shortcuts Requirements: numpy Optional requirements: matplotlib, tkinter """ from . import functions_general as fg try: import matplotlib matplotlib.use('TkAgg') from . 
import functions_plotting as fp except ImportError: fp = None print('Matplotlib may not be available') try: from .tkgui import basic_widgets as widgets except ImportError: widgets = None print('tkinter may not be available') def version_info(): return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__) def module_info(): import sys out = 'Python version %s' % sys.version out += '\n%s' % version_info() # Modules out += '\n numpy version: %s' % fg.np.__version__ try: import matplotlib out += '\nmatplotlib version: %s' % matplotlib.__version__ except ImportError: out += '\nmatplotlib version: None' try: import tkinter out += '\n tkinter version: %s' % tkinter.TkVersion except ImportError: out += '\n tkinter version: None' return out def check_general_functions(): print('dansfunctions/functions_general.py') print('Version: %s (%s)' % (fg.__version__, fg.__date__)) print('Methods:') print(fg.list_methods(fg, False)) def check_plotting_functions(): print('dansfunctions/functions_plotting.py') if fp is None: print('Matplotlib may not be available') return print('Version: %s (%s)' % (fp.__version__, fp.__date__)) print('Methods:') print(fg.list_methods(fp, False)) def check_tkinter_functions(): print('dansfunctions/tkgui/basic_widgets.py') if widgets is None: print('tkinter may not be available') return print('Version: %s (%s)' % (widgets.__version__, widgets.__date__)) print('Methods:') print(fg.list_methods(widgets, False))
flexible
{ "blob_id": "0f266db39988cfce475380036f4f4f5b1a1fee1a", "index": 3647, "step-1": "<mask token>\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\n<mask token>\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-2": "<mask token>\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n out += \"\"\"\n numpy version: %s\"\"\" % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-3": "<mask token>\ntry:\n import matplotlib\n matplotlib.use('TkAgg')\n from . 
import functions_plotting as fp\nexcept ImportError:\n fp = None\n print('Matplotlib may not be available')\ntry:\n from .tkgui import basic_widgets as widgets\nexcept ImportError:\n widgets = None\n print('tkinter may not be available')\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n out += \"\"\"\n numpy version: %s\"\"\" % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-4": "<mask token>\nfrom . import functions_general as fg\ntry:\n import matplotlib\n matplotlib.use('TkAgg')\n from . import functions_plotting as fp\nexcept ImportError:\n fp = None\n print('Matplotlib may not be available')\ntry:\n from .tkgui import basic_widgets as widgets\nexcept ImportError:\n widgets = None\n print('tkinter may not be available')\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n out += \"\"\"\n numpy version: %s\"\"\" % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-5": "\"\"\"\ndansfunctions - various useful functions in python\nusage:\n>>import dansfunctions\n>>dansfunctions.fg # module of general mathematical, vector and string format functions\n>>dansfunctions.fp # module of matplotlib shortcuts\n>>dansfunctions.widgets # module of tkinter 
shortcuts\n\nRequirements: numpy\nOptional requirements: matplotlib, tkinter\n\"\"\"\n\nfrom . import functions_general as fg\n\ntry:\n import matplotlib\n matplotlib.use('TkAgg')\n from . import functions_plotting as fp\nexcept ImportError:\n fp = None\n print('Matplotlib may not be available')\n\ntry:\n from .tkgui import basic_widgets as widgets\nexcept ImportError:\n widgets = None\n print('tkinter may not be available')\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n # Modules\n out += '\\n numpy version: %s' % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
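The record above is a package __init__ that re-exports functions_general as fg, optional matplotlib helpers as fp, and optional tkinter widgets, alongside version and diagnostic helpers. A short usage sketch, assuming the package is importable as dansfunctions:

import dansfunctions

# One-line and multi-line environment summaries.
print(dansfunctions.version_info())
print(dansfunctions.module_info())

# List the available helpers; fp and widgets are None when matplotlib or tkinter is missing.
dansfunctions.check_general_functions()
if dansfunctions.fp is not None:
    dansfunctions.check_plotting_functions()
if dansfunctions.widgets is not None:
    dansfunctions.check_tkinter_functions()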
import os
from flask import Flask, render_template, request, redirect, url_for
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

app = Flask(__name__)

# Database connection (note: SQLAlchemy 1.4+ expects the "postgresql://" scheme
# instead of "postgres://"; the connection string is kept as in the original source).
engine = create_engine("postgres://lkghylsqhggivp:d827f6dc5637928e95e060761de590b7d9514e9463c5241ed3d652d777a4a3a9@ec2-52-200-16-99.compute-1.amazonaws.com:5432/d6d65s4otfm5cr")
db = scoped_session(sessionmaker(bind=engine))


@app.route("/")
def index():
    # Render the registration form.
    return render_template("a.html")


@app.route("/insert", methods=['POST'])
def insert():
    # Read the submitted form fields.
    firstname = request.form.get('firstname')
    lastname = request.form.get('lastname')
    dob = request.form.get('dob')
    gender = request.form.get('gender')
    aadharno = request.form.get('aadharno')
    address = request.form.get('address')
    # Insert the record using bound parameters, then return to the form.
    db.execute("insert into aadhar (firstname,lastname,dob,gender,aadharno,address) values (:firstname,:lastname,:dob,:gender,:aadharno,:address)",
               {"firstname": firstname, "lastname": lastname, "dob": dob,
                "gender": gender, "aadharno": aadharno, "address": address})
    db.commit()
    return redirect(url_for('index'))
normal
{ "blob_id": "af9430caff843242381d7c99d76ff3c964915700", "index": 6753, "step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('a.html')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('a.html')\n\n\[email protected]('/insert', methods=['POST'])\ndef insert():\n firstname = request.form.get('firstname')\n lastname = request.form.get('lastname')\n dob = request.form.get('dob')\n gender = request.form.get('gender')\n aadharno = request.form.get('aadharno')\n address = request.form.get('address')\n db.execute(\n 'insert into aadhar (firstname,lastname,dob,gender,aadharno,address) values (:firstname,:lastname,:dob,:gender,:aadharno,:address)'\n , {'firstname': firstname, 'lastname': lastname, 'dob': dob,\n 'gender': gender, 'aadharno': aadharno, 'address': address})\n db.commit()\n return redirect(url_for('index'))\n", "step-3": "<mask token>\napp = Flask(__name__)\nengine = create_engine(\n 'postgres://lkghylsqhggivp:d827f6dc5637928e95e060761de590b7d9514e9463c5241ed3d652d777a4a3a9@ec2-52-200-16-99.compute-1.amazonaws.com:5432/d6d65s4otfm5cr'\n )\ndb = scoped_session(sessionmaker(bind=engine))\n\n\[email protected]('/')\ndef index():\n return render_template('a.html')\n\n\[email protected]('/insert', methods=['POST'])\ndef insert():\n firstname = request.form.get('firstname')\n lastname = request.form.get('lastname')\n dob = request.form.get('dob')\n gender = request.form.get('gender')\n aadharno = request.form.get('aadharno')\n address = request.form.get('address')\n db.execute(\n 'insert into aadhar (firstname,lastname,dob,gender,aadharno,address) values (:firstname,:lastname,:dob,:gender,:aadharno,:address)'\n , {'firstname': firstname, 'lastname': lastname, 'dob': dob,\n 'gender': gender, 'aadharno': aadharno, 'address': address})\n db.commit()\n return redirect(url_for('index'))\n", "step-4": "import os\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\napp = Flask(__name__)\nengine = create_engine(\n 'postgres://lkghylsqhggivp:d827f6dc5637928e95e060761de590b7d9514e9463c5241ed3d652d777a4a3a9@ec2-52-200-16-99.compute-1.amazonaws.com:5432/d6d65s4otfm5cr'\n )\ndb = scoped_session(sessionmaker(bind=engine))\n\n\[email protected]('/')\ndef index():\n return render_template('a.html')\n\n\[email protected]('/insert', methods=['POST'])\ndef insert():\n firstname = request.form.get('firstname')\n lastname = request.form.get('lastname')\n dob = request.form.get('dob')\n gender = request.form.get('gender')\n aadharno = request.form.get('aadharno')\n address = request.form.get('address')\n db.execute(\n 'insert into aadhar (firstname,lastname,dob,gender,aadharno,address) values (:firstname,:lastname,:dob,:gender,:aadharno,:address)'\n , {'firstname': firstname, 'lastname': lastname, 'dob': dob,\n 'gender': gender, 'aadharno': aadharno, 'address': address})\n db.commit()\n return redirect(url_for('index'))\n", "step-5": "import os\r\n\r\nfrom flask import Flask,render_template,request,redirect,url_for\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import scoped_session,sessionmaker\r\n\r\napp = Flask(__name__)\r\n\r\nengine = create_engine(\"postgres://lkghylsqhggivp:d827f6dc5637928e95e060761de590b7d9514e9463c5241ed3d652d777a4a3a9@ec2-52-200-16-99.compute-1.amazonaws.com:5432/d6d65s4otfm5cr\")\r\ndb = scoped_session(sessionmaker(bind=engine))\r\[email protected](\"/\")\r\ndef 
index():\r\n return render_template(\"a.html\")\r\[email protected](\"/insert\",methods=['POST'])\r\ndef insert():\r\n firstname=request.form.get('firstname')\r\n lastname=request.form.get('lastname')\r\n dob=request.form.get('dob')\r\n gender=request.form.get('gender')\r\n aadharno=request.form.get('aadharno')\r\n address=request.form.get('address')\r\n db.execute(\"insert into aadhar (firstname,lastname,dob,gender,aadharno,address) values (:firstname,:lastname,:dob,:gender,:aadharno,:address)\",{\"firstname\":firstname ,\"lastname\":lastname,\"dob\":dob,\"gender\":gender,\"aadharno\":aadharno,\"address\" : address})\r\n db.commit()\r\n return redirect(url_for('index'))\r\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
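The record above is a small Flask + SQLAlchemy app that serves a form at "/" and inserts the posted fields into an aadhar table via "/insert". A hedged smoke test using Flask's built-in test client; the module name, the a.html template, and a reachable database containing the aadhar table are assumptions.

from app import app  # assumed module name for the record above

client = app.test_client()

# The index route renders the registration form (requires templates/a.html).
print(client.get('/').status_code)   # expected: 200

# Posting the form fields inserts a row and redirects back to the index.
resp = client.post('/insert', data={
    'firstname': 'Asha', 'lastname': 'Rao', 'dob': '1990-01-01',
    'gender': 'F', 'aadharno': '123412341234', 'address': 'Bengaluru',
})
print(resp.status_code)              # expected: 302 on a successful insert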
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> vals -= vals[:, np.newaxis].mean(-1) vals /= vals[:, np.newaxis].std(-1) <|reserved_special_token_0|> km.fit(vals) <|reserved_special_token_0|> for ii in range(len(zips)): tzip = int(zips.ZIPCODE[ii]) if tzip in dzips: zips['cluster'][ii] = km.labels_[dzips.index(tzip)] <|reserved_special_token_0|> for tcluster in range(km.n_clusters): print('tcluster = ' + str(tcluster)) zips['color'][zips['cluster'] == tcluster] = 'red' zips['color'][zips['cluster'] != tcluster] = 'none' close('all') yrs = range(2004, 2016) fig, ax = plt.subplots(1, 2, figsize=[10, 5]) fig.set_facecolor('white') ax[1].set_xlim([-74.26, -74.26 + 0.6]) ax[1].set_ylim([40.4, 40.4 + 0.6]) ax[1].axis('off') for ii in range(len(zips)): geo = zips['geometry'][ii] tzip = zips.ZIPCODE[ii] if type(geo) == shapely.geometry.polygon.Polygon: ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii], linewidth=0.2)) ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1) ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred') ax[0].set_title('Cluster {0}'.format(tcluster)) fig.canvas.draw() fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km. n_clusters), clobber=True) <|reserved_special_token_1|> <|reserved_special_token_0|> data = pd.read_csv( '/scratch/share/gdobler/parqa/output/Tables/ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv' ) zips = gp.GeoDataFrame.from_file( '/scratch/share/gdobler/parqa/output/ShapeData/ZIPCODE_Modified_Final.shp') cols = ['F2{0:03}'.format(i) for i in range(4, 16)] vals = data[cols].values vals -= vals[:, np.newaxis].mean(-1) vals /= vals[:, np.newaxis].std(-1) km = KMeans(n_clusters=5) km.fit(vals) zips['cluster'] = np.zeros(len(zips), dtype=int) - 1 dzips = [i for i in data.ZIPCODE] for ii in range(len(zips)): tzip = int(zips.ZIPCODE[ii]) if tzip in dzips: zips['cluster'][ii] = km.labels_[dzips.index(tzip)] zips['color'] = np.zeros(len(zips), dtype=str) for tcluster in range(km.n_clusters): print('tcluster = ' + str(tcluster)) zips['color'][zips['cluster'] == tcluster] = 'red' zips['color'][zips['cluster'] != tcluster] = 'none' close('all') yrs = range(2004, 2016) fig, ax = plt.subplots(1, 2, figsize=[10, 5]) fig.set_facecolor('white') ax[1].set_xlim([-74.26, -74.26 + 0.6]) ax[1].set_ylim([40.4, 40.4 + 0.6]) ax[1].axis('off') for ii in range(len(zips)): geo = zips['geometry'][ii] tzip = zips.ZIPCODE[ii] if type(geo) == shapely.geometry.polygon.Polygon: ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii], linewidth=0.2)) ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1) ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred') ax[0].set_title('Cluster {0}'.format(tcluster)) fig.canvas.draw() fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km. 
n_clusters), clobber=True) <|reserved_special_token_1|> import numpy as np import pandas as pd import geopandas as gp from sklearn.cluster import KMeans import shapely from descartes import PolygonPatch data = pd.read_csv( '/scratch/share/gdobler/parqa/output/Tables/ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv' ) zips = gp.GeoDataFrame.from_file( '/scratch/share/gdobler/parqa/output/ShapeData/ZIPCODE_Modified_Final.shp') cols = ['F2{0:03}'.format(i) for i in range(4, 16)] vals = data[cols].values vals -= vals[:, np.newaxis].mean(-1) vals /= vals[:, np.newaxis].std(-1) km = KMeans(n_clusters=5) km.fit(vals) zips['cluster'] = np.zeros(len(zips), dtype=int) - 1 dzips = [i for i in data.ZIPCODE] for ii in range(len(zips)): tzip = int(zips.ZIPCODE[ii]) if tzip in dzips: zips['cluster'][ii] = km.labels_[dzips.index(tzip)] zips['color'] = np.zeros(len(zips), dtype=str) for tcluster in range(km.n_clusters): print('tcluster = ' + str(tcluster)) zips['color'][zips['cluster'] == tcluster] = 'red' zips['color'][zips['cluster'] != tcluster] = 'none' close('all') yrs = range(2004, 2016) fig, ax = plt.subplots(1, 2, figsize=[10, 5]) fig.set_facecolor('white') ax[1].set_xlim([-74.26, -74.26 + 0.6]) ax[1].set_ylim([40.4, 40.4 + 0.6]) ax[1].axis('off') for ii in range(len(zips)): geo = zips['geometry'][ii] tzip = zips.ZIPCODE[ii] if type(geo) == shapely.geometry.polygon.Polygon: ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii], linewidth=0.2)) ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1) ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred') ax[0].set_title('Cluster {0}'.format(tcluster)) fig.canvas.draw() fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km. n_clusters), clobber=True) <|reserved_special_token_1|> import numpy as np import pandas as pd import geopandas as gp from sklearn.cluster import KMeans import shapely from descartes import PolygonPatch # -- load the data data = pd.read_csv('/scratch/share/gdobler/parqa/output/Tables/' 'ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv') zips = gp.GeoDataFrame.from_file('/scratch/share/gdobler/parqa/output/' 'ShapeData/ZIPCODE_Modified_Final.shp') # -- prepare the data cols = ['F2{0:03}'.format(i) for i in range(4,16)] vals = data[cols].values vals -=vals[:,np.newaxis].mean(-1) vals /=vals[:,np.newaxis].std(-1) # -- cluster km = KMeans(n_clusters=5) km.fit(vals) # -- assign clusters to zips zips['cluster'] = np.zeros(len(zips),dtype=int)-1 dzips = [i for i in data.ZIPCODE] for ii in range(len(zips)): tzip = int(zips.ZIPCODE[ii]) if tzip in dzips: zips['cluster'][ii] = km.labels_[dzips.index(tzip)] # -- assign color zips['color'] = np.zeros(len(zips),dtype=str) for tcluster in range(km.n_clusters): print("tcluster = " + str(tcluster)) zips['color'][zips['cluster']==tcluster] = 'red' zips['color'][zips['cluster']!=tcluster] = 'none' # -- plot close('all') yrs = range(2004,2016) fig, ax = plt.subplots(1,2,figsize=[10,5]) fig.set_facecolor('white') ax[1].set_xlim([-74.26,-74.26+0.6]) ax[1].set_ylim([40.4,40.4+0.6]) ax[1].axis('off') for ii in range(len(zips)): geo = zips['geometry'][ii] tzip = zips.ZIPCODE[ii] if type(geo)==shapely.geometry.polygon.Polygon: ax[1].add_patch(PolygonPatch(geo,fc=zips['color'][ii], linewidth=0.2)) ax[0].plot(yrs,vals[km.labels_==tcluster].T,color='k',lw=0.1) ax[0].plot(yrs,km.cluster_centers_[tcluster],color='indianred') ax[0].set_title('Cluster {0}'.format(tcluster)) fig.canvas.draw() fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km.n_clusters), 
clobber=True)
flexible
{ "blob_id": "2c181a33c84ce262404c192abdc515924a1916a9", "index": 6165, "step-1": "<mask token>\n", "step-2": "<mask token>\nvals -= vals[:, np.newaxis].mean(-1)\nvals /= vals[:, np.newaxis].std(-1)\n<mask token>\nkm.fit(vals)\n<mask token>\nfor ii in range(len(zips)):\n tzip = int(zips.ZIPCODE[ii])\n if tzip in dzips:\n zips['cluster'][ii] = km.labels_[dzips.index(tzip)]\n<mask token>\nfor tcluster in range(km.n_clusters):\n print('tcluster = ' + str(tcluster))\n zips['color'][zips['cluster'] == tcluster] = 'red'\n zips['color'][zips['cluster'] != tcluster] = 'none'\n close('all')\n yrs = range(2004, 2016)\n fig, ax = plt.subplots(1, 2, figsize=[10, 5])\n fig.set_facecolor('white')\n ax[1].set_xlim([-74.26, -74.26 + 0.6])\n ax[1].set_ylim([40.4, 40.4 + 0.6])\n ax[1].axis('off')\n for ii in range(len(zips)):\n geo = zips['geometry'][ii]\n tzip = zips.ZIPCODE[ii]\n if type(geo) == shapely.geometry.polygon.Polygon:\n ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii],\n linewidth=0.2))\n ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1)\n ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred')\n ax[0].set_title('Cluster {0}'.format(tcluster))\n fig.canvas.draw()\n fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km.\n n_clusters), clobber=True)\n", "step-3": "<mask token>\ndata = pd.read_csv(\n '/scratch/share/gdobler/parqa/output/Tables/ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv'\n )\nzips = gp.GeoDataFrame.from_file(\n '/scratch/share/gdobler/parqa/output/ShapeData/ZIPCODE_Modified_Final.shp')\ncols = ['F2{0:03}'.format(i) for i in range(4, 16)]\nvals = data[cols].values\nvals -= vals[:, np.newaxis].mean(-1)\nvals /= vals[:, np.newaxis].std(-1)\nkm = KMeans(n_clusters=5)\nkm.fit(vals)\nzips['cluster'] = np.zeros(len(zips), dtype=int) - 1\ndzips = [i for i in data.ZIPCODE]\nfor ii in range(len(zips)):\n tzip = int(zips.ZIPCODE[ii])\n if tzip in dzips:\n zips['cluster'][ii] = km.labels_[dzips.index(tzip)]\nzips['color'] = np.zeros(len(zips), dtype=str)\nfor tcluster in range(km.n_clusters):\n print('tcluster = ' + str(tcluster))\n zips['color'][zips['cluster'] == tcluster] = 'red'\n zips['color'][zips['cluster'] != tcluster] = 'none'\n close('all')\n yrs = range(2004, 2016)\n fig, ax = plt.subplots(1, 2, figsize=[10, 5])\n fig.set_facecolor('white')\n ax[1].set_xlim([-74.26, -74.26 + 0.6])\n ax[1].set_ylim([40.4, 40.4 + 0.6])\n ax[1].axis('off')\n for ii in range(len(zips)):\n geo = zips['geometry'][ii]\n tzip = zips.ZIPCODE[ii]\n if type(geo) == shapely.geometry.polygon.Polygon:\n ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii],\n linewidth=0.2))\n ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1)\n ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred')\n ax[0].set_title('Cluster {0}'.format(tcluster))\n fig.canvas.draw()\n fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km.\n n_clusters), clobber=True)\n", "step-4": "import numpy as np\nimport pandas as pd\nimport geopandas as gp\nfrom sklearn.cluster import KMeans\nimport shapely\nfrom descartes import PolygonPatch\ndata = pd.read_csv(\n '/scratch/share/gdobler/parqa/output/Tables/ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv'\n )\nzips = gp.GeoDataFrame.from_file(\n '/scratch/share/gdobler/parqa/output/ShapeData/ZIPCODE_Modified_Final.shp')\ncols = ['F2{0:03}'.format(i) for i in range(4, 16)]\nvals = data[cols].values\nvals -= vals[:, np.newaxis].mean(-1)\nvals /= vals[:, np.newaxis].std(-1)\nkm = 
KMeans(n_clusters=5)\nkm.fit(vals)\nzips['cluster'] = np.zeros(len(zips), dtype=int) - 1\ndzips = [i for i in data.ZIPCODE]\nfor ii in range(len(zips)):\n tzip = int(zips.ZIPCODE[ii])\n if tzip in dzips:\n zips['cluster'][ii] = km.labels_[dzips.index(tzip)]\nzips['color'] = np.zeros(len(zips), dtype=str)\nfor tcluster in range(km.n_clusters):\n print('tcluster = ' + str(tcluster))\n zips['color'][zips['cluster'] == tcluster] = 'red'\n zips['color'][zips['cluster'] != tcluster] = 'none'\n close('all')\n yrs = range(2004, 2016)\n fig, ax = plt.subplots(1, 2, figsize=[10, 5])\n fig.set_facecolor('white')\n ax[1].set_xlim([-74.26, -74.26 + 0.6])\n ax[1].set_ylim([40.4, 40.4 + 0.6])\n ax[1].axis('off')\n for ii in range(len(zips)):\n geo = zips['geometry'][ii]\n tzip = zips.ZIPCODE[ii]\n if type(geo) == shapely.geometry.polygon.Polygon:\n ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii],\n linewidth=0.2))\n ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1)\n ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred')\n ax[0].set_title('Cluster {0}'.format(tcluster))\n fig.canvas.draw()\n fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km.\n n_clusters), clobber=True)\n", "step-5": "import numpy as np\nimport pandas as pd\nimport geopandas as gp\nfrom sklearn.cluster import KMeans\nimport shapely\nfrom descartes import PolygonPatch\n\n\n# -- load the data\ndata = pd.read_csv('/scratch/share/gdobler/parqa/output/Tables/'\n 'ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv')\n\nzips = gp.GeoDataFrame.from_file('/scratch/share/gdobler/parqa/output/'\n 'ShapeData/ZIPCODE_Modified_Final.shp')\n\n# -- prepare the data\ncols = ['F2{0:03}'.format(i) for i in range(4,16)]\nvals = data[cols].values\nvals -=vals[:,np.newaxis].mean(-1)\nvals /=vals[:,np.newaxis].std(-1)\n\n# -- cluster\nkm = KMeans(n_clusters=5)\nkm.fit(vals)\n\n# -- assign clusters to zips\nzips['cluster'] = np.zeros(len(zips),dtype=int)-1\ndzips = [i for i in data.ZIPCODE]\n\nfor ii in range(len(zips)):\n tzip = int(zips.ZIPCODE[ii])\n if tzip in dzips:\n zips['cluster'][ii] = km.labels_[dzips.index(tzip)]\n\n\n# -- assign color\nzips['color'] = np.zeros(len(zips),dtype=str)\nfor tcluster in range(km.n_clusters):\n print(\"tcluster = \" + str(tcluster))\n zips['color'][zips['cluster']==tcluster] = 'red'\n zips['color'][zips['cluster']!=tcluster] = 'none'\n\n # -- plot\n close('all')\n yrs = range(2004,2016)\n fig, ax = plt.subplots(1,2,figsize=[10,5])\n fig.set_facecolor('white')\n ax[1].set_xlim([-74.26,-74.26+0.6])\n ax[1].set_ylim([40.4,40.4+0.6])\n ax[1].axis('off')\n for ii in range(len(zips)):\n geo = zips['geometry'][ii]\n tzip = zips.ZIPCODE[ii]\n if type(geo)==shapely.geometry.polygon.Polygon:\n ax[1].add_patch(PolygonPatch(geo,fc=zips['color'][ii],\n linewidth=0.2))\n\n ax[0].plot(yrs,vals[km.labels_==tcluster].T,color='k',lw=0.1)\n ax[0].plot(yrs,km.cluster_centers_[tcluster],color='indianred')\n ax[0].set_title('Cluster {0}'.format(tcluster))\n fig.canvas.draw()\n fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster,\n km.n_clusters),\n clobber=True)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
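The final step of the record above clusters zip-code park-quality time series with KMeans and then plots each cluster, but the plotting loop calls close('all') and plt.subplots without importing matplotlib.pyplot, and passes clobber=True, which is not a documented Figure.savefig keyword. A minimal preamble that would make that loop runnable outside an interactive pylab session (the backend choice is an assumption):

import matplotlib
matplotlib.use('Agg')                 # headless backend for batch figure output
import matplotlib.pyplot as plt       # provides plt.subplots used in the loop
from matplotlib.pyplot import close   # the loop calls close('all') unqualified

# Depending on the matplotlib version, the unknown clobber keyword may be ignored
# or rejected by savefig; a safe equivalent call is simply:
# fig.savefig('../Outputs/cluster_0_5.png', dpi=150)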
<|reserved_special_token_0|> def info(msg): if config['log_level'] not in ('ERROR', 'WARNING', 'WARN'): print(config['prefix'] + 'INFO> ' + msg) log_count['INFO'] += 1 <|reserved_special_token_0|> def warning(msg): if config.get('log_level') != 'ERROR': print(config['prefix'] + 'WARN> ' + msg) log_count['WARN'] += 1 <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def info(msg): if config['log_level'] not in ('ERROR', 'WARNING', 'WARN'): print(config['prefix'] + 'INFO> ' + msg) log_count['INFO'] += 1 def warn(msg): if config.get('log_level') != 'ERROR': print(config['prefix'] + 'WARN> ' + msg) log_count['WARN'] += 1 def warning(msg): if config.get('log_level') != 'ERROR': print(config['prefix'] + 'WARN> ' + msg) log_count['WARN'] += 1 <|reserved_special_token_0|> def request(req): app = req.app domain = getDomainFromRequest(req, validate=False) if domain is None: print('REQ> {}: {}'.format(req.method, req.path)) else: print('REQ> {}: {} [{}]'.format(req.method, req.path, domain)) if req.path in ('/about', '/register', '/info', '/nodeinfo', '/nodestate', '/register'): return node_state = app['node_state'] if 'node_state' in app else None if node_state != 'READY': warning(f'returning 503 - node_state: {node_state}') raise HTTPServiceUnavailable() if req.method in ('GET', 'POST', 'PUT', 'DELETE'): req_count[req.method] += 1 num_tasks = len(asyncio.Task.all_tasks()) active_tasks = len([task for task in asyncio.Task.all_tasks() if not task.done()]) req_count['num_tasks'] = num_tasks if config['log_level'] == 'DEBUG': debug(f'num tasks: {num_tasks} active tasks: {active_tasks}') max_task_count = app['max_task_count'] if app['node_type' ] == 'sn' and max_task_count and active_tasks > max_task_count: warning(f'more than {max_task_count} tasks, returning 503') raise HTTPServiceUnavailable() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> req_count = {'GET': 0, 'POST': 0, 'PUT': 0, 'DELETE': 0, 'num_tasks': 0} log_count = {'DEBUG': 0, 'INFO': 0, 'WARN': 0, 'ERROR': 0} config = {'log_level': 'DEBUG', 'prefix': ''} def debug(msg): if config['log_level'] == 'DEBUG': print(config['prefix'] + 'DEBUG> ' + msg) log_count['DEBUG'] += 1 def info(msg): if config['log_level'] not in ('ERROR', 'WARNING', 'WARN'): print(config['prefix'] + 'INFO> ' + msg) log_count['INFO'] += 1 def warn(msg): if config.get('log_level') != 'ERROR': print(config['prefix'] + 'WARN> ' + msg) log_count['WARN'] += 1 def warning(msg): if config.get('log_level') != 'ERROR': print(config['prefix'] + 'WARN> ' + msg) log_count['WARN'] += 1 def error(msg): print(config['prefix'] + 'ERROR> ' + msg) log_count['ERROR'] += 1 def request(req): app = req.app domain = getDomainFromRequest(req, validate=False) if domain is None: print('REQ> {}: {}'.format(req.method, req.path)) else: print('REQ> {}: {} [{}]'.format(req.method, req.path, domain)) if req.path in ('/about', '/register', '/info', '/nodeinfo', '/nodestate', '/register'): return node_state = app['node_state'] if 'node_state' in app else None if node_state != 'READY': warning(f'returning 503 - node_state: {node_state}') raise HTTPServiceUnavailable() if req.method in ('GET', 'POST', 'PUT', 'DELETE'): req_count[req.method] += 1 num_tasks = len(asyncio.Task.all_tasks()) active_tasks = len([task for task in asyncio.Task.all_tasks() if not task.done()]) req_count['num_tasks'] = num_tasks if config['log_level'] == 'DEBUG': debug(f'num tasks: {num_tasks} active tasks: {active_tasks}') max_task_count = 
app['max_task_count'] if app['node_type' ] == 'sn' and max_task_count and active_tasks > max_task_count: warning(f'more than {max_task_count} tasks, returning 503') raise HTTPServiceUnavailable() def response(req, resp=None, code=None, message=None): level = 'INFO' if code is None: code = resp.status if message is None: message = resp.reason if code > 399: if code < 500: level = 'WARN' else: level = 'ERROR' log_level = config['log_level'] prefix = config['prefix'] if (log_level in ('DEBUG', 'INFO') or log_level == 'WARN' and level != 'INFO' or log_level == 'ERROR' and level == 'ERROR'): print('{}{} RSP> <{}> ({}): {}'.format(prefix, level, code, message, req.path)) <|reserved_special_token_1|> import asyncio from aiohttp.web_exceptions import HTTPServiceUnavailable from .util.domainUtil import getDomainFromRequest req_count = {'GET': 0, 'POST': 0, 'PUT': 0, 'DELETE': 0, 'num_tasks': 0} log_count = {'DEBUG': 0, 'INFO': 0, 'WARN': 0, 'ERROR': 0} config = {'log_level': 'DEBUG', 'prefix': ''} def debug(msg): if config['log_level'] == 'DEBUG': print(config['prefix'] + 'DEBUG> ' + msg) log_count['DEBUG'] += 1 def info(msg): if config['log_level'] not in ('ERROR', 'WARNING', 'WARN'): print(config['prefix'] + 'INFO> ' + msg) log_count['INFO'] += 1 def warn(msg): if config.get('log_level') != 'ERROR': print(config['prefix'] + 'WARN> ' + msg) log_count['WARN'] += 1 def warning(msg): if config.get('log_level') != 'ERROR': print(config['prefix'] + 'WARN> ' + msg) log_count['WARN'] += 1 def error(msg): print(config['prefix'] + 'ERROR> ' + msg) log_count['ERROR'] += 1 def request(req): app = req.app domain = getDomainFromRequest(req, validate=False) if domain is None: print('REQ> {}: {}'.format(req.method, req.path)) else: print('REQ> {}: {} [{}]'.format(req.method, req.path, domain)) if req.path in ('/about', '/register', '/info', '/nodeinfo', '/nodestate', '/register'): return node_state = app['node_state'] if 'node_state' in app else None if node_state != 'READY': warning(f'returning 503 - node_state: {node_state}') raise HTTPServiceUnavailable() if req.method in ('GET', 'POST', 'PUT', 'DELETE'): req_count[req.method] += 1 num_tasks = len(asyncio.Task.all_tasks()) active_tasks = len([task for task in asyncio.Task.all_tasks() if not task.done()]) req_count['num_tasks'] = num_tasks if config['log_level'] == 'DEBUG': debug(f'num tasks: {num_tasks} active tasks: {active_tasks}') max_task_count = app['max_task_count'] if app['node_type' ] == 'sn' and max_task_count and active_tasks > max_task_count: warning(f'more than {max_task_count} tasks, returning 503') raise HTTPServiceUnavailable() def response(req, resp=None, code=None, message=None): level = 'INFO' if code is None: code = resp.status if message is None: message = resp.reason if code > 399: if code < 500: level = 'WARN' else: level = 'ERROR' log_level = config['log_level'] prefix = config['prefix'] if (log_level in ('DEBUG', 'INFO') or log_level == 'WARN' and level != 'INFO' or log_level == 'ERROR' and level == 'ERROR'): print('{}{} RSP> <{}> ({}): {}'.format(prefix, level, code, message, req.path)) <|reserved_special_token_1|> ############################################################################## # Copyright by The HDF Group. # # All rights reserved. # # # # This file is part of HSDS (HDF5 Scalable Data Service), Libraries and # # Utilities. 
The full HSDS copyright notice, including # # terms governing use, modification, and redistribution, is contained in # # the file COPYING, which can be found at the root of the source code # # distribution tree. If you do not have access to this file, you may # # request a copy from [email protected]. # ############################################################################## # # Simple looger for hsds # import asyncio from aiohttp.web_exceptions import HTTPServiceUnavailable from .util.domainUtil import getDomainFromRequest req_count = {"GET": 0, "POST": 0, "PUT": 0, "DELETE": 0, "num_tasks": 0} log_count = {"DEBUG": 0, "INFO": 0, "WARN": 0, "ERROR": 0} # the following defaults will be adjusted by the app config = {"log_level": "DEBUG", "prefix": ""} def debug(msg): if config["log_level"] == "DEBUG": print(config["prefix"] + "DEBUG> " + msg) log_count["DEBUG"] += 1 def info(msg): if config["log_level"] not in ("ERROR", "WARNING", "WARN"): print(config["prefix"] + "INFO> " + msg) log_count["INFO"] += 1 def warn(msg): if config.get("log_level") != "ERROR": print(config["prefix"] + "WARN> " + msg) log_count["WARN"] += 1 def warning(msg): if config.get("log_level") != "ERROR": print(config["prefix"] + "WARN> " + msg) log_count["WARN"] += 1 def error(msg): print(config["prefix"] + "ERROR> " + msg) log_count["ERROR"] += 1 def request(req): app = req.app domain = getDomainFromRequest(req, validate=False) if domain is None: print("REQ> {}: {}".format(req.method, req.path)) else: print("REQ> {}: {} [{}]".format(req.method, req.path, domain)) if req.path in ("/about", "/register", "/info", "/nodeinfo", "/nodestate", "/register"): # always service these state requests regardles of node state and task load return node_state = app["node_state"] if "node_state" in app else None if node_state != "READY": warning(f"returning 503 - node_state: {node_state}") raise HTTPServiceUnavailable() if req.method in ("GET", "POST", "PUT", "DELETE"): req_count[req.method] += 1 num_tasks = len(asyncio.Task.all_tasks()) active_tasks = len([task for task in asyncio.Task.all_tasks() if not task.done()]) req_count["num_tasks"] = num_tasks if config["log_level"] == "DEBUG": debug(f"num tasks: {num_tasks} active tasks: {active_tasks}") max_task_count = app["max_task_count"] if app["node_type"] == "sn" and max_task_count and active_tasks > max_task_count: warning(f"more than {max_task_count} tasks, returning 503") raise HTTPServiceUnavailable() def response(req, resp=None, code=None, message=None): level = "INFO" if code is None: # rsp needs to be set otherwise code = resp.status if message is None: message=resp.reason if code > 399: if code < 500: level = "WARN" else: level = "ERROR" log_level = config["log_level"] prefix = config["prefix"] if log_level in ("DEBUG", "INFO") or (log_level == "WARN" and level != "INFO") or (log_level == "ERROR" and level == "ERROR"): print("{}{} RSP> <{}> ({}): {}".format(prefix, level, code, message, req.path))
flexible
{ "blob_id": "e15ea7d167aad470d0a2d95a8a328b35181e4dc3", "index": 7832, "step-1": "<mask token>\n\n\ndef info(msg):\n if config['log_level'] not in ('ERROR', 'WARNING', 'WARN'):\n print(config['prefix'] + 'INFO> ' + msg)\n log_count['INFO'] += 1\n\n\n<mask token>\n\n\ndef warning(msg):\n if config.get('log_level') != 'ERROR':\n print(config['prefix'] + 'WARN> ' + msg)\n log_count['WARN'] += 1\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef info(msg):\n if config['log_level'] not in ('ERROR', 'WARNING', 'WARN'):\n print(config['prefix'] + 'INFO> ' + msg)\n log_count['INFO'] += 1\n\n\ndef warn(msg):\n if config.get('log_level') != 'ERROR':\n print(config['prefix'] + 'WARN> ' + msg)\n log_count['WARN'] += 1\n\n\ndef warning(msg):\n if config.get('log_level') != 'ERROR':\n print(config['prefix'] + 'WARN> ' + msg)\n log_count['WARN'] += 1\n\n\n<mask token>\n\n\ndef request(req):\n app = req.app\n domain = getDomainFromRequest(req, validate=False)\n if domain is None:\n print('REQ> {}: {}'.format(req.method, req.path))\n else:\n print('REQ> {}: {} [{}]'.format(req.method, req.path, domain))\n if req.path in ('/about', '/register', '/info', '/nodeinfo',\n '/nodestate', '/register'):\n return\n node_state = app['node_state'] if 'node_state' in app else None\n if node_state != 'READY':\n warning(f'returning 503 - node_state: {node_state}')\n raise HTTPServiceUnavailable()\n if req.method in ('GET', 'POST', 'PUT', 'DELETE'):\n req_count[req.method] += 1\n num_tasks = len(asyncio.Task.all_tasks())\n active_tasks = len([task for task in asyncio.Task.all_tasks() if not\n task.done()])\n req_count['num_tasks'] = num_tasks\n if config['log_level'] == 'DEBUG':\n debug(f'num tasks: {num_tasks} active tasks: {active_tasks}')\n max_task_count = app['max_task_count']\n if app['node_type'\n ] == 'sn' and max_task_count and active_tasks > max_task_count:\n warning(f'more than {max_task_count} tasks, returning 503')\n raise HTTPServiceUnavailable()\n\n\n<mask token>\n", "step-3": "<mask token>\nreq_count = {'GET': 0, 'POST': 0, 'PUT': 0, 'DELETE': 0, 'num_tasks': 0}\nlog_count = {'DEBUG': 0, 'INFO': 0, 'WARN': 0, 'ERROR': 0}\nconfig = {'log_level': 'DEBUG', 'prefix': ''}\n\n\ndef debug(msg):\n if config['log_level'] == 'DEBUG':\n print(config['prefix'] + 'DEBUG> ' + msg)\n log_count['DEBUG'] += 1\n\n\ndef info(msg):\n if config['log_level'] not in ('ERROR', 'WARNING', 'WARN'):\n print(config['prefix'] + 'INFO> ' + msg)\n log_count['INFO'] += 1\n\n\ndef warn(msg):\n if config.get('log_level') != 'ERROR':\n print(config['prefix'] + 'WARN> ' + msg)\n log_count['WARN'] += 1\n\n\ndef warning(msg):\n if config.get('log_level') != 'ERROR':\n print(config['prefix'] + 'WARN> ' + msg)\n log_count['WARN'] += 1\n\n\ndef error(msg):\n print(config['prefix'] + 'ERROR> ' + msg)\n log_count['ERROR'] += 1\n\n\ndef request(req):\n app = req.app\n domain = getDomainFromRequest(req, validate=False)\n if domain is None:\n print('REQ> {}: {}'.format(req.method, req.path))\n else:\n print('REQ> {}: {} [{}]'.format(req.method, req.path, domain))\n if req.path in ('/about', '/register', '/info', '/nodeinfo',\n '/nodestate', '/register'):\n return\n node_state = app['node_state'] if 'node_state' in app else None\n if node_state != 'READY':\n warning(f'returning 503 - node_state: {node_state}')\n raise HTTPServiceUnavailable()\n if req.method in ('GET', 'POST', 'PUT', 'DELETE'):\n req_count[req.method] += 1\n num_tasks = len(asyncio.Task.all_tasks())\n active_tasks = len([task for task in asyncio.Task.all_tasks() if not\n 
task.done()])\n req_count['num_tasks'] = num_tasks\n if config['log_level'] == 'DEBUG':\n debug(f'num tasks: {num_tasks} active tasks: {active_tasks}')\n max_task_count = app['max_task_count']\n if app['node_type'\n ] == 'sn' and max_task_count and active_tasks > max_task_count:\n warning(f'more than {max_task_count} tasks, returning 503')\n raise HTTPServiceUnavailable()\n\n\ndef response(req, resp=None, code=None, message=None):\n level = 'INFO'\n if code is None:\n code = resp.status\n if message is None:\n message = resp.reason\n if code > 399:\n if code < 500:\n level = 'WARN'\n else:\n level = 'ERROR'\n log_level = config['log_level']\n prefix = config['prefix']\n if (log_level in ('DEBUG', 'INFO') or log_level == 'WARN' and level !=\n 'INFO' or log_level == 'ERROR' and level == 'ERROR'):\n print('{}{} RSP> <{}> ({}): {}'.format(prefix, level, code, message,\n req.path))\n", "step-4": "import asyncio\nfrom aiohttp.web_exceptions import HTTPServiceUnavailable\nfrom .util.domainUtil import getDomainFromRequest\nreq_count = {'GET': 0, 'POST': 0, 'PUT': 0, 'DELETE': 0, 'num_tasks': 0}\nlog_count = {'DEBUG': 0, 'INFO': 0, 'WARN': 0, 'ERROR': 0}\nconfig = {'log_level': 'DEBUG', 'prefix': ''}\n\n\ndef debug(msg):\n if config['log_level'] == 'DEBUG':\n print(config['prefix'] + 'DEBUG> ' + msg)\n log_count['DEBUG'] += 1\n\n\ndef info(msg):\n if config['log_level'] not in ('ERROR', 'WARNING', 'WARN'):\n print(config['prefix'] + 'INFO> ' + msg)\n log_count['INFO'] += 1\n\n\ndef warn(msg):\n if config.get('log_level') != 'ERROR':\n print(config['prefix'] + 'WARN> ' + msg)\n log_count['WARN'] += 1\n\n\ndef warning(msg):\n if config.get('log_level') != 'ERROR':\n print(config['prefix'] + 'WARN> ' + msg)\n log_count['WARN'] += 1\n\n\ndef error(msg):\n print(config['prefix'] + 'ERROR> ' + msg)\n log_count['ERROR'] += 1\n\n\ndef request(req):\n app = req.app\n domain = getDomainFromRequest(req, validate=False)\n if domain is None:\n print('REQ> {}: {}'.format(req.method, req.path))\n else:\n print('REQ> {}: {} [{}]'.format(req.method, req.path, domain))\n if req.path in ('/about', '/register', '/info', '/nodeinfo',\n '/nodestate', '/register'):\n return\n node_state = app['node_state'] if 'node_state' in app else None\n if node_state != 'READY':\n warning(f'returning 503 - node_state: {node_state}')\n raise HTTPServiceUnavailable()\n if req.method in ('GET', 'POST', 'PUT', 'DELETE'):\n req_count[req.method] += 1\n num_tasks = len(asyncio.Task.all_tasks())\n active_tasks = len([task for task in asyncio.Task.all_tasks() if not\n task.done()])\n req_count['num_tasks'] = num_tasks\n if config['log_level'] == 'DEBUG':\n debug(f'num tasks: {num_tasks} active tasks: {active_tasks}')\n max_task_count = app['max_task_count']\n if app['node_type'\n ] == 'sn' and max_task_count and active_tasks > max_task_count:\n warning(f'more than {max_task_count} tasks, returning 503')\n raise HTTPServiceUnavailable()\n\n\ndef response(req, resp=None, code=None, message=None):\n level = 'INFO'\n if code is None:\n code = resp.status\n if message is None:\n message = resp.reason\n if code > 399:\n if code < 500:\n level = 'WARN'\n else:\n level = 'ERROR'\n log_level = config['log_level']\n prefix = config['prefix']\n if (log_level in ('DEBUG', 'INFO') or log_level == 'WARN' and level !=\n 'INFO' or log_level == 'ERROR' and level == 'ERROR'):\n print('{}{} RSP> <{}> ({}): {}'.format(prefix, level, code, message,\n req.path))\n", "step-5": "##############################################################################\n# 
Copyright by The HDF Group. #\n# All rights reserved. #\n# #\n# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #\n# Utilities. The full HSDS copyright notice, including #\n# terms governing use, modification, and redistribution, is contained in #\n# the file COPYING, which can be found at the root of the source code #\n# distribution tree. If you do not have access to this file, you may #\n# request a copy from [email protected]. #\n##############################################################################\n#\n# Simple looger for hsds\n#\nimport asyncio\nfrom aiohttp.web_exceptions import HTTPServiceUnavailable\nfrom .util.domainUtil import getDomainFromRequest\n\nreq_count = {\"GET\": 0, \"POST\": 0, \"PUT\": 0, \"DELETE\": 0, \"num_tasks\": 0}\nlog_count = {\"DEBUG\": 0, \"INFO\": 0, \"WARN\": 0, \"ERROR\": 0}\n# the following defaults will be adjusted by the app\nconfig = {\"log_level\": \"DEBUG\", \"prefix\": \"\"}\n\n\ndef debug(msg):\n\tif config[\"log_level\"] == \"DEBUG\":\n\t\tprint(config[\"prefix\"] + \"DEBUG> \" + msg)\n\t\tlog_count[\"DEBUG\"] += 1\n\ndef info(msg):\n\tif config[\"log_level\"] not in (\"ERROR\", \"WARNING\", \"WARN\"):\n\t\tprint(config[\"prefix\"] + \"INFO> \" + msg)\n\t\tlog_count[\"INFO\"] += 1\n\ndef warn(msg):\n\tif config.get(\"log_level\") != \"ERROR\":\n\t\tprint(config[\"prefix\"] + \"WARN> \" + msg)\n\t\tlog_count[\"WARN\"] += 1\n\ndef warning(msg):\n\tif config.get(\"log_level\") != \"ERROR\":\n\t\tprint(config[\"prefix\"] + \"WARN> \" + msg)\n\t\tlog_count[\"WARN\"] += 1\n\ndef error(msg):\n\tprint(config[\"prefix\"] + \"ERROR> \" + msg)\n\tlog_count[\"ERROR\"] += 1\n\ndef request(req):\n\tapp = req.app\n\tdomain = getDomainFromRequest(req, validate=False)\n\tif domain is None:\n\t\tprint(\"REQ> {}: {}\".format(req.method, req.path))\n\telse:\n\t\tprint(\"REQ> {}: {} [{}]\".format(req.method, req.path, domain))\n\tif req.path in (\"/about\", \"/register\", \"/info\", \"/nodeinfo\", \"/nodestate\", \"/register\"):\n\t\t# always service these state requests regardles of node state and task load\n\t\treturn\n\tnode_state = app[\"node_state\"] if \"node_state\" in app else None\n\tif node_state != \"READY\":\n\t\twarning(f\"returning 503 - node_state: {node_state}\")\n\t\traise HTTPServiceUnavailable()\n\tif req.method in (\"GET\", \"POST\", \"PUT\", \"DELETE\"):\n\t\treq_count[req.method] += 1\n\tnum_tasks = len(asyncio.Task.all_tasks())\n\tactive_tasks = len([task for task in asyncio.Task.all_tasks() if not task.done()])\n\treq_count[\"num_tasks\"] = num_tasks\n\tif config[\"log_level\"] == \"DEBUG\":\n\t\tdebug(f\"num tasks: {num_tasks} active tasks: {active_tasks}\")\n\n\tmax_task_count = app[\"max_task_count\"]\n\tif app[\"node_type\"] == \"sn\" and max_task_count and active_tasks > max_task_count:\n\t\twarning(f\"more than {max_task_count} tasks, returning 503\")\n\t\traise HTTPServiceUnavailable()\n\n\ndef response(req, resp=None, code=None, message=None):\n\tlevel = \"INFO\"\n\tif code is None:\n\t\t# rsp needs to be set otherwise\n\t\tcode = resp.status\n\tif message is None:\n\t\tmessage=resp.reason\n\tif code > 399:\n\t\tif code < 500:\n\t\t\tlevel = \"WARN\"\n\t\telse:\n\t\t\tlevel = \"ERROR\"\n\n\tlog_level = config[\"log_level\"]\n\tprefix = config[\"prefix\"]\n\tif log_level in (\"DEBUG\", \"INFO\") or (log_level == \"WARN\" and level != \"INFO\") or (log_level == \"ERROR\" and level == \"ERROR\"):\n\t\tprint(\"{}{} RSP> <{}> ({}): {}\".format(prefix, level, code, message, req.path))\n", "step-ids": [ 2, 4, 8, 
9, 10 ] }
[ 2, 4, 8, 9, 10 ]
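The record above is HSDS's print-based logger: a module-level config, per-level counters, plain debug/info/warn/warning/error functions, and aiohttp request/response hooks. A stand-alone sketch for the plain logging functions (the import path is an assumption, and request()/response() are omitted because they need a live aiohttp application with node state and task bookkeeping):

from hsds import hsds_logger as log  # assumed import path for the module above

log.config['log_level'] = 'INFO'  # DEBUG messages are suppressed below this level
log.config['prefix'] = 'sn-1 '    # prepended to every printed line

log.debug('not printed at INFO level')
log.info('node starting')
log.warning('low on memory')
log.error('request failed')

print(log.log_count)  # counters advance only for lines that were actually printed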
from datetime import datetime
from logging import Logger
from pathlib import Path
from typing import Dict

import ignite
import ignite.distributed as idist
import torch
from omegaconf import OmegaConf

from config_schema import ConfigSchema


def log_metrics(
    logger: Logger, epoch: int, elapsed: float, tag: str, metrics: Dict[str, float]
):
    logger.info(
        "Epoch {} - elapsed: {:.5f} - {} metrics: {}".format(
            epoch,
            elapsed,
            tag,
            ", ".join(["{}: {}".format(k, v) for k, v in metrics.items()]),
        )
    )


def log_basic_info(logger: Logger, config: ConfigSchema):
    logger.info("Experiment: {}".format(config.experiment_name))
    logger.info("- PyTorch version: {}".format(torch.__version__))
    logger.info("- Ignite version: {}".format(ignite.__version__))

    logger.info("\n")
    logger.info("Configuration:")
    for line in OmegaConf.to_yaml(config).split("\n"):
        logger.info("\t" + line)
    logger.info("\n")

    if idist.get_world_size() > 1:
        logger.info("\nDistributed setting:")
        logger.info("\tbackend: {}".format(idist.backend()))
        logger.info("\tworld size: {}".format(idist.get_world_size()))
        logger.info("\n")


def prepare_output_directory(config: ConfigSchema) -> None:
    formatted = datetime.now().strftime(config.output_path_format)

    output_path = Path(formatted)
    # force always to use a new directory to avoid overwriting existing ones
    output_path.mkdir(parents=True, exist_ok=False)
    config.output_path = output_path.as_posix()
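# Minimal sketch of how these helpers might be called from a training script.
# The field names (experiment_name, output_path_format, output_path) come from
# the functions above; building the config via OmegaConf.structured and the
# concrete values shown are assumptions for illustration only.
import logging

logger = logging.getLogger("train")
logging.basicConfig(level=logging.INFO)

config = OmegaConf.structured(ConfigSchema)          # assumed: ConfigSchema is a dataclass-style schema
config.experiment_name = "baseline"                  # illustrative value
config.output_path_format = "runs/%Y%m%d-%H%M%S"     # strftime pattern -> one directory per run

log_basic_info(logger, config)
prepare_output_directory(config)                     # creates e.g. runs/20240101-120000, fails if it already exists

# after an evaluation pass:
log_metrics(logger, epoch=3, elapsed=12.34567, tag="val",
            metrics={"accuracy": 0.91, "loss": 0.27})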
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from glob import glob

from moviepy.editor import VideoFileClip

output_images_dir = './output_images/'
test_images_dir = './test_images/'
output_video_file = 'output.mp4'

mtx = None
dist = None


def load_image(filename):
    return mpimg.imread(filename)


def calibrate_camera(rows=6, cols=9):
    mtx = None
    dist = None
    save_file = 'calibration.npz'
    try:
        data = np.load(save_file)
        mtx = data['mtx']
        dist = data['dist']
        print('using saved calibration')
    except FileNotFoundError:
        print('begin calibration')
        filenames = glob('camera_cal/*.jpg')
        objpoints = []  # 3D points in real world space
        imgpoints = []  # 2D points in image plane

        # Prepare object points, like (0,0,0), (1,0,0)...
        objp = np.zeros((rows * cols, 3), np.float32)
        objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)  # x, y coordinates

        for f in filenames:
            img = load_image(f)
            gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)
            if ret:
                imgpoints.append(corners)
                objpoints.append(objp)

        ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
        if ret:
            for f in filenames:
                img = load_image(f)
                undist = cv2.undistort(img, mtx, dist, None, mtx)
                save_output_image(undist, 'undistorted-' + f.split('/')[-1])
        print('end calibration')
        np.savez(save_file, mtx=mtx, dist=dist)

    return mtx, dist


def save_output_image(img, filename, cmap=None):
    mpimg.imsave(output_images_dir + filename, img, cmap=cmap)


def undistort(img):
    return cv2.undistort(img, mtx, dist, None, mtx)


def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    grad_binary = np.zeros_like(scaled_sobel)
    grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return grad_binary


def color_threshold(img):
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    H = hls[:, :, 0]
    L = hls[:, :, 1]
    S = hls[:, :, 2]
    binary = np.zeros_like(H)
    binary[(((H > 15) & (H < 24) & (S > 90) & (L > 50)) | (L > 220))] = 1
    return binary


def window_mask(width, height, img_ref, center, level):
    output = np.zeros_like(img_ref)
    output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0] - level * height),
           max(0, int(center - width / 2)):min(int(center + width / 2), img_ref.shape[1])] = 1
    return output


def find_lr_window_centroids(image, window_width, window_height, margin):
    window = np.ones(window_width)  # Create our window template that we will use for convolutions
    left_centroids = []
    right_centroids = []

    # First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
    # and then np.convolve the vertical image slice with the window template
    # Sum quarter bottom of image to get slice, could use a different ratio
    l_sum = np.sum(image[int(3 * image.shape[0] / 4):, :int(image.shape[1] / 2)], axis=0)
    l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2
    r_sum = np.sum(image[int(3 * image.shape[0] / 4):, int(image.shape[1] / 2):], axis=0)
    r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(image.shape[1] / 2)
    y_base = int(image.shape[0] - window_height / 2)

    # Add what we found for the first layer
    y_center = y_base
    left_centroids.append((l_center, y_center))
    right_centroids.append((r_center, y_center))

    # Go through each layer looking for max pixel locations
    for level in range(1, int(image.shape[0] / window_height)):
        y_center = int(y_base - (level * window_height))
        # convolve the window into the vertical slice of the image
        image_layer = np.sum(image[int(image.shape[0] - (level + 1) * window_height):int(image.shape[0] - level * window_height), :], axis=0)
        conv_signal = np.convolve(window, image_layer)
        # Find the best left centroid by using past left center as a reference
        # Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
        offset = window_width / 2
        l_min_index = int(max(l_center + offset - margin, 0))
        l_max_index = int(min(l_center + offset + margin, image.shape[1]))
        l_max = np.argmax(conv_signal[l_min_index:l_max_index])
        if l_max > 50:
            left_centroids.append((l_center, y_center))
            l_center = l_max + l_min_index - offset
        # Find the best right centroid by using past right center as a reference
        r_min_index = int(max(r_center + offset - margin, 0))
        r_max_index = int(min(r_center + offset + margin, image.shape[1]))
        r_max = np.argmax(conv_signal[r_min_index:r_max_index])
        if r_max > 50:
            right_centroids.append((r_center, y_center))
            r_center = r_max + r_min_index - offset

    return left_centroids, right_centroids


def draw_window_boxes(img, l_points, r_points, window_width, window_height):
    if len(l_points) > 0:
        for p in l_points:
            cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (255, 0, 0), -1)
    if len(r_points) > 0:
        for p in r_points:
            cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (0, 255, 0), -1)
    return img


def draw_window_centroids(warped, window_centroids, window_width=50, window_height=80):
    if len(window_centroids) > 0:
        # Points used to draw all the left and right windows
        l_points = np.zeros_like(warped)
        r_points = np.zeros_like(warped)

        # Go through each level and draw the windows
        for level in range(0, len(window_centroids)):
            # window_mask is a function to draw window areas
            l_mask = window_mask(window_width, window_height, warped, window_centroids[level][0], level)
            r_mask = window_mask(window_width, window_height, warped, window_centroids[level][1], level)
            # Add graphic points from window mask here to total pixels found
            l_points[(l_points == 255) | (l_mask == 1)] = 255
            r_points[(r_points == 255) | (r_mask == 1)] = 255

        # Draw the results
        zero_channel = np.zeros_like(l_points)  # create a zero color channel
        template = np.array(cv2.merge((l_points, r_points, zero_channel)), np.uint8)  # make window pixels green
        warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)  # making the original road pixels 3 color channels
        output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0)  # overlay the original road image with window results
    # If no window centers found, just display original road image
    else:
        output = np.array(cv2.merge((warped, warped, warped)), np.uint8)

    return output


def draw_text(img, text, origin):
    cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), thickness=2)


def pipeline_image(img, save_images=None, save_suffix='.jpg'):
    if save_images:
        print('begin pipeline_image', save_suffix)

    undistorted = undistort(img)
    if save_images:
        save_output_image(undistorted, 'undistorted' + save_suffix)

    # binary = abs_sobel_thresh(undistorted, orient='x', sobel_kernel=15, thresh=(20, 100))
    binary = color_threshold(undistorted)
    if save_images:
        save_output_image(binary, 'binary' + save_suffix, cmap='gray')

    img_size = binary.shape[::-1]
    src = np.float32(
        [[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],
         [((img_size[0] / 6) - 10), img_size[1]],
         [(img_size[0] * 5 / 6) + 60, img_size[1]],
         [(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])
    dst = np.float32(
        [[(img_size[0] / 4), 0],
         [(img_size[0] / 4), img_size[1]],
         [(img_size[0] * 3 / 4), img_size[1]],
         [(img_size[0] * 3 / 4), 0]])

    if save_images:
        cv2.polylines(img, np.int32([src]), True, (255, 0, 0), thickness=3)
        save_output_image(img, 'polygon' + save_suffix)

    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)
    if save_images:
        save_output_image(warped, 'warped' + save_suffix, cmap='gray')

    window_width = 40
    window_height = 60

    # identify lane-line pixels and fit their positions with a polynomial
    l_points, r_points = find_lr_window_centroids(warped, window_width, window_height, 100)

    global last_l_points, last_r_points
    if len(l_points) < 5 and len(last_l_points) > 0:
        # too few detections: reuse the previous frame's points
        l_points = last_l_points
    else:
        last_l_points = l_points
    l_points = np.array(l_points, dtype=np.int32)
    l_poly = np.polyfit(l_points[:, 1], l_points[:, 0], 2)

    if len(r_points) < 5 and len(last_r_points) > 0:
        r_points = last_r_points
    else:
        last_r_points = r_points
    r_points = np.array(r_points, dtype=np.int32)
    r_poly = np.polyfit(r_points[:, 1], r_points[:, 0], 2)

    yval = np.arange(0, warped.shape[0])
    l_xval = np.polyval(l_poly, yval)
    r_xval = np.polyval(r_poly, yval)

    if save_images:
        lanes = warped * 255
        lanes = np.array(cv2.merge((lanes, lanes, lanes)), np.uint8)
        lanes = draw_window_boxes(lanes, l_points, r_points, window_width, window_height)
        for p in l_points:
            cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)
        for p in r_points:
            cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)
        for x, y in zip(l_xval, yval):
            cv2.circle(lanes, (int(x), y), 5, (255, 255, 0), -1)
        for x, y in zip(r_xval, yval):
            cv2.circle(lanes, (int(x), y), 5, (0, 255, 255), -1)
        save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')

    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30 / 720   # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension

    # calculate the position of the vehicle with respect to center
    lane_center_offset_m = (warped.shape[1] / 2 - (l_xval[-1] + r_xval[-1]) / 2) * xm_per_pix
    direction = 'Left'
    if lane_center_offset_m > 0:
        direction = 'Right'

    # calculate the radius of curvature of the lane
    y_eval = np.max(yval)
    # Fit new polynomials to x,y in world space
    left_fit_cr = np.polyfit(l_points[:, 1] * ym_per_pix, l_points[:, 0] * xm_per_pix, 2)
    right_fit_cr = np.polyfit(r_points[:, 1] * ym_per_pix, r_points[:, 0] * xm_per_pix, 2)
    # Calculate the new radii of curvature (now in meters)
    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit_cr[0])
    right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(2 * right_fit_cr[0])

    # Plot the detected lane area back down onto the road image
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([l_xval, yval]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.INTER_LINEAR)

    draw_text(undistorted, "Radius: {:.1f}m {:.1f}m".format(left_curverad, right_curverad), (50, 50))
    draw_text(undistorted, "{:.3f}m {} of Center".format(abs(lane_center_offset_m), direction), (50, 100))

    output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)
    if save_images:
        save_output_image(output, 'output' + save_suffix)

    return output


def process_test_images():
    filenames = glob('test_images/*.jpg')
    for f in filenames:
        img = load_image(f)
        img_out = pipeline_image(img, True, '-' + f.split('/')[-1])
        # show_before_after(img, img_out, 'gray')


def process_video(in_file, out_file):
    clip = VideoFileClip(in_file)
    video_clip = clip.fl_image(pipeline_image)
    video_clip.write_videofile(out_file, audio=False)


def show_before_after(before, after, cmap=None):
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
    fig.subplots_adjust(hspace=0.5, wspace=0.5)
    ax1.imshow(before)
    ax1.set_title('Before')
    ax2.imshow(after, cmap=cmap)
    ax2.set_title('After')
    plt.show()


def show_images(imgs, titles):
    fig, axes = plt.subplots(3, 6, figsize=(12, 6))
    fig.subplots_adjust(hspace=0.5, wspace=0.5)
    for ax, img, title in zip(axes.flat, imgs, titles):
        ax.imshow(img)
        ax.set_title(title)
    plt.show()


last_l_points = []
last_r_points = []
mtx, dist = calibrate_camera()
process_test_images()
process_video('project_video.mp4', 'output.mp4')
process_video('challenge_video.mp4', 'challenge_output.mp4')
process_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')
flexible
{ "blob_id": "3ac30240577eda08343796abbd051d5d3b45beaf", "index": 3416, "step-1": "<mask token>\n\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n objpoints = []\n imgpoints = []\n objp = np.zeros((rows * cols, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,\n imgpoints, gray.shape[::-1], None, None)\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n return mtx, dist\n\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=\n sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=\n sobel_kernel))\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\n\ndef color_threshold(img):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:, :, 0]\n L = hls[:, :, 1]\n S = hls[:, :, 2]\n binary = np.zeros_like(H)\n binary[(H > 15) & (H < 24) & (S > 90) & (L > 50) | (L > 220)] = 1\n return binary\n\n\ndef window_mask(width, height, img_ref, center, level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0\n ] - level * height), max(0, int(center - width / 2)):min(int(center +\n width / 2), img_ref.shape[1])] = 1\n return output\n\n\n<mask token>\n\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (255, 0, 0), -1)\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (0, 255, 0), -1)\n return img\n\n\ndef draw_window_centroids(warped, window_centroids, window_width=50,\n window_height=80):\n if len(window_centroids) > 0:\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n for level in range(0, len(window_centroids)):\n l_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][0], level)\n r_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][1], level)\n l_points[(l_points == 255) | (l_mask == 1)] = 255\n r_points[(r_points == 255) | (r_mask == 1)] = 255\n zero_channel = np.zeros_like(l_points)\n template = np.array(cv2.merge((l_points, r_points, zero_channel)),\n np.uint8)\n warpage = 
np.array(cv2.merge((warped, warped, warped)), np.uint8)\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0)\n else:\n output = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n return output\n\n\n<mask token>\n\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n undistorted = undistort(img)\n if save_images:\n save_output_image(undistorted, 'undistorted' + save_suffix)\n binary = color_threshold(undistorted)\n if save_images:\n save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n img_size = binary.shape[::-1]\n src = np.float32([[img_size[0] / 2 - 55, img_size[1] / 2 + 100], [\n img_size[0] / 6 - 10, img_size[1]], [img_size[0] * 5 / 6 + 60,\n img_size[1]], [img_size[0] / 2 + 55, img_size[1] / 2 + 100]])\n dst = np.float32([[img_size[0] / 4, 0], [img_size[0] / 4, img_size[1]],\n [img_size[0] * 3 / 4, img_size[1]], [img_size[0] * 3 / 4, 0]])\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255, 0, 0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n window_width = 40\n window_height = 60\n l_points, r_points = find_lr_window_centroids(warped, window_width,\n window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:, 1], l_points[:, 0], 2)\n if len(r_points) < 5 and len(last_r_points) > 0:\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:, 1], r_points[:, 0], 2)\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n if save_images:\n lanes = warped * 255\n lanes = np.array(cv2.merge((lanes, lanes, lanes)), np.uint8)\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width,\n window_height)\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for x, y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (255, 255, 0), -1)\n for x, y in zip(r_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (0, 255, 255), -1)\n save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')\n ym_per_pix = 30 / 720\n xm_per_pix = 3.7 / 700\n lane_center_offset_m = (warped.shape[1] / 2 - (l_xval[-1] + r_xval[-1]) / 2\n ) * xm_per_pix\n direction = 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n y_eval = np.max(yval)\n left_fit_cr = np.polyfit(l_points[:, 1] * ym_per_pix, l_points[:, 0] *\n xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:, 1] * ym_per_pix, r_points[:, 0] *\n xm_per_pix, 2)\n left_curverad = (1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix +\n left_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * left_fit_cr[0])\n right_curverad = (1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix +\n right_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * right_fit_cr[0])\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n pts_left = np.array([np.transpose(np.vstack([l_xval, yval]))])\n pts_right = 
np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.\n INTER_LINEAR)\n draw_text(undistorted, 'Radius: {:.1f}m {:.1f}m'.format(left_curverad,\n right_curverad), (50, 50))\n draw_text(undistorted, '{:.3f}m {} of Center'.format(abs(\n lane_center_offset_m), direction), (50, 100))\n output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n save_output_image(output, 'output' + save_suffix)\n return output\n\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\n\ndef show_before_after(before, after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\n\ndef show_images(imgs, titles):\n fig, axes = plt.subplots(3, 6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n plt.show()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n objpoints = []\n imgpoints = []\n objp = np.zeros((rows * cols, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,\n imgpoints, gray.shape[::-1], None, None)\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n return mtx, dist\n\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=\n sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=\n sobel_kernel))\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\n\ndef color_threshold(img):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:, :, 0]\n L = hls[:, :, 1]\n S = hls[:, :, 2]\n binary = np.zeros_like(H)\n binary[(H > 15) & (H < 24) & (S > 90) & (L > 50) | (L > 
220)] = 1\n return binary\n\n\ndef window_mask(width, height, img_ref, center, level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0\n ] - level * height), max(0, int(center - width / 2)):min(int(center +\n width / 2), img_ref.shape[1])] = 1\n return output\n\n\ndef find_lr_window_centroids(image, window_width, window_height, margin):\n window = np.ones(window_width)\n left_centroids = []\n right_centroids = []\n l_sum = np.sum(image[int(3 * image.shape[0] / 4):, :int(image.shape[1] /\n 2)], axis=0)\n l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2\n r_sum = np.sum(image[int(3 * image.shape[0] / 4):, int(image.shape[1] /\n 2):], axis=0)\n r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(\n image.shape[1] / 2)\n y_base = int(image.shape[0] - window_height / 2)\n y_center = y_base\n left_centroids.append((l_center, y_center))\n right_centroids.append((r_center, y_center))\n for level in range(1, int(image.shape[0] / window_height)):\n y_center = int(y_base - level * window_height)\n image_layer = np.sum(image[int(image.shape[0] - (level + 1) *\n window_height):int(image.shape[0] - level * window_height), :],\n axis=0)\n conv_signal = np.convolve(window, image_layer)\n offset = window_width / 2\n l_min_index = int(max(l_center + offset - margin, 0))\n l_max_index = int(min(l_center + offset + margin, image.shape[1]))\n l_max = np.argmax(conv_signal[l_min_index:l_max_index])\n if l_max > 50:\n left_centroids.append((l_center, y_center))\n l_center = l_max + l_min_index - offset\n r_min_index = int(max(r_center + offset - margin, 0))\n r_max_index = int(min(r_center + offset + margin, image.shape[1]))\n r_max = np.argmax(conv_signal[r_min_index:r_max_index])\n if r_max > 50:\n right_centroids.append((r_center, y_center))\n r_center = r_max + r_min_index - offset\n return left_centroids, right_centroids\n\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (255, 0, 0), -1)\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (0, 255, 0), -1)\n return img\n\n\ndef draw_window_centroids(warped, window_centroids, window_width=50,\n window_height=80):\n if len(window_centroids) > 0:\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n for level in range(0, len(window_centroids)):\n l_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][0], level)\n r_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][1], level)\n l_points[(l_points == 255) | (l_mask == 1)] = 255\n r_points[(r_points == 255) | (r_mask == 1)] = 255\n zero_channel = np.zeros_like(l_points)\n template = np.array(cv2.merge((l_points, r_points, zero_channel)),\n np.uint8)\n warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0)\n else:\n output = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n return output\n\n\ndef draw_text(img, text, origin):\n cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, \n 255), thickness=2)\n\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n undistorted = undistort(img)\n if save_images:\n save_output_image(undistorted, 
'undistorted' + save_suffix)\n binary = color_threshold(undistorted)\n if save_images:\n save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n img_size = binary.shape[::-1]\n src = np.float32([[img_size[0] / 2 - 55, img_size[1] / 2 + 100], [\n img_size[0] / 6 - 10, img_size[1]], [img_size[0] * 5 / 6 + 60,\n img_size[1]], [img_size[0] / 2 + 55, img_size[1] / 2 + 100]])\n dst = np.float32([[img_size[0] / 4, 0], [img_size[0] / 4, img_size[1]],\n [img_size[0] * 3 / 4, img_size[1]], [img_size[0] * 3 / 4, 0]])\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255, 0, 0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n window_width = 40\n window_height = 60\n l_points, r_points = find_lr_window_centroids(warped, window_width,\n window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:, 1], l_points[:, 0], 2)\n if len(r_points) < 5 and len(last_r_points) > 0:\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:, 1], r_points[:, 0], 2)\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n if save_images:\n lanes = warped * 255\n lanes = np.array(cv2.merge((lanes, lanes, lanes)), np.uint8)\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width,\n window_height)\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for x, y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (255, 255, 0), -1)\n for x, y in zip(r_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (0, 255, 255), -1)\n save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')\n ym_per_pix = 30 / 720\n xm_per_pix = 3.7 / 700\n lane_center_offset_m = (warped.shape[1] / 2 - (l_xval[-1] + r_xval[-1]) / 2\n ) * xm_per_pix\n direction = 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n y_eval = np.max(yval)\n left_fit_cr = np.polyfit(l_points[:, 1] * ym_per_pix, l_points[:, 0] *\n xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:, 1] * ym_per_pix, r_points[:, 0] *\n xm_per_pix, 2)\n left_curverad = (1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix +\n left_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * left_fit_cr[0])\n right_curverad = (1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix +\n right_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * right_fit_cr[0])\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n pts_left = np.array([np.transpose(np.vstack([l_xval, yval]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.\n INTER_LINEAR)\n draw_text(undistorted, 'Radius: {:.1f}m {:.1f}m'.format(left_curverad,\n right_curverad), (50, 50))\n draw_text(undistorted, '{:.3f}m {} of Center'.format(abs(\n lane_center_offset_m), direction), (50, 100))\n 
output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n save_output_image(output, 'output' + save_suffix)\n return output\n\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\n\ndef show_before_after(before, after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\n\ndef show_images(imgs, titles):\n fig, axes = plt.subplots(3, 6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n plt.show()\n\n\n<mask token>\nprocess_test_images()\nprocess_video('project_video.mp4', 'output.mp4')\nprocess_video('challenge_video.mp4', 'challenge_output.mp4')\nprocess_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')\n", "step-3": "<mask token>\noutput_images_dir = './output_images/'\ntest_images_dir = './test_images/'\noutput_video_file = 'output.mp4'\nmtx = None\ndist = None\n\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n objpoints = []\n imgpoints = []\n objp = np.zeros((rows * cols, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,\n imgpoints, gray.shape[::-1], None, None)\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n return mtx, dist\n\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=\n sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=\n sobel_kernel))\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\n\ndef color_threshold(img):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:, :, 0]\n L = hls[:, :, 1]\n S = hls[:, :, 2]\n binary = np.zeros_like(H)\n binary[(H > 15) & (H < 24) & (S > 90) & (L > 50) | (L > 220)] = 1\n return binary\n\n\ndef window_mask(width, height, img_ref, center, level):\n output = 
np.zeros_like(img_ref)\n output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0\n ] - level * height), max(0, int(center - width / 2)):min(int(center +\n width / 2), img_ref.shape[1])] = 1\n return output\n\n\ndef find_lr_window_centroids(image, window_width, window_height, margin):\n window = np.ones(window_width)\n left_centroids = []\n right_centroids = []\n l_sum = np.sum(image[int(3 * image.shape[0] / 4):, :int(image.shape[1] /\n 2)], axis=0)\n l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2\n r_sum = np.sum(image[int(3 * image.shape[0] / 4):, int(image.shape[1] /\n 2):], axis=0)\n r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(\n image.shape[1] / 2)\n y_base = int(image.shape[0] - window_height / 2)\n y_center = y_base\n left_centroids.append((l_center, y_center))\n right_centroids.append((r_center, y_center))\n for level in range(1, int(image.shape[0] / window_height)):\n y_center = int(y_base - level * window_height)\n image_layer = np.sum(image[int(image.shape[0] - (level + 1) *\n window_height):int(image.shape[0] - level * window_height), :],\n axis=0)\n conv_signal = np.convolve(window, image_layer)\n offset = window_width / 2\n l_min_index = int(max(l_center + offset - margin, 0))\n l_max_index = int(min(l_center + offset + margin, image.shape[1]))\n l_max = np.argmax(conv_signal[l_min_index:l_max_index])\n if l_max > 50:\n left_centroids.append((l_center, y_center))\n l_center = l_max + l_min_index - offset\n r_min_index = int(max(r_center + offset - margin, 0))\n r_max_index = int(min(r_center + offset + margin, image.shape[1]))\n r_max = np.argmax(conv_signal[r_min_index:r_max_index])\n if r_max > 50:\n right_centroids.append((r_center, y_center))\n r_center = r_max + r_min_index - offset\n return left_centroids, right_centroids\n\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (255, 0, 0), -1)\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (0, 255, 0), -1)\n return img\n\n\ndef draw_window_centroids(warped, window_centroids, window_width=50,\n window_height=80):\n if len(window_centroids) > 0:\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n for level in range(0, len(window_centroids)):\n l_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][0], level)\n r_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][1], level)\n l_points[(l_points == 255) | (l_mask == 1)] = 255\n r_points[(r_points == 255) | (r_mask == 1)] = 255\n zero_channel = np.zeros_like(l_points)\n template = np.array(cv2.merge((l_points, r_points, zero_channel)),\n np.uint8)\n warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0)\n else:\n output = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n return output\n\n\ndef draw_text(img, text, origin):\n cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, \n 255), thickness=2)\n\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n undistorted = undistort(img)\n if save_images:\n save_output_image(undistorted, 'undistorted' + save_suffix)\n binary = color_threshold(undistorted)\n if save_images:\n 
save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n img_size = binary.shape[::-1]\n src = np.float32([[img_size[0] / 2 - 55, img_size[1] / 2 + 100], [\n img_size[0] / 6 - 10, img_size[1]], [img_size[0] * 5 / 6 + 60,\n img_size[1]], [img_size[0] / 2 + 55, img_size[1] / 2 + 100]])\n dst = np.float32([[img_size[0] / 4, 0], [img_size[0] / 4, img_size[1]],\n [img_size[0] * 3 / 4, img_size[1]], [img_size[0] * 3 / 4, 0]])\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255, 0, 0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n window_width = 40\n window_height = 60\n l_points, r_points = find_lr_window_centroids(warped, window_width,\n window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:, 1], l_points[:, 0], 2)\n if len(r_points) < 5 and len(last_r_points) > 0:\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:, 1], r_points[:, 0], 2)\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n if save_images:\n lanes = warped * 255\n lanes = np.array(cv2.merge((lanes, lanes, lanes)), np.uint8)\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width,\n window_height)\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for x, y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (255, 255, 0), -1)\n for x, y in zip(r_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (0, 255, 255), -1)\n save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')\n ym_per_pix = 30 / 720\n xm_per_pix = 3.7 / 700\n lane_center_offset_m = (warped.shape[1] / 2 - (l_xval[-1] + r_xval[-1]) / 2\n ) * xm_per_pix\n direction = 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n y_eval = np.max(yval)\n left_fit_cr = np.polyfit(l_points[:, 1] * ym_per_pix, l_points[:, 0] *\n xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:, 1] * ym_per_pix, r_points[:, 0] *\n xm_per_pix, 2)\n left_curverad = (1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix +\n left_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * left_fit_cr[0])\n right_curverad = (1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix +\n right_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * right_fit_cr[0])\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n pts_left = np.array([np.transpose(np.vstack([l_xval, yval]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.\n INTER_LINEAR)\n draw_text(undistorted, 'Radius: {:.1f}m {:.1f}m'.format(left_curverad,\n right_curverad), (50, 50))\n draw_text(undistorted, '{:.3f}m {} of Center'.format(abs(\n lane_center_offset_m), direction), (50, 100))\n output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n 
save_output_image(output, 'output' + save_suffix)\n return output\n\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\n\ndef show_before_after(before, after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\n\ndef show_images(imgs, titles):\n fig, axes = plt.subplots(3, 6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n plt.show()\n\n\nlast_l_points = []\nlast_r_points = []\nmtx, dist = calibrate_camera()\nprocess_test_images()\nprocess_video('project_video.mp4', 'output.mp4')\nprocess_video('challenge_video.mp4', 'challenge_output.mp4')\nprocess_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')\n", "step-4": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom glob import glob\nfrom moviepy.editor import VideoFileClip\noutput_images_dir = './output_images/'\ntest_images_dir = './test_images/'\noutput_video_file = 'output.mp4'\nmtx = None\ndist = None\n\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n objpoints = []\n imgpoints = []\n objp = np.zeros((rows * cols, 3), np.float32)\n objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints,\n imgpoints, gray.shape[::-1], None, None)\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n return mtx, dist\n\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=\n sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=\n sobel_kernel))\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1\n return grad_binary\n\n\ndef color_threshold(img):\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:, :, 0]\n L = hls[:, :, 1]\n S = hls[:, :, 2]\n binary = np.zeros_like(H)\n binary[(H > 15) & (H < 24) & (S > 90) 
& (L > 50) | (L > 220)] = 1\n return binary\n\n\ndef window_mask(width, height, img_ref, center, level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0\n ] - level * height), max(0, int(center - width / 2)):min(int(center +\n width / 2), img_ref.shape[1])] = 1\n return output\n\n\ndef find_lr_window_centroids(image, window_width, window_height, margin):\n window = np.ones(window_width)\n left_centroids = []\n right_centroids = []\n l_sum = np.sum(image[int(3 * image.shape[0] / 4):, :int(image.shape[1] /\n 2)], axis=0)\n l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2\n r_sum = np.sum(image[int(3 * image.shape[0] / 4):, int(image.shape[1] /\n 2):], axis=0)\n r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(\n image.shape[1] / 2)\n y_base = int(image.shape[0] - window_height / 2)\n y_center = y_base\n left_centroids.append((l_center, y_center))\n right_centroids.append((r_center, y_center))\n for level in range(1, int(image.shape[0] / window_height)):\n y_center = int(y_base - level * window_height)\n image_layer = np.sum(image[int(image.shape[0] - (level + 1) *\n window_height):int(image.shape[0] - level * window_height), :],\n axis=0)\n conv_signal = np.convolve(window, image_layer)\n offset = window_width / 2\n l_min_index = int(max(l_center + offset - margin, 0))\n l_max_index = int(min(l_center + offset + margin, image.shape[1]))\n l_max = np.argmax(conv_signal[l_min_index:l_max_index])\n if l_max > 50:\n left_centroids.append((l_center, y_center))\n l_center = l_max + l_min_index - offset\n r_min_index = int(max(r_center + offset - margin, 0))\n r_max_index = int(min(r_center + offset + margin, image.shape[1]))\n r_max = np.argmax(conv_signal[r_min_index:r_max_index])\n if r_max > 50:\n right_centroids.append((r_center, y_center))\n r_center = r_max + r_min_index - offset\n return left_centroids, right_centroids\n\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (255, 0, 0), -1)\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] +\n window_height), (0, 255, 0), -1)\n return img\n\n\ndef draw_window_centroids(warped, window_centroids, window_width=50,\n window_height=80):\n if len(window_centroids) > 0:\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n for level in range(0, len(window_centroids)):\n l_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][0], level)\n r_mask = window_mask(window_width, window_height, warped,\n window_centroids[level][1], level)\n l_points[(l_points == 255) | (l_mask == 1)] = 255\n r_points[(r_points == 255) | (r_mask == 1)] = 255\n zero_channel = np.zeros_like(l_points)\n template = np.array(cv2.merge((l_points, r_points, zero_channel)),\n np.uint8)\n warpage = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0)\n else:\n output = np.array(cv2.merge((warped, warped, warped)), np.uint8)\n return output\n\n\ndef draw_text(img, text, origin):\n cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, \n 255), thickness=2)\n\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n undistorted = undistort(img)\n if save_images:\n 
save_output_image(undistorted, 'undistorted' + save_suffix)\n binary = color_threshold(undistorted)\n if save_images:\n save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n img_size = binary.shape[::-1]\n src = np.float32([[img_size[0] / 2 - 55, img_size[1] / 2 + 100], [\n img_size[0] / 6 - 10, img_size[1]], [img_size[0] * 5 / 6 + 60,\n img_size[1]], [img_size[0] / 2 + 55, img_size[1] / 2 + 100]])\n dst = np.float32([[img_size[0] / 4, 0], [img_size[0] / 4, img_size[1]],\n [img_size[0] * 3 / 4, img_size[1]], [img_size[0] * 3 / 4, 0]])\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255, 0, 0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n window_width = 40\n window_height = 60\n l_points, r_points = find_lr_window_centroids(warped, window_width,\n window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:, 1], l_points[:, 0], 2)\n if len(r_points) < 5 and len(last_r_points) > 0:\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:, 1], r_points[:, 0], 2)\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n if save_images:\n lanes = warped * 255\n lanes = np.array(cv2.merge((lanes, lanes, lanes)), np.uint8)\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width,\n window_height)\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255, 0, 255), -1)\n for x, y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (255, 255, 0), -1)\n for x, y in zip(r_xval, yval):\n cv2.circle(lanes, (int(x), y), 5, (0, 255, 255), -1)\n save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')\n ym_per_pix = 30 / 720\n xm_per_pix = 3.7 / 700\n lane_center_offset_m = (warped.shape[1] / 2 - (l_xval[-1] + r_xval[-1]) / 2\n ) * xm_per_pix\n direction = 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n y_eval = np.max(yval)\n left_fit_cr = np.polyfit(l_points[:, 1] * ym_per_pix, l_points[:, 0] *\n xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:, 1] * ym_per_pix, r_points[:, 0] *\n xm_per_pix, 2)\n left_curverad = (1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix +\n left_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * left_fit_cr[0])\n right_curverad = (1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix +\n right_fit_cr[1]) ** 2) ** 1.5 / np.absolute(2 * right_fit_cr[0])\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n pts_left = np.array([np.transpose(np.vstack([l_xval, yval]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.\n INTER_LINEAR)\n draw_text(undistorted, 'Radius: {:.1f}m {:.1f}m'.format(left_curverad,\n right_curverad), (50, 50))\n draw_text(undistorted, '{:.3f}m {} of Center'.format(abs(\n 
lane_center_offset_m), direction), (50, 100))\n output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n save_output_image(output, 'output' + save_suffix)\n return output\n\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\n\ndef show_before_after(before, after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\n\ndef show_images(imgs, titles):\n fig, axes = plt.subplots(3, 6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n plt.show()\n\n\nlast_l_points = []\nlast_r_points = []\nmtx, dist = calibrate_camera()\nprocess_test_images()\nprocess_video('project_video.mp4', 'output.mp4')\nprocess_video('challenge_video.mp4', 'challenge_output.mp4')\nprocess_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')\n", "step-5": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom glob import glob\nfrom moviepy.editor import VideoFileClip\n\noutput_images_dir = './output_images/'\ntest_images_dir = './test_images/'\noutput_video_file = 'output.mp4'\n\nmtx = None\ndist = None\n\ndef load_image(filename):\n return mpimg.imread(filename)\n\ndef calibrate_camera(rows=6, cols=9):\n mtx = None\n dist = None\n\n save_file = 'calibration.npz'\n try:\n data = np.load(save_file)\n mtx = data['mtx']\n dist = data['dist']\n print('using saved calibration')\n except FileNotFoundError:\n print('begin calibration')\n filenames = glob('camera_cal/*.jpg')\n\n objpoints = [] # 3D points in real world space\n imgpoints = [] # 2D points in image plane\n\n #Prepare object points, like (0,0,0), (1,0,0)...\n objp = np.zeros((rows*cols,3), np.float32)\n objp[:,:2] = np.mgrid[0:cols,0:rows].T.reshape(-1,2) # x, y coordinates\n\n for f in filenames:\n img = load_image(f)\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n ret, corners = cv2.findChessboardCorners(gray, (cols,rows), None)\n\n if ret:\n imgpoints.append(corners)\n objpoints.append(objp)\n\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\n\n if ret:\n for f in filenames:\n img = load_image(f)\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n save_output_image(undist, 'undistorted-' + f.split('/')[-1])\n\n print('end calibration')\n np.savez(save_file, mtx=mtx, dist=dist)\n\n return mtx, dist\n\ndef save_output_image(img, filename, cmap=None):\n mpimg.imsave(output_images_dir + filename, img, cmap=cmap)\n\ndef undistort(img):\n return cv2.undistort(img, mtx, dist, None, mtx)\n\ndef abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n if orient == 'x':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))\n if orient == 'y':\n abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))\n scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))\n grad_binary = np.zeros_like(scaled_sobel)\n grad_binary[(scaled_sobel >= thresh[0]) & 
(scaled_sobel <= thresh[1])] = 1\n\n return grad_binary\n\ndef color_threshold(img):\n #R = img[:,:,0]\n #G = img[:,:,1]\n #B = img[:,:,2]\n\n #binary = np.zeros_like(R)\n #binary[(R > 200) & (G > 160) & ((B < 100) | (B > 200))] = 1\n\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n H = hls[:,:,0]\n L = hls[:,:,1]\n S = hls[:,:,2]\n\n binary = np.zeros_like(H)\n binary[(((H > 15) & (H < 24) & (S > 90) & (L > 50)) | (L > 220))] = 1\n\n return binary\n\ndef window_mask(width, height, img_ref, center,level):\n output = np.zeros_like(img_ref)\n output[int(img_ref.shape[0]-(level+1)*height):int(img_ref.shape[0]-level*height),max(0,int(center-width/2)):min(int(center+width/2),img_ref.shape[1])] = 1\n return output\n\ndef find_lr_window_centroids(image, window_width, window_height, margin):\n #window_centroids = [] # Store the (left,right) window centroid positions per level\n window = np.ones(window_width) # Create our window template that we will use for convolutions\n\n left_centroids = []\n right_centroids = []\n\n # First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice\n # and then np.convolve the vertical image slice with the window template \n\n # Sum quarter bottom of image to get slice, could use a different ratio\n l_sum = np.sum(image[int(3*image.shape[0]/4):,:int(image.shape[1]/2)], axis=0)\n l_center = np.argmax(np.convolve(window,l_sum))-window_width/2\n r_sum = np.sum(image[int(3*image.shape[0]/4):,int(image.shape[1]/2):], axis=0)\n r_center = np.argmax(np.convolve(window,r_sum))-window_width/2+int(image.shape[1]/2)\n\n y_base = int(image.shape[0] - window_height/2)\n\n # Add what we found for the first layer\n y_center = y_base\n left_centroids.append((l_center, y_center))\n right_centroids.append((r_center, y_center))\n\n # Go through each layer looking for max pixel locations\n for level in range(1,(int)(image.shape[0]/window_height)):\n y_center = int(y_base - (level * window_height))\n\n # convolve the window into the vertical slice of the image\n image_layer = np.sum(image[int(image.shape[0]-(level+1)*window_height):int(image.shape[0]-level*window_height),:], axis=0)\n conv_signal = np.convolve(window, image_layer)\n # Find the best left centroid by using past left center as a reference\n # Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window\n offset = window_width/2\n l_min_index = int(max(l_center+offset-margin,0))\n l_max_index = int(min(l_center+offset+margin,image.shape[1]))\n l_max = np.argmax(conv_signal[l_min_index:l_max_index])\n if l_max > 50:\n left_centroids.append((l_center, y_center))\n l_center = l_max+l_min_index-offset\n # Find the best right centroid by using past right center as a reference\n r_min_index = int(max(r_center+offset-margin,0))\n r_max_index = int(min(r_center+offset+margin,image.shape[1]))\n r_max = np.argmax(conv_signal[r_min_index:r_max_index])\n if r_max > 50:\n right_centroids.append((r_center, y_center))\n r_center = r_max+r_min_index-offset\n\n return left_centroids, right_centroids\n\ndef draw_window_boxes(img, l_points, r_points, window_width, window_height):\n if len(l_points) > 0:\n for p in l_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (255,0,0), -1)\n\n if len(r_points) > 0:\n for p in r_points:\n cv2.rectangle(img, (p[0], p[1]), (p[0] + window_width, p[1] + window_height), (0,255,0), -1)\n\n return img\n\ndef draw_window_centroids(warped, window_centroids, window_width 
= 50, window_height = 80):\n if len(window_centroids) > 0:\n\n # Points used to draw all the left and right windows\n l_points = np.zeros_like(warped)\n r_points = np.zeros_like(warped)\n\n # Go through each level and draw the windows \n for level in range(0,len(window_centroids)):\n # Window_mask is a function to draw window areas\n l_mask = window_mask(window_width,window_height,warped,window_centroids[level][0],level)\n r_mask = window_mask(window_width,window_height,warped,window_centroids[level][1],level)\n # Add graphic points from window mask here to total pixels found \n l_points[(l_points == 255) | ((l_mask == 1) ) ] = 255\n r_points[(r_points == 255) | ((r_mask == 1) ) ] = 255\n\n # Draw the results\n #template = np.array(r_points+l_points,np.uint8) # add both left and right window pixels together\n zero_channel = np.zeros_like(l_points) # create a zero color channle \n template = np.array(cv2.merge((l_points,r_points,zero_channel)),np.uint8) # make window pixels green\n warpage = np.array(cv2.merge((warped,warped,warped)),np.uint8) # making the original road pixels 3 color channels\n output = cv2.addWeighted(warpage, 0.5, template, 0.5, 0.0) # overlay the orignal road image with window results\n\n # If no window centers found, just display orginal road image\n else:\n output = np.array(cv2.merge((warped,warped,warped)),np.uint8)\n\n return output\n\ndef draw_text(img, text, origin):\n cv2.putText(img, text, origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), thickness=2)\n\ndef pipeline_image(img, save_images=None, save_suffix='.jpg'):\n if save_images:\n print('begin pipeline_image', save_suffix)\n\n undistorted = undistort(img)\n if save_images:\n save_output_image(undistorted, 'undistorted' + save_suffix)\n\n #binary = abs_sobel_thresh(undistorted, orient='x', sobel_kernel=15, thresh=(20,100))\n binary = color_threshold(undistorted)\n if save_images:\n save_output_image(binary, 'binary' + save_suffix, cmap='gray')\n\n img_size = binary.shape[::-1]\n\n src = np.float32(\n [[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],\n [((img_size[0] / 6) - 10), img_size[1]],\n [(img_size[0] * 5 / 6) + 60, img_size[1]],\n [(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])\n dst = np.float32(\n [[(img_size[0] / 4), 0],\n [(img_size[0] / 4), img_size[1]],\n [(img_size[0] * 3 / 4), img_size[1]],\n [(img_size[0] * 3 / 4), 0]])\n\n if save_images:\n cv2.polylines(img, np.int32([src]), True, (255,0,0), thickness=3)\n save_output_image(img, 'polygon' + save_suffix)\n\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n warped = cv2.warpPerspective(binary, M, img_size, flags=cv2.INTER_LINEAR)\n\n if save_images:\n save_output_image(warped, 'warped' + save_suffix, cmap='gray')\n\n window_width = 40\n window_height = 60\n\n #identified lane-line pixels and fit their positions with a polynomial\n l_points, r_points = find_lr_window_centroids(warped, window_width, window_height, 100)\n global last_l_points, last_r_points\n if len(l_points) < 5 and len(last_l_points) > 0:\n #print(\"less than 4 l_points:\", len(r_points))\n # use the previous points\n l_points = last_l_points\n else:\n last_l_points = l_points\n l_points = np.array(l_points, dtype=np.int32)\n l_poly = np.polyfit(l_points[:,1], l_points[:,0], 2)\n\n if len(r_points) < 5 and len(last_r_points) > 0:\n #print(\"less than 4 r_points:\", len(r_points))\n r_points = last_r_points\n else:\n last_r_points = r_points\n r_points = np.array(r_points, dtype=np.int32)\n r_poly = np.polyfit(r_points[:,1], 
r_points[:,0], 2)\n\n yval = np.arange(0, warped.shape[0])\n l_xval = np.polyval(l_poly, yval)\n r_xval = np.polyval(r_poly, yval)\n\n if save_images:\n lanes = warped*255\n lanes = np.array(cv2.merge((lanes,lanes,lanes)),np.uint8) # make window pixels green\n lanes = draw_window_boxes(lanes, l_points, r_points, window_width, window_height)\n\n for p in l_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)\n for p in r_points:\n cv2.circle(lanes, (p[0], p[1]), 10, (255,0,255), -1)\n\n for x,y in zip(l_xval, yval):\n cv2.circle(lanes, (int(x),y), 5, (255,255,0), -1)\n for x,y in zip(r_xval, yval):\n cv2.circle(lanes, (int(x),y), 5, (0,255,255), -1)\n\n save_output_image(lanes, 'lanes' + save_suffix, cmap='gray')\n\n # Define conversions in x and y from pixels space to meters\n ym_per_pix = 30/720 # meters per pixel in y dimension\n xm_per_pix = 3.7/700 # meters per pixel in x dimension\n\n #calculated the position of the vehicle with respect to center\n lane_center_offset_m = (warped.shape[1]/2 - (l_xval[-1] + r_xval[-1])/2) * xm_per_pix\n direction = 'Left'\n if lane_center_offset_m > 0:\n direction = 'Right'\n\n #calculated the radius of curvature of the lane\n y_eval = np.max(yval)\n # Fit new polynomials to x,y in world space\n left_fit_cr = np.polyfit(l_points[:,1]*ym_per_pix, l_points[:,0]*xm_per_pix, 2)\n right_fit_cr = np.polyfit(r_points[:,1]*ym_per_pix, r_points[:,0]*xm_per_pix, 2)\n # Calculate the new radii of curvature\n left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])\n right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])\n # Now our radius of curvature is in meters\n\n #Provide an example image of your result plotted back down onto the road such that the lane area is identified clearly\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([l_xval , yval]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_xval, yval])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n unwarp = cv2.warpPerspective(color_warp, Minv, img_size, flags=cv2.INTER_LINEAR)\n draw_text(undistorted, \"Radius: {:.1f}m {:.1f}m\".format(left_curverad, right_curverad), (50, 50))\n draw_text(undistorted, \"{:.3f}m {} of Center\".format(abs(lane_center_offset_m), direction), (50, 100))\n output = cv2.addWeighted(undistorted, 1, unwarp, 0.4, 0)\n if save_images:\n save_output_image(output, 'output' + save_suffix)\n\n return output\n\ndef process_test_images():\n filenames = glob('test_images/*.jpg')\n #filenames = ['test_images/test2.jpg']\n for f in filenames:\n img = load_image(f)\n img_out = pipeline_image(img, True, '-' + f.split('/')[-1])\n #show_before_after(img, img_out, 'gray')\n\ndef process_video(in_file, out_file):\n clip = VideoFileClip(in_file)\n video_clip = clip.fl_image(pipeline_image)\n video_clip.write_videofile(out_file, audio=False)\n\ndef show_before_after(before, after, cmap=None):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n ax1.imshow(before)\n ax1.set_title('Before')\n ax2.imshow(after, cmap=cmap)\n ax2.set_title('After')\n plt.show()\n\ndef show_images(imgs, titles):\n fig, axes = plt.subplots(3, 
6, figsize=(12, 6))\n fig.subplots_adjust(hspace=0.5, wspace=0.5)\n\n for ax, img, title in zip(axes.flat, imgs, titles):\n ax.imshow(img)\n ax.set_title(title)\n\n plt.show()\n\n\nlast_l_points = []\nlast_r_points = []\n\nmtx, dist = calibrate_camera()\nprocess_test_images()\nprocess_video('project_video.mp4', 'output.mp4')\nprocess_video('challenge_video.mp4', 'challenge_output.mp4')\nprocess_video('harder_challenge_video.mp4', 'harder_challenge_output.mp4')\n\n", "step-ids": [ 14, 17, 18, 19, 20 ] }
[ 14, 17, 18, 19, 20 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(int(h7 * i))
<|reserved_special_token_1|>
g7 = int(input())
h7 = g7 / 2
i = g7 - 1
print(int(h7 * i))
flexible
{ "blob_id": "abb08956f55fd1e8af27ce12fa94a4137d7d908e", "index": 7251, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(int(h7 * i))\n", "step-3": "g7 = int(input())\nh7 = g7 / 2\ni = g7 - 1\nprint(int(h7 * i))\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
'''import pyttsx3

#engine = pyttsx3.init()

#Conficuração das vozes
#voices = engine.getProperty('voices')
#engine.setProperty('voice', voices[2].id)

engine=pyttsx3.init()

voices=engine.getProperty('voices')
engine.setProperty('voice',voices[3].id)

#Falar texto
engine.say('Olá meu nome é Jarvis. Sou uma inteligência artificial')
engine.runAndWait()
#print(voices)

#Printa na tela todas as vozes disponíveis'''
'''for voice in voices:
    print("Voice: %s" % voice.name)
    print(" - ID: %s" % voice.id)
    print(" - Languages: %s" % voice.languages)
    print(" - Gender: %s" % voice.gender)
    print(" - Age: %s" % voice.age)
    print("\n")'''
normal
{ "blob_id": "d9bf58dc76d4e8d7146fac3bb2bdfb538ebf78a5", "index": 7102, "step-1": "<mask token>\n", "step-2": "'''import pyttsx3\n\n#engine = pyttsx3.init()\n\n#Conficuração das vozes\n#voices = engine.getProperty('voices')\n#engine.setProperty('voice', voices[2].id)\n\nengine=pyttsx3.init()\n\nvoices=engine.getProperty('voices')\nengine.setProperty('voice',voices[3].id)\n\n#Falar texto\nengine.say('Olá meu nome é Jarvis. Sou uma inteligência artificial')\nengine.runAndWait()\n#print(voices)\n\n#Printa na tela todas as vozes disponíveis'''\n'''for voice in voices:\n print(\"Voice: %s\" % voice.name)\n print(\" - ID: %s\" % voice.id)\n print(\" - Languages: %s\" % voice.languages)\n print(\" - Gender: %s\" % voice.gender)\n print(\" - Age: %s\" % voice.age)\n print(\"\\n\")'''\n\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
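Since every line in the snippet above is commented out, a minimal runnable version of the same pyttsx3 calls might look like the sketch below. It assumes pyttsx3 is installed and that the system exposes at least one voice; voice index 0 is used instead of 3 to avoid an IndexError on machines with few voices.

import pyttsx3

engine = pyttsx3.init()
voices = engine.getProperty('voices')
for voice in voices:
    print(voice.id, voice.name)            # list the available voices
engine.setProperty('voice', voices[0].id)  # pick the first voice
engine.say('Hello, my name is Jarvis. I am an artificial intelligence.')
engine.runAndWait()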
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('#1 map')
<|reserved_special_token_0|>
print(new_list)
print('\n#2 reduce')
<|reserved_special_token_0|>
print(reduce_data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('#1 map')
a_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]
map_data = map(lambda x: x * 2 + 1, a_list)
new_list = list(map_data)
print(new_list)
print('\n#2 reduce')
b_list = [1, 2, 3, 4, 5]
reduce_data = reduce(lambda x, y: x + y, b_list)
print(reduce_data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from functools import reduce
print('#1 map')
a_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]
map_data = map(lambda x: x * 2 + 1, a_list)
new_list = list(map_data)
print(new_list)
print('\n#2 reduce')
b_list = [1, 2, 3, 4, 5]
reduce_data = reduce(lambda x, y: x + y, b_list)
print(reduce_data)
<|reserved_special_token_1|>
"""
File: ex17_map_reduce.py
Author: TonyDeep
Date: 2020-07-21
"""

from functools import reduce

print('#1 map')
a_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]
map_data = map(lambda x: x * 2 + 1, a_list)
new_list = list(map_data)
print(new_list)

print('\n#2 reduce')
b_list = [1, 2, 3, 4, 5]
reduce_data = reduce(lambda x, y: x + y, b_list)
print(reduce_data)
flexible
{ "blob_id": "8e3b26826752b6b3482e8a29b9b58f5025c7ef58", "index": 4758, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('#1 map')\n<mask token>\nprint(new_list)\nprint('\\n#2 reduce')\n<mask token>\nprint(reduce_data)\n", "step-3": "<mask token>\nprint('#1 map')\na_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]\nmap_data = map(lambda x: x * 2 + 1, a_list)\nnew_list = list(map_data)\nprint(new_list)\nprint('\\n#2 reduce')\nb_list = [1, 2, 3, 4, 5]\nreduce_data = reduce(lambda x, y: x + y, b_list)\nprint(reduce_data)\n", "step-4": "<mask token>\nfrom functools import reduce\nprint('#1 map')\na_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]\nmap_data = map(lambda x: x * 2 + 1, a_list)\nnew_list = list(map_data)\nprint(new_list)\nprint('\\n#2 reduce')\nb_list = [1, 2, 3, 4, 5]\nreduce_data = reduce(lambda x, y: x + y, b_list)\nprint(reduce_data)\n", "step-5": "\"\"\"\nFile: ex17_map_reduce.py\nAuthor: TonyDeep\nDate: 2020-07-21\n\"\"\"\n\nfrom functools import reduce\n\nprint('#1 map')\na_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]\nmap_data = map(lambda x: x * 2 + 1, a_list)\nnew_list = list(map_data)\nprint(new_list)\n\nprint('\\n#2 reduce')\nb_list = [1, 2, 3, 4, 5]\nreduce_data = reduce(lambda x, y: x + y, b_list)\nprint(reduce_data)\n\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
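For reference, the final script in the record above prints the mapped list and then the reduced sum; the same transform written with a comprehension gives identical results:

from functools import reduce

a_list = [2, 18, 9, 22, 17, 24, 8, 12, 27]
print([x * 2 + 1 for x in a_list])                  # [5, 37, 19, 45, 35, 49, 17, 25, 55]
print(reduce(lambda x, y: x + y, [1, 2, 3, 4, 5]))  # 15, same as sum([1, 2, 3, 4, 5])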
import tkinter as tk import tkinter.messagebox as tkmb import psutil import os import re import subprocess from subprocess import Popen, PIPE, STDOUT, DEVNULL import filecmp import re import time import threading import datetime import re debian = '/etc/debian_version' redhat = '/etc/redhat-release' def PrintaLog(texto): t = time.time() logtime= time.ctime(t) stringprint = "%s %s\n" % (logtime, texto) f = open("/var/log/patriot", "a") f.write(stringprint) f.flush() f.close() def PrintaMSG(texto): command = 'python3 alertiqt.py "'+texto+'"' processalert = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL) def TestIntegrity(File): if os.path.exists(redhat) : command = 'rpm -Vf "'+File+'"' processrpm = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True) outputrpm = processrpm.communicate()[0] if outputrpm : return(1) else: return(0) else : commandDPKG = 'dpkg -S "'+File+'"' processdpkg = subprocess.Popen([commandDPKG], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL) outputdpkg = processdpkg.communicate()[0] if processdpkg.returncode == 1: #dpkg is buggy to find package files fixdpkgbug= re.sub('/usr', '', File) commandDPKG2 = 'dpkg -S "'+fixdpkgbug+'"' processdpkg2 = subprocess.Popen([commandDPKG2], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL) outputdpkg2 = processdpkg2.communicate()[0] outputdpkg = outputdpkg2 if processdpkg2.returncode == 1: return(1) packagename = outputdpkg.split(":") commandDEBSUM = 'dpkg --verify "'+packagename[0]+'"' processdebsum = subprocess.Popen([commandDEBSUM], stdout=subprocess.PIPE,shell=True) outputdebsum = processdebsum.communicate()[0] print (outputdebsum) if outputdebsum : return(1) else: return(0) def ScanUnsigned(): pidsinicial = psutil.pids() while True: pidsshots = psutil.pids() s = set(pidsinicial) newpids = [x for x in pidsshots if x not in s] if newpids: #print(newpids) for i in newpids: #print(i) try: p = psutil.Process(pid=i) with p.oneshot(): integrity= TestIntegrity(p.exe()) #print (integrity) pidproceso = p.pid exeproceso = p.exe() evadeau = bool(re.match(exeproceso, "/usr/sbin/ausearch")) if integrity == 1 and evadeau == 0: stringprint = "New process that not belongs to any package or package was modified: %i %s" % (pidproceso, exeproceso) x = threading.Thread(target=PrintaMSG, args=(stringprint,)) x.setDaemon(True) x.start() PrintaLog(stringprint) except Exception as e: print (e) pidsinicial = pidsshots time.sleep(2) def ScanConnections(): initialcon =psutil.net_connections() netprocess =[] for i in initialcon: #print (i.pid) p = psutil.Process(pid=i.pid) with p.oneshot(): #print (p.exe()) netprocess.append(p.exe()) #print (netprocess) while True: runcon =psutil.net_connections() netprocessrun =[] for e in runcon: #print (e.pid) p = psutil.Process(pid=e.pid) with p.oneshot(): #print (p.exe()) netprocessrun.append(p.exe()) #print (netprocessrun) s = set(netprocess) newpconprogs = [x for x in netprocessrun if x not in s] if newpconprogs: #print(newpconprogs) for h in newpconprogs: stringprint = "New Process initiating TCP/IP connection: %s" % h x = threading.Thread(target=PrintaMSG, args=(stringprint,)) x.setDaemon(True) x.start() PrintaLog(stringprint) netprocess.append(h) time.sleep(2) def AuSearch(): auparams = {"modules": "New module loaded in Kernel","code_injection": "DLL Inject","register_injection": "DLL Inject"} while True: tomo = datetime.datetime.now() - datetime.timedelta(minutes=2) timeraw = str(tomo.time().replace(second=0, microsecond=0)) for key in auparams.keys(): #print(key) 
command = 'ausearch -k "'+key+'" --start "'+timeraw+'"' processausearch = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL) outputausearch = processausearch.communicate()[0] if outputausearch: stringprint = "Audit Alert: %s" % auparams[key] x = threading.Thread(target=PrintaMSG, args=(stringprint,)) x.setDaemon(True) x.start() PrintaLog(stringprint) time.sleep(115) def KeyBoardSearch(): command = "xinput --list" keyfirstcommand = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True) outputkeysearch= keyfirstcommand.communicate()[0] while True: keyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True) outputkeyrunsearch= keyruncommand.communicate()[0] if outputkeyrunsearch != outputkeysearch: stringprint = "New keyboard detected" x = threading.Thread(target=PrintaMSG, args=(stringprint,)) x.setDaemon(True) x.start() PrintaLog(stringprint) outputkeysearch = outputkeyrunsearch time.sleep(60) s = threading.Thread(target=KeyBoardSearch) s.setDaemon(True) s.start() x = threading.Thread(target=ScanUnsigned) x.setDaemon(True) x.start() y = threading.Thread(target=ScanConnections) y.setDaemon(True) y.start() z = threading.Thread(target=AuSearch) z.setDaemon(True) z.start() while True: time.sleep(100)
normal
{ "blob_id": "fde62dd3f5ee3cc0a1568b037ada14835c327046", "index": 6298, "step-1": "<mask token>\n\n\ndef PrintaLog(texto):\n t = time.time()\n logtime = time.ctime(t)\n stringprint = '%s %s\\n' % (logtime, texto)\n f = open('/var/log/patriot', 'a')\n f.write(stringprint)\n f.flush()\n f.close()\n\n\ndef PrintaMSG(texto):\n command = 'python3 alertiqt.py \"' + texto + '\"'\n processalert = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True, stderr=DEVNULL)\n\n\n<mask token>\n\n\ndef ScanConnections():\n initialcon = psutil.net_connections()\n netprocess = []\n for i in initialcon:\n p = psutil.Process(pid=i.pid)\n with p.oneshot():\n netprocess.append(p.exe())\n while True:\n runcon = psutil.net_connections()\n netprocessrun = []\n for e in runcon:\n p = psutil.Process(pid=e.pid)\n with p.oneshot():\n netprocessrun.append(p.exe())\n s = set(netprocess)\n newpconprogs = [x for x in netprocessrun if x not in s]\n if newpconprogs:\n for h in newpconprogs:\n stringprint = (\n 'New Process initiating TCP/IP connection: %s' % h)\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n netprocess.append(h)\n time.sleep(2)\n\n\ndef AuSearch():\n auparams = {'modules': 'New module loaded in Kernel', 'code_injection':\n 'DLL Inject', 'register_injection': 'DLL Inject'}\n while True:\n tomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n timeraw = str(tomo.time().replace(second=0, microsecond=0))\n for key in auparams.keys():\n command = 'ausearch -k \"' + key + '\" --start \"' + timeraw + '\"'\n processausearch = subprocess.Popen([command], stdout=subprocess\n .PIPE, shell=True, stderr=DEVNULL)\n outputausearch = processausearch.communicate()[0]\n if outputausearch:\n stringprint = 'Audit Alert: %s' % auparams[key]\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n time.sleep(115)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef PrintaLog(texto):\n t = time.time()\n logtime = time.ctime(t)\n stringprint = '%s %s\\n' % (logtime, texto)\n f = open('/var/log/patriot', 'a')\n f.write(stringprint)\n f.flush()\n f.close()\n\n\ndef PrintaMSG(texto):\n command = 'python3 alertiqt.py \"' + texto + '\"'\n processalert = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True, stderr=DEVNULL)\n\n\n<mask token>\n\n\ndef ScanUnsigned():\n pidsinicial = psutil.pids()\n while True:\n pidsshots = psutil.pids()\n s = set(pidsinicial)\n newpids = [x for x in pidsshots if x not in s]\n if newpids:\n for i in newpids:\n try:\n p = psutil.Process(pid=i)\n with p.oneshot():\n integrity = TestIntegrity(p.exe())\n pidproceso = p.pid\n exeproceso = p.exe()\n evadeau = bool(re.match(exeproceso,\n '/usr/sbin/ausearch'))\n if integrity == 1 and evadeau == 0:\n stringprint = (\n 'New process that not belongs to any package or package was modified: %i %s'\n % (pidproceso, exeproceso))\n x = threading.Thread(target=PrintaMSG, args=(\n stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n except Exception as e:\n print(e)\n pidsinicial = pidsshots\n time.sleep(2)\n\n\ndef ScanConnections():\n initialcon = psutil.net_connections()\n netprocess = []\n for i in initialcon:\n p = psutil.Process(pid=i.pid)\n with p.oneshot():\n netprocess.append(p.exe())\n while True:\n runcon = psutil.net_connections()\n netprocessrun = []\n for e in runcon:\n p = psutil.Process(pid=e.pid)\n with p.oneshot():\n netprocessrun.append(p.exe())\n s = 
set(netprocess)\n newpconprogs = [x for x in netprocessrun if x not in s]\n if newpconprogs:\n for h in newpconprogs:\n stringprint = (\n 'New Process initiating TCP/IP connection: %s' % h)\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n netprocess.append(h)\n time.sleep(2)\n\n\ndef AuSearch():\n auparams = {'modules': 'New module loaded in Kernel', 'code_injection':\n 'DLL Inject', 'register_injection': 'DLL Inject'}\n while True:\n tomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n timeraw = str(tomo.time().replace(second=0, microsecond=0))\n for key in auparams.keys():\n command = 'ausearch -k \"' + key + '\" --start \"' + timeraw + '\"'\n processausearch = subprocess.Popen([command], stdout=subprocess\n .PIPE, shell=True, stderr=DEVNULL)\n outputausearch = processausearch.communicate()[0]\n if outputausearch:\n stringprint = 'Audit Alert: %s' % auparams[key]\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n time.sleep(115)\n\n\ndef KeyBoardSearch():\n command = 'xinput --list'\n keyfirstcommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeysearch = keyfirstcommand.communicate()[0]\n while True:\n keyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeyrunsearch = keyruncommand.communicate()[0]\n if outputkeyrunsearch != outputkeysearch:\n stringprint = 'New keyboard detected'\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n outputkeysearch = outputkeyrunsearch\n time.sleep(60)\n\n\n<mask token>\n", "step-3": "<mask token>\ndebian = '/etc/debian_version'\nredhat = '/etc/redhat-release'\n\n\ndef PrintaLog(texto):\n t = time.time()\n logtime = time.ctime(t)\n stringprint = '%s %s\\n' % (logtime, texto)\n f = open('/var/log/patriot', 'a')\n f.write(stringprint)\n f.flush()\n f.close()\n\n\ndef PrintaMSG(texto):\n command = 'python3 alertiqt.py \"' + texto + '\"'\n processalert = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True, stderr=DEVNULL)\n\n\ndef TestIntegrity(File):\n if os.path.exists(redhat):\n command = 'rpm -Vf \"' + File + '\"'\n processrpm = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputrpm = processrpm.communicate()[0]\n if outputrpm:\n return 1\n else:\n return 0\n else:\n commandDPKG = 'dpkg -S \"' + File + '\"'\n processdpkg = subprocess.Popen([commandDPKG], stdout=subprocess.\n PIPE, shell=True, stderr=DEVNULL)\n outputdpkg = processdpkg.communicate()[0]\n if processdpkg.returncode == 1:\n fixdpkgbug = re.sub('/usr', '', File)\n commandDPKG2 = 'dpkg -S \"' + fixdpkgbug + '\"'\n processdpkg2 = subprocess.Popen([commandDPKG2], stdout=\n subprocess.PIPE, shell=True, stderr=DEVNULL)\n outputdpkg2 = processdpkg2.communicate()[0]\n outputdpkg = outputdpkg2\n if processdpkg2.returncode == 1:\n return 1\n packagename = outputdpkg.split(':')\n commandDEBSUM = 'dpkg --verify \"' + packagename[0] + '\"'\n processdebsum = subprocess.Popen([commandDEBSUM], stdout=subprocess\n .PIPE, shell=True)\n outputdebsum = processdebsum.communicate()[0]\n print(outputdebsum)\n if outputdebsum:\n return 1\n else:\n return 0\n\n\ndef ScanUnsigned():\n pidsinicial = psutil.pids()\n while True:\n pidsshots = psutil.pids()\n s = set(pidsinicial)\n newpids = [x for x in pidsshots if x not in s]\n if newpids:\n for i in newpids:\n try:\n p = psutil.Process(pid=i)\n 
with p.oneshot():\n integrity = TestIntegrity(p.exe())\n pidproceso = p.pid\n exeproceso = p.exe()\n evadeau = bool(re.match(exeproceso,\n '/usr/sbin/ausearch'))\n if integrity == 1 and evadeau == 0:\n stringprint = (\n 'New process that not belongs to any package or package was modified: %i %s'\n % (pidproceso, exeproceso))\n x = threading.Thread(target=PrintaMSG, args=(\n stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n except Exception as e:\n print(e)\n pidsinicial = pidsshots\n time.sleep(2)\n\n\ndef ScanConnections():\n initialcon = psutil.net_connections()\n netprocess = []\n for i in initialcon:\n p = psutil.Process(pid=i.pid)\n with p.oneshot():\n netprocess.append(p.exe())\n while True:\n runcon = psutil.net_connections()\n netprocessrun = []\n for e in runcon:\n p = psutil.Process(pid=e.pid)\n with p.oneshot():\n netprocessrun.append(p.exe())\n s = set(netprocess)\n newpconprogs = [x for x in netprocessrun if x not in s]\n if newpconprogs:\n for h in newpconprogs:\n stringprint = (\n 'New Process initiating TCP/IP connection: %s' % h)\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n netprocess.append(h)\n time.sleep(2)\n\n\ndef AuSearch():\n auparams = {'modules': 'New module loaded in Kernel', 'code_injection':\n 'DLL Inject', 'register_injection': 'DLL Inject'}\n while True:\n tomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n timeraw = str(tomo.time().replace(second=0, microsecond=0))\n for key in auparams.keys():\n command = 'ausearch -k \"' + key + '\" --start \"' + timeraw + '\"'\n processausearch = subprocess.Popen([command], stdout=subprocess\n .PIPE, shell=True, stderr=DEVNULL)\n outputausearch = processausearch.communicate()[0]\n if outputausearch:\n stringprint = 'Audit Alert: %s' % auparams[key]\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n time.sleep(115)\n\n\ndef KeyBoardSearch():\n command = 'xinput --list'\n keyfirstcommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeysearch = keyfirstcommand.communicate()[0]\n while True:\n keyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeyrunsearch = keyruncommand.communicate()[0]\n if outputkeyrunsearch != outputkeysearch:\n stringprint = 'New keyboard detected'\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n outputkeysearch = outputkeyrunsearch\n time.sleep(60)\n\n\ns = threading.Thread(target=KeyBoardSearch)\ns.setDaemon(True)\ns.start()\nx = threading.Thread(target=ScanUnsigned)\nx.setDaemon(True)\nx.start()\ny = threading.Thread(target=ScanConnections)\ny.setDaemon(True)\ny.start()\nz = threading.Thread(target=AuSearch)\nz.setDaemon(True)\nz.start()\nwhile True:\n time.sleep(100)\n", "step-4": "import tkinter as tk\nimport tkinter.messagebox as tkmb\nimport psutil\nimport os\nimport re\nimport subprocess\nfrom subprocess import Popen, PIPE, STDOUT, DEVNULL\nimport filecmp\nimport re\nimport time\nimport threading\nimport datetime\nimport re\ndebian = '/etc/debian_version'\nredhat = '/etc/redhat-release'\n\n\ndef PrintaLog(texto):\n t = time.time()\n logtime = time.ctime(t)\n stringprint = '%s %s\\n' % (logtime, texto)\n f = open('/var/log/patriot', 'a')\n f.write(stringprint)\n f.flush()\n f.close()\n\n\ndef PrintaMSG(texto):\n command = 'python3 alertiqt.py \"' + texto + '\"'\n processalert = 
subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True, stderr=DEVNULL)\n\n\ndef TestIntegrity(File):\n if os.path.exists(redhat):\n command = 'rpm -Vf \"' + File + '\"'\n processrpm = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputrpm = processrpm.communicate()[0]\n if outputrpm:\n return 1\n else:\n return 0\n else:\n commandDPKG = 'dpkg -S \"' + File + '\"'\n processdpkg = subprocess.Popen([commandDPKG], stdout=subprocess.\n PIPE, shell=True, stderr=DEVNULL)\n outputdpkg = processdpkg.communicate()[0]\n if processdpkg.returncode == 1:\n fixdpkgbug = re.sub('/usr', '', File)\n commandDPKG2 = 'dpkg -S \"' + fixdpkgbug + '\"'\n processdpkg2 = subprocess.Popen([commandDPKG2], stdout=\n subprocess.PIPE, shell=True, stderr=DEVNULL)\n outputdpkg2 = processdpkg2.communicate()[0]\n outputdpkg = outputdpkg2\n if processdpkg2.returncode == 1:\n return 1\n packagename = outputdpkg.split(':')\n commandDEBSUM = 'dpkg --verify \"' + packagename[0] + '\"'\n processdebsum = subprocess.Popen([commandDEBSUM], stdout=subprocess\n .PIPE, shell=True)\n outputdebsum = processdebsum.communicate()[0]\n print(outputdebsum)\n if outputdebsum:\n return 1\n else:\n return 0\n\n\ndef ScanUnsigned():\n pidsinicial = psutil.pids()\n while True:\n pidsshots = psutil.pids()\n s = set(pidsinicial)\n newpids = [x for x in pidsshots if x not in s]\n if newpids:\n for i in newpids:\n try:\n p = psutil.Process(pid=i)\n with p.oneshot():\n integrity = TestIntegrity(p.exe())\n pidproceso = p.pid\n exeproceso = p.exe()\n evadeau = bool(re.match(exeproceso,\n '/usr/sbin/ausearch'))\n if integrity == 1 and evadeau == 0:\n stringprint = (\n 'New process that not belongs to any package or package was modified: %i %s'\n % (pidproceso, exeproceso))\n x = threading.Thread(target=PrintaMSG, args=(\n stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n except Exception as e:\n print(e)\n pidsinicial = pidsshots\n time.sleep(2)\n\n\ndef ScanConnections():\n initialcon = psutil.net_connections()\n netprocess = []\n for i in initialcon:\n p = psutil.Process(pid=i.pid)\n with p.oneshot():\n netprocess.append(p.exe())\n while True:\n runcon = psutil.net_connections()\n netprocessrun = []\n for e in runcon:\n p = psutil.Process(pid=e.pid)\n with p.oneshot():\n netprocessrun.append(p.exe())\n s = set(netprocess)\n newpconprogs = [x for x in netprocessrun if x not in s]\n if newpconprogs:\n for h in newpconprogs:\n stringprint = (\n 'New Process initiating TCP/IP connection: %s' % h)\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n netprocess.append(h)\n time.sleep(2)\n\n\ndef AuSearch():\n auparams = {'modules': 'New module loaded in Kernel', 'code_injection':\n 'DLL Inject', 'register_injection': 'DLL Inject'}\n while True:\n tomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n timeraw = str(tomo.time().replace(second=0, microsecond=0))\n for key in auparams.keys():\n command = 'ausearch -k \"' + key + '\" --start \"' + timeraw + '\"'\n processausearch = subprocess.Popen([command], stdout=subprocess\n .PIPE, shell=True, stderr=DEVNULL)\n outputausearch = processausearch.communicate()[0]\n if outputausearch:\n stringprint = 'Audit Alert: %s' % auparams[key]\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n time.sleep(115)\n\n\ndef KeyBoardSearch():\n command = 'xinput --list'\n keyfirstcommand = subprocess.Popen([command], 
stdout=subprocess.PIPE,\n shell=True)\n outputkeysearch = keyfirstcommand.communicate()[0]\n while True:\n keyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,\n shell=True)\n outputkeyrunsearch = keyruncommand.communicate()[0]\n if outputkeyrunsearch != outputkeysearch:\n stringprint = 'New keyboard detected'\n x = threading.Thread(target=PrintaMSG, args=(stringprint,))\n x.setDaemon(True)\n x.start()\n PrintaLog(stringprint)\n outputkeysearch = outputkeyrunsearch\n time.sleep(60)\n\n\ns = threading.Thread(target=KeyBoardSearch)\ns.setDaemon(True)\ns.start()\nx = threading.Thread(target=ScanUnsigned)\nx.setDaemon(True)\nx.start()\ny = threading.Thread(target=ScanConnections)\ny.setDaemon(True)\ny.start()\nz = threading.Thread(target=AuSearch)\nz.setDaemon(True)\nz.start()\nwhile True:\n time.sleep(100)\n", "step-5": "import tkinter as tk\nimport tkinter.messagebox as tkmb\nimport psutil\nimport os\nimport re\nimport subprocess\nfrom subprocess import Popen, PIPE, STDOUT, DEVNULL\nimport filecmp\nimport re\nimport time\nimport threading\nimport datetime\nimport re\n\ndebian = '/etc/debian_version'\nredhat = '/etc/redhat-release'\n\ndef PrintaLog(texto):\n\t\n\tt = time.time()\n\tlogtime= time.ctime(t)\n\t\n\tstringprint = \"%s %s\\n\" % (logtime, texto)\n\t\n\tf = open(\"/var/log/patriot\", \"a\")\n\tf.write(stringprint)\n\tf.flush()\n\tf.close()\n\ndef PrintaMSG(texto):\n\n\tcommand = 'python3 alertiqt.py \"'+texto+'\"' \n\t\t\t\t\t\n\tprocessalert = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)\n\ndef TestIntegrity(File):\n\t\n\tif os.path.exists(redhat) : \n\t\n\t\tcommand = 'rpm -Vf \"'+File+'\"' \n\t\t\t\t\t\n\t\tprocessrpm = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True)\n\t\toutputrpm = processrpm.communicate()[0]\n\t\t\t\t\t\n\t\tif outputrpm :\n\t\t\t\t\t\t\n\t\t\treturn(1)\n\t\t\t\t\t\t\t\t\n\t\t\n\t\telse:\n\t\t\t\n\t\t\treturn(0)\n\n\telse :\t\n\t\t\n\t\tcommandDPKG = 'dpkg -S \"'+File+'\"'\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\tprocessdpkg = subprocess.Popen([commandDPKG], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)\n\t\toutputdpkg = processdpkg.communicate()[0]\n\t\t\t\t\t\t\n\t\tif processdpkg.returncode == 1:\n\t\t\t\t\t\t\t\n\t\t\t#dpkg is buggy to find package files \n\t\t\t\t\t\t\t\n\t\t\tfixdpkgbug= re.sub('/usr', '', File)\n\t\t\t\t\t\t\t\n\t\t\tcommandDPKG2 = 'dpkg -S \"'+fixdpkgbug+'\"'\n\t\t\t\t\t\t\n\t\t\tprocessdpkg2 = subprocess.Popen([commandDPKG2], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)\n\t\t\toutputdpkg2 = processdpkg2.communicate()[0]\n\t\t\t\t\t\t\t\n\t\t\toutputdpkg = outputdpkg2\n\t\t\t\t\t\t\t\n\t\t\tif processdpkg2.returncode == 1:\n\t\t\t\t\t\t\t\n\t\t\t\treturn(1)\n\t\t\t\t\t\t\t\t\n\t\t\t\t\n\t\tpackagename = outputdpkg.split(\":\")\n\t\t\t\t\t\t\n\t\tcommandDEBSUM = 'dpkg --verify \"'+packagename[0]+'\"'\n\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\tprocessdebsum = subprocess.Popen([commandDEBSUM], stdout=subprocess.PIPE,shell=True)\n\t\toutputdebsum = processdebsum.communicate()[0]\n\t\t\n\t\tprint (outputdebsum)\n\t\t\t\t\t\t\n\t\tif outputdebsum :\n\t\t\t\n\t\t\treturn(1)\n\t\t\t\t\t\t\n\t\telse:\n\t\t\treturn(0)\n\n\ndef ScanUnsigned():\n\t\n\tpidsinicial = psutil.pids()\n\n\twhile True:\n\t\n\t\tpidsshots = psutil.pids()\n\t\n\t\ts = set(pidsinicial)\n\t\tnewpids = [x for x in pidsshots if x not in s]\n\t\n\t\tif newpids:\n\t\n\t\t\t#print(newpids)\n\t\t\n\t\t\tfor i in newpids:\n\t\t\t\n\t\t\t\t#print(i)\n\t\t\t\ttry:\n\t\t\t\t\tp = psutil.Process(pid=i)\n\t\t\t\t\twith 
p.oneshot():\n\t\t\t\n\t\t\t\t\t\tintegrity= TestIntegrity(p.exe())\n\t\t\t\n\t\t\t\t\t\t#print (integrity)\n\t\t\t\t\t\t\n\t\t\t\t\t\tpidproceso = p.pid\n\t\t\t\t\t\texeproceso = p.exe()\n\t\t\t\t\t\t\n\t\t\t\t\t\tevadeau = bool(re.match(exeproceso, \"/usr/sbin/ausearch\"))\n\t\t\t\t\t\t\n\t\t\t\t\t\tif integrity == 1 and evadeau == 0:\n\t\t\t\t\t\t\n\t\t\t\t\t\t\tstringprint = \"New process that not belongs to any package or package was modified: %i %s\" % (pidproceso, exeproceso)\n\t\t\t\t\t\t\n\t\t\t\t\t\t\tx = threading.Thread(target=PrintaMSG, args=(stringprint,))\n\t\t\t\t\t\t\tx.setDaemon(True)\n\t\t\t\t\t\t\tx.start()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tPrintaLog(stringprint)\n\t\t\t\t\t\t\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint (e)\n\t\n\t\tpidsinicial = pidsshots\n\t\n\t\ttime.sleep(2)\n\t\t\n\ndef ScanConnections():\n\t\n\tinitialcon =psutil.net_connections()\n\n\tnetprocess =[]\n\n\tfor i in initialcon:\n\t\n\t\t#print (i.pid)\n\t\n\t\tp = psutil.Process(pid=i.pid)\n\t\n\t\twith p.oneshot():\n\t\t\n\t\t\t#print (p.exe())\n\t\t\n\t\t\tnetprocess.append(p.exe())\n\t\t\n\t#print (netprocess)\n\t\n\twhile True:\n\t\t\n\t\truncon =psutil.net_connections()\n\n\t\tnetprocessrun =[]\n\n\t\tfor e in runcon:\n\t\n\t\t\t#print (e.pid)\n\t\n\t\t\tp = psutil.Process(pid=e.pid)\n\t\n\t\t\twith p.oneshot():\n\t\t\n\t\t\t\t#print (p.exe())\n\t\t\n\t\t\t\tnetprocessrun.append(p.exe())\n\t\t\n\t\t#print (netprocessrun)\n\t\t\n\t\ts = set(netprocess)\n\t\tnewpconprogs = [x for x in netprocessrun if x not in s]\n\t\t\n\t\tif newpconprogs:\n\t\n\t\t\t#print(newpconprogs)\n\t\t\n\t\t\tfor h in newpconprogs:\n\t\t\t\t\n\t\t\t\tstringprint = \"New Process initiating TCP/IP connection: %s\" % h\n\t\t\t\t\t\t\n\t\t\t\tx = threading.Thread(target=PrintaMSG, args=(stringprint,))\n\t\t\t\tx.setDaemon(True)\n\t\t\t\tx.start()\n\t\t\t\t\n\t\t\t\tPrintaLog(stringprint)\n\t\t\t\t\n\t\t\t\tnetprocess.append(h)\n\t\t\n\t\t\t\t\n\t\ttime.sleep(2)\n\ndef AuSearch():\n\t\n\tauparams = {\"modules\": \"New module loaded in Kernel\",\"code_injection\": \"DLL Inject\",\"register_injection\": \"DLL Inject\"}\n\t\n\twhile True:\n\t\n\t\ttomo = datetime.datetime.now() - datetime.timedelta(minutes=2)\n\n\t\ttimeraw = str(tomo.time().replace(second=0, microsecond=0))\n\n\t\tfor key in auparams.keys():\n\t\t\t#print(key)\n\t\n\t\t\tcommand = 'ausearch -k \"'+key+'\" --start \"'+timeraw+'\"' \n\t\t\t\t\t\n\t\t\tprocessausearch = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True, stderr=DEVNULL)\n\t\t\toutputausearch = processausearch.communicate()[0]\n\t\n\t\t\tif outputausearch:\n\t\t\t\n\t\t\t\tstringprint = \"Audit Alert: %s\" % auparams[key]\n\t\t\t\t\t\t\n\t\t\t\tx = threading.Thread(target=PrintaMSG, args=(stringprint,))\n\t\t\t\tx.setDaemon(True)\n\t\t\t\tx.start()\n\t\t\t\n\t\t\t\tPrintaLog(stringprint)\n\t\n\t\ttime.sleep(115)\n\ndef KeyBoardSearch():\n\t\n\tcommand = \"xinput --list\" \n\t\n\tkeyfirstcommand = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True)\n\toutputkeysearch= keyfirstcommand.communicate()[0]\n\t\n\twhile True:\n\t\t\n\t\tkeyruncommand = subprocess.Popen([command], stdout=subprocess.PIPE,shell=True)\n\t\toutputkeyrunsearch= keyruncommand.communicate()[0]\n\t\t\n\t\tif outputkeyrunsearch != outputkeysearch:\n\t\t\t\n\t\t\tstringprint = \"New keyboard detected\"\n\t\t\t\n\t\t\tx = threading.Thread(target=PrintaMSG, args=(stringprint,))\n\t\t\tx.setDaemon(True)\n\t\t\tx.start()\n\t\t\t\n\t\t\tPrintaLog(stringprint)\n\t\t\t\n\t\t\toutputkeysearch = 
outputkeyrunsearch\n\t\t\t\n\t\ttime.sleep(60)\n\t\t\t\n\t\ns = threading.Thread(target=KeyBoardSearch)\ns.setDaemon(True)\ns.start()\t\n\nx = threading.Thread(target=ScanUnsigned)\nx.setDaemon(True)\nx.start()\n\ny = threading.Thread(target=ScanConnections)\ny.setDaemon(True)\ny.start()\n\nz = threading.Thread(target=AuSearch)\nz.setDaemon(True)\nz.start()\n\nwhile True:\n\t\n\ttime.sleep(100)\n", "step-ids": [ 4, 6, 9, 10, 11 ] }
[ 4, 6, 9, 10, 11 ]
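A simplified, standalone sketch of the package-integrity check that TestIntegrity performs above on Debian-based systems: dpkg -S maps a file to its owning package, and dpkg --verify prints a line only for files that differ from the packaged version. The path /bin/ls is just an example value.

import subprocess

path = '/bin/ls'   # example binary; any packaged file works
owner = subprocess.run(['dpkg', '-S', path], capture_output=True, text=True)
if owner.returncode != 0:
    print(path, 'does not belong to any package')
else:
    package = owner.stdout.split(':')[0]
    verify = subprocess.run(['dpkg', '--verify', package], capture_output=True, text=True)
    print(package, 'modified' if verify.stdout else 'unmodified')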
def merge(items, temp, low, mid, high):
    i = low
    j = mid + 1
    for k in range(low, high+1):
        if i > mid:
            # the left half is exhausted,
            # so the remaining right-half elements are taken as they are
            temp[k] = items[j]
            # they are already in order, so just append them
            j += 1
        elif j > high:
            # the right half is exhausted,
            # so the remaining left-half elements are taken as they are
            temp[k] = items[i]
            # they are already in order, so just append them
            i += 1
        elif items[j] < items[i]:
            # the left element is larger,
            # so the right element goes into the merged list
            temp[k] = items[j]
            j += 1  # compare the next element of the right half
        else:
            # the left element is smaller or equal,
            # so the left element goes into the merged list
            temp[k] = items[i]
            i += 1  # compare the next element of the left half
    for k in range(low, high+1):
        items[k] = temp[k]  # copy the merged result back into the original list

def merge_sort(items, temp, low, high):
    if high <= low:
        return None  # nothing left to sort
    mid = low + (high - low)//2  # low, high and mid are indices, not values
    merge_sort(items, temp, low, mid)
    merge_sort(items, temp, mid+1, high)
    merge(items, temp, low, mid, high)

if __name__ == '__main__':
    items = [5, 4, 3, 3, 5, 6, 4, 4, 3, 2]
    temp = [None]*len(items)
    print('Before sorting: \t', end='')
    print(items)
    merge_sort(items, temp, 0, len(items)-1)
    print('After sorting: \t', end='')
    print(items)
normal
{ "blob_id": "9ab119b32ceac370b744658e5fa679292609373a", "index": 2517, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef merge_sort(items, temp, low, high):\n if high <= low:\n return None\n mid = low + (high - low) // 2\n merge_sort(items, temp, low, mid)\n merge_sort(items, temp, mid + 1, high)\n merge(items, temp, low, mid, high)\n\n\n<mask token>\n", "step-3": "def merge(items, temp, low, mid, high):\n i = low\n j = mid + 1\n for k in range(low, high + 1):\n if i > mid:\n temp[k] = items[j]\n j += 1\n elif j > high:\n temp[k] = items[i]\n i += 1\n elif items[j] < items[i]:\n temp[k] = items[j]\n j += 1\n else:\n temp[k] = items[i]\n i += 1\n for k in range(low, high + 1):\n items[k] = temp[k]\n\n\ndef merge_sort(items, temp, low, high):\n if high <= low:\n return None\n mid = low + (high - low) // 2\n merge_sort(items, temp, low, mid)\n merge_sort(items, temp, mid + 1, high)\n merge(items, temp, low, mid, high)\n\n\n<mask token>\n", "step-4": "def merge(items, temp, low, mid, high):\n i = low\n j = mid + 1\n for k in range(low, high + 1):\n if i > mid:\n temp[k] = items[j]\n j += 1\n elif j > high:\n temp[k] = items[i]\n i += 1\n elif items[j] < items[i]:\n temp[k] = items[j]\n j += 1\n else:\n temp[k] = items[i]\n i += 1\n for k in range(low, high + 1):\n items[k] = temp[k]\n\n\ndef merge_sort(items, temp, low, high):\n if high <= low:\n return None\n mid = low + (high - low) // 2\n merge_sort(items, temp, low, mid)\n merge_sort(items, temp, mid + 1, high)\n merge(items, temp, low, mid, high)\n\n\nif __name__ == '__main__':\n items = [5, 4, 3, 3, 5, 6, 4, 4, 3, 2]\n temp = [None] * len(items)\n print('정렬 전 : \\t', end='')\n print(items)\n merge_sort(items, temp, 0, len(items) - 1)\n print('정렬 전 : \\t', end='')\n print(items)\n", "step-5": "def merge(items, temp, low, mid, high):\n i = low\n j = mid + 1\n for k in range(low, high+1):\n if i > mid:\n # 왼쪽 리스트의 순회를 마쳤음\n # 남은 오른쪽 리스트의 원소들은 모두 왼쪽 리스트 원소보다 작음\n temp[k] = items[j]\n # 뒤에 나머지는 정렬되어있으니 그대로 넣기\n j += 1\n elif j > high:\n # 오른쪽 리스트의 순회를 마쳤음\n # 남은 왼쪽 리스트 원소들은 모두 오른쪽 리스트 원소보다 작음\n temp[k] = items[i]\n # 앞의 나머지는 정렬되어있으니 그대로 넣기\n i += 1\n elif items[j] < items[i]:\n # 왼쪽 리스트의 원소가 더 큰 경우\n # 오른쪽 리스트의 원소를 정렬리스트에 넣을거임\n temp[k] = items[j]\n j += 1\n # 오른쪽 리스트 다음 원소를 비교해보자\n else:\n # 왼쪽 리스트의 원소가 더 작거나 같은 경우\n # 왼쪽 리스트의 원소를 정렬리스트에 넣을거임\n temp[k] = items[i]\n i += 1\n # 왼쪽 리스트 다음 원소를 비교해라\n for k in range(low, high+1):\n items[k] = temp[k]\n # 이제 정렬해놓은거 원래 리스트로 복사해라\n\ndef merge_sort(items, temp, low, high):\n if high <= low:\n return None\n # 다 정렬했으면 이제 끝내라\n mid = low + (high - low)//2\n # low, high, mid 는 값이 아니라 index 값임\n merge_sort(items, temp, low, mid)\n merge_sort(items, temp, mid+1, high)\n merge(items, temp, low, mid, high)\n\nif __name__ == '__main__':\n items = [5,4,3,3,5,6,4,4,3,2]\n temp = [None]*len(items)\n print('정렬 전 : \\t', end ='')\n print(items)\n merge_sort(items, temp, 0, len(items)-1)\n print('정렬 전 : \\t', end='')\n print(items)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
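A quick correctness check for the merge sort above, assuming the merge and merge_sort functions from the record are defined in the same module; it compares the result against Python's built-in sorted():

import random

items = [random.randint(0, 99) for _ in range(20)]
expected = sorted(items)
temp = [None] * len(items)
merge_sort(items, temp, 0, len(items) - 1)   # uses merge_sort/merge defined above
print(items == expected)                     # True if the merge sort is correct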
<|reserved_special_token_0|> class Movie: def __init__(self, id: int): self.actors = set() self.name = '' self.id = id self.year = 0 def getName(self): return self.name def getActors(self): return self.actors def getId(self): return self.id def getDate(self): return self.year <|reserved_special_token_0|> def updateActors(self, actors_to_add: set()): for x in actors_to_add: self.actors.add(x) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Actor: def __init__(self, name: str, id: int): self.filmography = set() self.name = name self.id = id <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class Movie: def __init__(self, id: int): self.actors = set() self.name = '' self.id = id self.year = 0 def getName(self): return self.name def getActors(self): return self.actors def getId(self): return self.id def getDate(self): return self.year def updateActors(self, actor: Actor): self.actors.add(actor) def updateActors(self, actors_to_add: set()): for x in actors_to_add: self.actors.add(x) def setDate(self, i: int): self.year = i <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Actor: def __init__(self, name: str, id: int): self.filmography = set() self.name = name self.id = id def getFilms(self): return self.filmography def getName(self): return self.name def getId(self): return self.id def updateFilms(self, film: int): self.filmography.add(film) class Movie: def __init__(self, id: int): self.actors = set() self.name = '' self.id = id self.year = 0 def getName(self): return self.name def getActors(self): return self.actors def getId(self): return self.id def getDate(self): return self.year def updateActors(self, actor: Actor): self.actors.add(actor) def updateActors(self, actors_to_add: set()): for x in actors_to_add: self.actors.add(x) def setDate(self, i: int): self.year = i <|reserved_special_token_0|> def community_analysis(): f = open('./community/communities_outputs.txt', 'w') communities_generator = nx.community.girvan_newman(graph) communities = next(communities_generator) size = len(communities) while size < 10: print(communities) communities = next(communities_generator) size = len(communities) f.write('community iteration: size = {}, {} \n'.format(size, communities)) def link_pred(): splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2)) friends_PG = list() for x in splPG.keys(): for y in splPG[x].keys(): if splPG[x][y] == 2: l = list() l.append(x) l.append(y) friends_PG.append(l) predictions = nx.jaccard_coefficient(PG, friends_PG) results = list() for x in predictions: results.append(x) results.sort(key=lambda x: x[2]) results.reverse() k_vals = [10, 20, 50, 100] for k in k_vals: f = open('./link_pred/link_prediction_values_jaccard' + str(k) + '.txt', 'w') count = 0 while count < k: print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[ count][0]].getName(), all_actors_id_map[results[count][1]]. 
getName(), results[count][2])) f.write('({}, {}),jaccard: {}\n'.format(all_actors_id_map[ results[count][0]].getName(), all_actors_id_map[results[ count][1]].getName(), results[count][2])) count += 1 top_k = list() precision_at_k = 0 for x in range(k): top_k.append(results[x]) count = 0 for val in top_k: tup = val[0], val[1] if tup in edges: count += 1 precision_at_k = count / k print('precision @ K{}: {}\n'.format(k, precision_at_k)) f.write('precision @ K{}: {}'.format(k, precision_at_k)) f.close() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Actor: def __init__(self, name: str, id: int): self.filmography = set() self.name = name self.id = id def getFilms(self): return self.filmography def getName(self): return self.name def getId(self): return self.id def updateFilms(self, film: int): self.filmography.add(film) class Movie: def __init__(self, id: int): self.actors = set() self.name = '' self.id = id self.year = 0 def getName(self): return self.name def getActors(self): return self.actors def getId(self): return self.id def getDate(self): return self.year def updateActors(self, actor: Actor): self.actors.add(actor) def updateActors(self, actors_to_add: set()): for x in actors_to_add: self.actors.add(x) def setDate(self, i: int): self.year = i <|reserved_special_token_0|> def clean(threshold: int): for actorName in all_actors.keys(): if len(all_actors[actorName].getFilms()) > threshold: cleaned_actors.add(all_actors[actorName]) else: for movie in all_actors[actorName].getFilms(): if all_actors[actorName] in movie.getActors(): movie.getActors().remove(all_actors[actorName]) def clean_movies(threshold: int): for movie in cleaned_movies_1: if 2017 - movie.getDate() <= threshold: cleaned_movies.add(movie) else: for actor in movie.getActors(): s = actor.getFilms() s.remove(movie) def createGraph(): counter = 0 G = nx.Graph() PG_actors = set() for actor in cleaned_actors: G.add_node(actor.getId()) for movie in cleaned_movies: actorIds = set() for actor in movie.getActors(): actorIds.add(actor.getId()) combinations = itertools.combinations(actorIds, 2) for comb in combinations: reverse = comb[::-1] if comb not in edges and reverse not in edges: counter += 1 if 2017 - movie.getDate() < 60 and 2017 - movie.getDate() > 20: if (comb not in edges_last_60_20 and reverse not in edges_last_60_20): edges_last_60_20.add(comb) edges.add(comb) weights[comb] = 1 elif comb in edges: weights[comb] = weights[comb] + 1 elif reverse in edges: weights[reverse] = weights[reverse] + 1 G.add_edges_from(edges) for x in edges_last_60_20: if x[0] not in PG_actors: PG_actors.add(x[0]) if x[1] not in PG_actors: PG_actors.add(x[1]) PG.add_nodes_from(PG_actors) PG.add_edges_from(edges_last_60_20) return G def centrality_analysis(): types = [nx.eigenvector_centrality, nx.harmonic_centrality, nx. 
degree_centrality] for x in types: file = open('./centrality/40_10/centrality_results_' + x.__name__ + '.txt', 'w') nodes = x(graph) top_10 = list() top_10_ids = list() sorted_values = list(nodes.values()) sorted_values.sort() sorted_values.reverse() top_10 = sorted_values[0] for x in nodes.keys(): if nodes[x] == top_10: top_10_ids.append(x) file.write(str(len(top_10_ids)) + '\n') for x in top_10_ids: for y in cleaned_actors: if x == y.getId(): print(y.getName()) file.close() def community_analysis(): f = open('./community/communities_outputs.txt', 'w') communities_generator = nx.community.girvan_newman(graph) communities = next(communities_generator) size = len(communities) while size < 10: print(communities) communities = next(communities_generator) size = len(communities) f.write('community iteration: size = {}, {} \n'.format(size, communities)) def link_pred(): splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2)) friends_PG = list() for x in splPG.keys(): for y in splPG[x].keys(): if splPG[x][y] == 2: l = list() l.append(x) l.append(y) friends_PG.append(l) predictions = nx.jaccard_coefficient(PG, friends_PG) results = list() for x in predictions: results.append(x) results.sort(key=lambda x: x[2]) results.reverse() k_vals = [10, 20, 50, 100] for k in k_vals: f = open('./link_pred/link_prediction_values_jaccard' + str(k) + '.txt', 'w') count = 0 while count < k: print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[ count][0]].getName(), all_actors_id_map[results[count][1]]. getName(), results[count][2])) f.write('({}, {}),jaccard: {}\n'.format(all_actors_id_map[ results[count][0]].getName(), all_actors_id_map[results[ count][1]].getName(), results[count][2])) count += 1 top_k = list() precision_at_k = 0 for x in range(k): top_k.append(results[x]) count = 0 for val in top_k: tup = val[0], val[1] if tup in edges: count += 1 precision_at_k = count / k print('precision @ K{}: {}\n'.format(k, precision_at_k)) f.write('precision @ K{}: {}'.format(k, precision_at_k)) f.close() def convert_id_actor(): file = open('./community_/communities_outputs.txt') for row in file: items = row.split(', ') i = 0 while i < len(items): items[i].strip('\n') items[i] = int(items[i]) i += 1 i = 0 this_row = list() i = 0 while i < len(items): this_row.append(items[i]) i += 1 comm.append(this_row) file.close() file = open('./actorname_communities.txt', 'w') for x in range(len(comm)): for y in range(len(comm[x])): try: comm[x][y] = all_actors_id_map[comm[x][y]].getName() except: comm[x][y] = 'None' comm.reverse() for x in range(len(comm)): print('Community #{}: {}'.format(x, comm[x])) file.write('Community #{}: {}\n'.format(x, comm[x])) file.flush() file.close() <|reserved_special_token_0|> <|reserved_special_token_1|> import csv import json import re import itertools import pandas as pd import networkx as nx import matplotlib.pyplot as plt from networkx.algorithms import community import snap import numpy # setting up data structures to map actor IDs to objects in order to increase run time. 
csv.field_size_limit(100000000) curr_actor_id = 1 all_actors = dict() all_actors_id_map = dict() all_actors_frequencies = dict() edges = set() weights = dict() movies = list() movies_dict = dict() edges_last_60_20 = set() comm = list() PG = nx.Graph() class Actor: def __init__(self, name: str, id:int): self.filmography = set() self.name = name self.id = id def getFilms(self): return self.filmography def getName(self): return self.name def getId(self): return self.id def updateFilms(self, film:int): self.filmography.add(film) class Movie: def __init__(self, id: int): self.actors = set() self.name = "" self.id = id self.year = 0 def getName(self): return self.name def getActors(self): return self.actors def getId(self): return self.id def getDate(self): return self.year def updateActors(self, actor:Actor): self.actors.add(actor) def updateActors(self, actors_to_add:set()): for x in actors_to_add: self.actors.add(x) def setDate(self, i: int): self.year = i #parsing data from csv and dropping crew column reader = pd.read_csv('credits.csv', header = 0) crewless = reader.drop('crew', axis = 1) cleanup = re.compile('[^a-zA-Z\s]') #skip the header row row = crewless.iterrows() #loop through each row for x in range(len(reader.index)): cur_row = next(row) data = cur_row[1][0] id = cur_row[1][1] actors = set() #create an instance of a Movie for each row movie = Movie(int(id)) movies.append(movie) movies_dict[id] = movie #split the string around each name split_around_names = data.split('name') #parse actors, and create an instance of Actor for each actor in each movie for y in range(1, len(split_around_names)): #Cleaning up characters and spaces around the actor's name actorName = str(split_around_names[y].split('order')[0]) actorName = cleanup.sub(' ', actorName) actorName = actorName.strip() #Create the Actor and update his/her filmography if actorName not in all_actors.keys(): a = Actor(actorName, curr_actor_id) curr_actor_id += 1 a.updateFilms(movie) actors.add(a) all_actors[actorName] = a all_actors_frequencies[a] = 1 all_actors_id_map[curr_actor_id] = a else: all_actors[actorName].updateFilms(movie) all_actors_frequencies[a] += 1 actors.add(all_actors[actorName]) #Update the set of actors per movie movie.updateActors(actors) reader = pd.read_csv('movies_metadata.csv', header = 0) reader.drop(reader.columns.difference(['id', 'release_date']), 1, inplace=True) row = reader.iterrows() cleaned_actors = set() cleaned_movies_1 = set() cleaned_movies = set() # adding ids to movies from movie files for x in range(len(reader.index)): cur_row = next(row) id = cur_row[1][0] date = cur_row[1][1] id = int(id) year = date[:4] year_int = int(year) if id in movies_dict.keys(): movies_dict[id].setDate(year_int) cleaned_movies_1.add(movies_dict[id]) def clean(threshold: int): for actorName in all_actors.keys(): if len(all_actors[actorName].getFilms()) > threshold: cleaned_actors.add(all_actors[actorName]) else: for movie in all_actors[actorName].getFilms(): if all_actors[actorName] in movie.getActors(): movie.getActors().remove(all_actors[actorName]) def clean_movies(threshold: int): for movie in cleaned_movies_1: if 2017 - movie.getDate() <= threshold: cleaned_movies.add(movie) else: for actor in movie.getActors(): s = actor.getFilms() s.remove(movie) def createGraph(): counter = 0 G = nx.Graph() PG_actors = set() #fill graph with nodes for actor in cleaned_actors: G.add_node(actor.getId()) #generate a list of edges and weights based on frequencie of combination appearances for movie in cleaned_movies: actorIds 
= set() for actor in movie.getActors(): actorIds.add(actor.getId()) combinations = itertools.combinations(actorIds, 2) for comb in combinations: reverse = comb[::-1] if (comb not in edges) and (reverse not in edges): counter+=1 if (2017 - movie.getDate() < 60 and 2017 - movie.getDate() > 20): if (comb not in edges_last_60_20) and (reverse not in edges_last_60_20): edges_last_60_20.add(comb) edges.add(comb) weights[comb] = 1 else: if comb in edges: weights[comb] = weights[comb] + 1 elif reverse in edges: weights[reverse] = weights[reverse] + 1 G.add_edges_from(edges) for x in edges_last_60_20: if x[0] not in PG_actors: PG_actors.add(x[0]) if x[1] not in PG_actors: PG_actors.add(x[1]) PG.add_nodes_from(PG_actors) PG.add_edges_from(edges_last_60_20) return G def centrality_analysis(): types = [nx.eigenvector_centrality, nx.harmonic_centrality, nx.degree_centrality] for x in types: # based upon cleaning values chosen, choose a directory to store results to. file = open('./centrality/40_10/centrality_results_'+x.__name__+'.txt', 'w') nodes = x(graph) top_10 = list() top_10_ids = list() sorted_values = list(nodes.values()) sorted_values.sort() sorted_values.reverse() top_10 = sorted_values[0] # print(sorted_values) # for y in top_10: for x in nodes.keys(): if nodes[x] == top_10: top_10_ids.append(x) file.write(str(len(top_10_ids)) + '\n') for x in top_10_ids: for y in cleaned_actors: if x == y.getId(): print(y.getName()) #file.write(y.getName() + '\n') file.close() def community_analysis(): f = open('./community/communities_outputs.txt', 'w') communities_generator = nx.community.girvan_newman(graph) communities = next(communities_generator) size = len(communities) while size < 10: print(communities) communities = next(communities_generator) size = len(communities) f.write('community iteration: size = {}, {} \n'.format(size, communities)) def link_pred(): splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2)) friends_PG = list() for x in splPG.keys(): for y in splPG[x].keys(): if splPG[x][y] == 2: l = list() l.append(x) l.append(y) friends_PG.append(l) predictions = nx.jaccard_coefficient(PG, friends_PG) results = list() for x in predictions: results.append(x) results.sort(key=lambda x: x[2]) results.reverse() k_vals = [10,20,50,100] for k in k_vals: f = open('./link_pred/link_prediction_values_jaccard' + str(k) + '.txt', 'w') count = 0 while (count < k): print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[count][0]].getName(), all_actors_id_map[results[count][1]].getName(), results[count][2])) f.write('({}, {}),jaccard: {}\n'.format(all_actors_id_map[results[count][0]].getName(),all_actors_id_map[results[count][1]].getName(),results[count][2])) count+=1 top_k = list() precision_at_k = 0 for x in range(k): top_k.append(results[x]) count = 0 for val in top_k: tup = (val[0], val[1]) if tup in edges: count += 1 precision_at_k = count / k print('precision @ K{}: {}\n'.format(k, precision_at_k)) f.write('precision @ K{}: {}'.format(k, precision_at_k)) f.close() #Convert community results from IDs to Actor name def convert_id_actor(): file = open('./community_/communities_outputs.txt') for row in file: items = row.split(', ') i = 0 while i < len(items): items[i].strip('\n') items[i] = int(items[i]) i+=1 i = 0 this_row = list() i= 0 while i < len(items): this_row.append(items[i]) i+=1 comm.append(this_row) file.close() file = open('./actorname_communities.txt', 'w') for x in range(len(comm)): for y in range(len(comm[x])): try: comm[x][y] = all_actors_id_map[comm[x][y]].getName() except: 
comm[x][y] = 'None' comm.reverse() for x in range(len(comm)): print("Community #{}: {}".format(x, comm[x])) file.write("Community #{}: {}\n".format(x, comm[x])) file.flush() file.close() clean_movies(60) clean(30) graph = createGraph() print(nx.info(graph)) print(nx.info(PG)) # To perform the analysis, uncomment the respective function(s); additionally, uncomment #convert_id_actor() for community_analysis. # centrality_analysis() # community_analysis() # convert_id_actor() # link_pred()
flexible
{ "blob_id": "0934163fc6461e30a73c06e74b3a5e983ed2fa02", "index": 4211, "step-1": "<mask token>\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = ''\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n <mask token>\n\n def updateActors(self, actors_to_add: set()):\n for x in actors_to_add:\n self.actors.add(x)\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Actor:\n\n def __init__(self, name: str, id: int):\n self.filmography = set()\n self.name = name\n self.id = id\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = ''\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n\n def updateActors(self, actor: Actor):\n self.actors.add(actor)\n\n def updateActors(self, actors_to_add: set()):\n for x in actors_to_add:\n self.actors.add(x)\n\n def setDate(self, i: int):\n self.year = i\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Actor:\n\n def __init__(self, name: str, id: int):\n self.filmography = set()\n self.name = name\n self.id = id\n\n def getFilms(self):\n return self.filmography\n\n def getName(self):\n return self.name\n\n def getId(self):\n return self.id\n\n def updateFilms(self, film: int):\n self.filmography.add(film)\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = ''\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n\n def updateActors(self, actor: Actor):\n self.actors.add(actor)\n\n def updateActors(self, actors_to_add: set()):\n for x in actors_to_add:\n self.actors.add(x)\n\n def setDate(self, i: int):\n self.year = i\n\n\n<mask token>\n\n\ndef community_analysis():\n f = open('./community/communities_outputs.txt', 'w')\n communities_generator = nx.community.girvan_newman(graph)\n communities = next(communities_generator)\n size = len(communities)\n while size < 10:\n print(communities)\n communities = next(communities_generator)\n size = len(communities)\n f.write('community iteration: size = {}, {} \\n'.format(size,\n communities))\n\n\ndef link_pred():\n splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2))\n friends_PG = list()\n for x in splPG.keys():\n for y in splPG[x].keys():\n if splPG[x][y] == 2:\n l = list()\n l.append(x)\n l.append(y)\n friends_PG.append(l)\n predictions = nx.jaccard_coefficient(PG, friends_PG)\n results = list()\n for x in predictions:\n results.append(x)\n results.sort(key=lambda x: x[2])\n results.reverse()\n k_vals = [10, 20, 50, 100]\n for k in k_vals:\n f = open('./link_pred/link_prediction_values_jaccard' + str(k) +\n '.txt', 'w')\n count = 0\n while count < k:\n print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[\n count][0]].getName(), all_actors_id_map[results[count][1]].\n getName(), results[count][2]))\n f.write('({}, {}),jaccard: {}\\n'.format(all_actors_id_map[\n results[count][0]].getName(), all_actors_id_map[results[\n count][1]].getName(), results[count][2]))\n count += 1\n top_k = list()\n precision_at_k = 0\n for x in range(k):\n top_k.append(results[x])\n count = 0\n for 
val in top_k:\n tup = val[0], val[1]\n if tup in edges:\n count += 1\n precision_at_k = count / k\n print('precision @ K{}: {}\\n'.format(k, precision_at_k))\n f.write('precision @ K{}: {}'.format(k, precision_at_k))\n f.close()\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass Actor:\n\n def __init__(self, name: str, id: int):\n self.filmography = set()\n self.name = name\n self.id = id\n\n def getFilms(self):\n return self.filmography\n\n def getName(self):\n return self.name\n\n def getId(self):\n return self.id\n\n def updateFilms(self, film: int):\n self.filmography.add(film)\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = ''\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n\n def updateActors(self, actor: Actor):\n self.actors.add(actor)\n\n def updateActors(self, actors_to_add: set()):\n for x in actors_to_add:\n self.actors.add(x)\n\n def setDate(self, i: int):\n self.year = i\n\n\n<mask token>\n\n\ndef clean(threshold: int):\n for actorName in all_actors.keys():\n if len(all_actors[actorName].getFilms()) > threshold:\n cleaned_actors.add(all_actors[actorName])\n else:\n for movie in all_actors[actorName].getFilms():\n if all_actors[actorName] in movie.getActors():\n movie.getActors().remove(all_actors[actorName])\n\n\ndef clean_movies(threshold: int):\n for movie in cleaned_movies_1:\n if 2017 - movie.getDate() <= threshold:\n cleaned_movies.add(movie)\n else:\n for actor in movie.getActors():\n s = actor.getFilms()\n s.remove(movie)\n\n\ndef createGraph():\n counter = 0\n G = nx.Graph()\n PG_actors = set()\n for actor in cleaned_actors:\n G.add_node(actor.getId())\n for movie in cleaned_movies:\n actorIds = set()\n for actor in movie.getActors():\n actorIds.add(actor.getId())\n combinations = itertools.combinations(actorIds, 2)\n for comb in combinations:\n reverse = comb[::-1]\n if comb not in edges and reverse not in edges:\n counter += 1\n if 2017 - movie.getDate() < 60 and 2017 - movie.getDate() > 20:\n if (comb not in edges_last_60_20 and reverse not in\n edges_last_60_20):\n edges_last_60_20.add(comb)\n edges.add(comb)\n weights[comb] = 1\n elif comb in edges:\n weights[comb] = weights[comb] + 1\n elif reverse in edges:\n weights[reverse] = weights[reverse] + 1\n G.add_edges_from(edges)\n for x in edges_last_60_20:\n if x[0] not in PG_actors:\n PG_actors.add(x[0])\n if x[1] not in PG_actors:\n PG_actors.add(x[1])\n PG.add_nodes_from(PG_actors)\n PG.add_edges_from(edges_last_60_20)\n return G\n\n\ndef centrality_analysis():\n types = [nx.eigenvector_centrality, nx.harmonic_centrality, nx.\n degree_centrality]\n for x in types:\n file = open('./centrality/40_10/centrality_results_' + x.__name__ +\n '.txt', 'w')\n nodes = x(graph)\n top_10 = list()\n top_10_ids = list()\n sorted_values = list(nodes.values())\n sorted_values.sort()\n sorted_values.reverse()\n top_10 = sorted_values[0]\n for x in nodes.keys():\n if nodes[x] == top_10:\n top_10_ids.append(x)\n file.write(str(len(top_10_ids)) + '\\n')\n for x in top_10_ids:\n for y in cleaned_actors:\n if x == y.getId():\n print(y.getName())\n file.close()\n\n\ndef community_analysis():\n f = open('./community/communities_outputs.txt', 'w')\n communities_generator = nx.community.girvan_newman(graph)\n communities = next(communities_generator)\n size = len(communities)\n while size < 10:\n print(communities)\n communities = 
next(communities_generator)\n size = len(communities)\n f.write('community iteration: size = {}, {} \\n'.format(size,\n communities))\n\n\ndef link_pred():\n splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2))\n friends_PG = list()\n for x in splPG.keys():\n for y in splPG[x].keys():\n if splPG[x][y] == 2:\n l = list()\n l.append(x)\n l.append(y)\n friends_PG.append(l)\n predictions = nx.jaccard_coefficient(PG, friends_PG)\n results = list()\n for x in predictions:\n results.append(x)\n results.sort(key=lambda x: x[2])\n results.reverse()\n k_vals = [10, 20, 50, 100]\n for k in k_vals:\n f = open('./link_pred/link_prediction_values_jaccard' + str(k) +\n '.txt', 'w')\n count = 0\n while count < k:\n print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[\n count][0]].getName(), all_actors_id_map[results[count][1]].\n getName(), results[count][2]))\n f.write('({}, {}),jaccard: {}\\n'.format(all_actors_id_map[\n results[count][0]].getName(), all_actors_id_map[results[\n count][1]].getName(), results[count][2]))\n count += 1\n top_k = list()\n precision_at_k = 0\n for x in range(k):\n top_k.append(results[x])\n count = 0\n for val in top_k:\n tup = val[0], val[1]\n if tup in edges:\n count += 1\n precision_at_k = count / k\n print('precision @ K{}: {}\\n'.format(k, precision_at_k))\n f.write('precision @ K{}: {}'.format(k, precision_at_k))\n f.close()\n\n\ndef convert_id_actor():\n file = open('./community_/communities_outputs.txt')\n for row in file:\n items = row.split(', ')\n i = 0\n while i < len(items):\n items[i].strip('\\n')\n items[i] = int(items[i])\n i += 1\n i = 0\n this_row = list()\n i = 0\n while i < len(items):\n this_row.append(items[i])\n i += 1\n comm.append(this_row)\n file.close()\n file = open('./actorname_communities.txt', 'w')\n for x in range(len(comm)):\n for y in range(len(comm[x])):\n try:\n comm[x][y] = all_actors_id_map[comm[x][y]].getName()\n except:\n comm[x][y] = 'None'\n comm.reverse()\n for x in range(len(comm)):\n print('Community #{}: {}'.format(x, comm[x]))\n file.write('Community #{}: {}\\n'.format(x, comm[x]))\n file.flush()\n file.close()\n\n\n<mask token>\n", "step-5": "import csv\nimport json\nimport re\nimport itertools\nimport pandas as pd\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom networkx.algorithms import community\nimport snap\nimport numpy\n\n# setting up data structures to map actor IDs to objects in order to increase run time.\ncsv.field_size_limit(100000000)\ncurr_actor_id = 1\nall_actors = dict()\nall_actors_id_map = dict()\nall_actors_frequencies = dict()\nedges = set()\nweights = dict()\nmovies = list()\nmovies_dict = dict()\nedges_last_60_20 = set()\ncomm = list()\nPG = nx.Graph()\n\nclass Actor:\n\n def __init__(self, name: str, id:int):\n self.filmography = set()\n self.name = name\n self.id = id\n def getFilms(self):\n return self.filmography\n\n def getName(self):\n return self.name\n\n def getId(self):\n return self.id\n\n def updateFilms(self, film:int):\n self.filmography.add(film)\n\n\nclass Movie:\n\n def __init__(self, id: int):\n self.actors = set()\n self.name = \"\"\n self.id = id\n self.year = 0\n\n def getName(self):\n return self.name\n\n def getActors(self):\n return self.actors\n\n def getId(self):\n return self.id\n\n def getDate(self):\n return self.year\n\n def updateActors(self, actor:Actor):\n self.actors.add(actor)\n\n def updateActors(self, actors_to_add:set()):\n for x in actors_to_add:\n self.actors.add(x)\n\n def setDate(self, i: int):\n self.year = i\n\n#parsing data from 
csv and dropping crew column\nreader = pd.read_csv('credits.csv', header = 0)\ncrewless = reader.drop('crew', axis = 1)\ncleanup = re.compile('[^a-zA-Z\\s]')\n\n#skip the header row\nrow = crewless.iterrows()\n\n#loop through each row\nfor x in range(len(reader.index)):\n cur_row = next(row)\n data = cur_row[1][0]\n id = cur_row[1][1]\n actors = set()\n\n #create an instance of a Movie for each row\n movie = Movie(int(id))\n movies.append(movie)\n movies_dict[id] = movie\n\n #split the string around each name\n split_around_names = data.split('name')\n\n #parse actors, and create an instance of Actor for each actor in each movie\n for y in range(1, len(split_around_names)):\n #Cleaning up characters and spaces around the actor's name\n actorName = str(split_around_names[y].split('order')[0])\n actorName = cleanup.sub(' ', actorName)\n actorName = actorName.strip()\n #Create the Actor and update his/her filmography\n if actorName not in all_actors.keys():\n a = Actor(actorName, curr_actor_id)\n curr_actor_id += 1\n a.updateFilms(movie)\n actors.add(a)\n all_actors[actorName] = a\n all_actors_frequencies[a] = 1\n all_actors_id_map[curr_actor_id] = a\n else:\n all_actors[actorName].updateFilms(movie)\n all_actors_frequencies[a] += 1\n actors.add(all_actors[actorName])\n #Update the set of actors per movie\n movie.updateActors(actors)\n\nreader = pd.read_csv('movies_metadata.csv', header = 0)\nreader.drop(reader.columns.difference(['id', 'release_date']), 1, inplace=True)\nrow = reader.iterrows()\n\ncleaned_actors = set()\ncleaned_movies_1 = set()\ncleaned_movies = set()\n\n# adding ids to movies from movie files\nfor x in range(len(reader.index)):\n cur_row = next(row)\n id = cur_row[1][0]\n date = cur_row[1][1]\n id = int(id)\n year = date[:4]\n year_int = int(year)\n if id in movies_dict.keys():\n movies_dict[id].setDate(year_int)\n cleaned_movies_1.add(movies_dict[id])\n\n\ndef clean(threshold: int):\n for actorName in all_actors.keys():\n if len(all_actors[actorName].getFilms()) > threshold:\n cleaned_actors.add(all_actors[actorName])\n else:\n for movie in all_actors[actorName].getFilms():\n if all_actors[actorName] in movie.getActors():\n movie.getActors().remove(all_actors[actorName])\n\n\ndef clean_movies(threshold: int):\n for movie in cleaned_movies_1:\n if 2017 - movie.getDate() <= threshold:\n cleaned_movies.add(movie)\n else:\n for actor in movie.getActors():\n s = actor.getFilms()\n s.remove(movie)\n\n\ndef createGraph():\n counter = 0\n G = nx.Graph()\n PG_actors = set()\n\n #fill graph with nodes\n for actor in cleaned_actors:\n G.add_node(actor.getId())\n\n #generate a list of edges and weights based on frequencie of combination appearances\n for movie in cleaned_movies:\n actorIds = set()\n for actor in movie.getActors():\n actorIds.add(actor.getId())\n combinations = itertools.combinations(actorIds, 2)\n for comb in combinations:\n reverse = comb[::-1]\n if (comb not in edges) and (reverse not in edges):\n counter+=1\n if (2017 - movie.getDate() < 60 and 2017 - movie.getDate() > 20):\n if (comb not in edges_last_60_20) and (reverse not in edges_last_60_20):\n edges_last_60_20.add(comb)\n edges.add(comb)\n weights[comb] = 1\n else:\n if comb in edges:\n weights[comb] = weights[comb] + 1\n elif reverse in edges:\n weights[reverse] = weights[reverse] + 1\n G.add_edges_from(edges)\n for x in edges_last_60_20:\n if x[0] not in PG_actors:\n PG_actors.add(x[0])\n if x[1] not in PG_actors:\n PG_actors.add(x[1])\n PG.add_nodes_from(PG_actors)\n PG.add_edges_from(edges_last_60_20)\n 
return G\n\n\ndef centrality_analysis():\n types = [nx.eigenvector_centrality, nx.harmonic_centrality, nx.degree_centrality]\n\n for x in types:\n\n # based upon cleaning values chosen, choose a directory to store results to.\n file = open('./centrality/40_10/centrality_results_'+x.__name__+'.txt', 'w')\n nodes = x(graph)\n top_10 = list()\n top_10_ids = list()\n\n sorted_values = list(nodes.values())\n sorted_values.sort()\n sorted_values.reverse()\n\n top_10 = sorted_values[0]\n # print(sorted_values)\n\n # for y in top_10:\n for x in nodes.keys():\n if nodes[x] == top_10:\n top_10_ids.append(x)\n\n file.write(str(len(top_10_ids)) + '\\n')\n for x in top_10_ids:\n for y in cleaned_actors:\n if x == y.getId():\n print(y.getName())\n #file.write(y.getName() + '\\n')\n file.close()\n\n\ndef community_analysis():\n f = open('./community/communities_outputs.txt', 'w')\n communities_generator = nx.community.girvan_newman(graph)\n communities = next(communities_generator)\n size = len(communities)\n while size < 10:\n print(communities)\n communities = next(communities_generator)\n size = len(communities)\n f.write('community iteration: size = {}, {} \\n'.format(size, communities))\n\n\ndef link_pred():\n splPG = dict(nx.all_pairs_shortest_path_length(PG, cutoff=2))\n friends_PG = list()\n for x in splPG.keys():\n for y in splPG[x].keys():\n if splPG[x][y] == 2:\n l = list()\n l.append(x)\n l.append(y)\n friends_PG.append(l)\n predictions = nx.jaccard_coefficient(PG, friends_PG)\n results = list()\n for x in predictions:\n results.append(x)\n results.sort(key=lambda x: x[2])\n results.reverse()\n\n k_vals = [10,20,50,100]\n for k in k_vals:\n f = open('./link_pred/link_prediction_values_jaccard' + str(k) + '.txt', 'w')\n count = 0\n while (count < k):\n print('({}, {}),jaccard: {}'.format(all_actors_id_map[results[count][0]].getName(), all_actors_id_map[results[count][1]].getName(), results[count][2]))\n f.write('({}, {}),jaccard: {}\\n'.format(all_actors_id_map[results[count][0]].getName(),all_actors_id_map[results[count][1]].getName(),results[count][2]))\n count+=1\n top_k = list()\n precision_at_k = 0\n for x in range(k):\n top_k.append(results[x])\n count = 0\n for val in top_k:\n tup = (val[0], val[1])\n if tup in edges:\n count += 1\n precision_at_k = count / k\n print('precision @ K{}: {}\\n'.format(k, precision_at_k))\n f.write('precision @ K{}: {}'.format(k, precision_at_k))\n f.close()\n\n#Convert community results from IDs to Actor name\ndef convert_id_actor():\n file = open('./community_/communities_outputs.txt')\n for row in file:\n items = row.split(', ')\n i = 0\n while i < len(items):\n items[i].strip('\\n')\n items[i] = int(items[i])\n i+=1\n i = 0\n this_row = list()\n i= 0\n while i < len(items):\n this_row.append(items[i])\n i+=1\n comm.append(this_row)\n file.close()\n file = open('./actorname_communities.txt', 'w')\n for x in range(len(comm)):\n for y in range(len(comm[x])):\n try:\n comm[x][y] = all_actors_id_map[comm[x][y]].getName()\n except:\n comm[x][y] = 'None'\n comm.reverse()\n for x in range(len(comm)):\n print(\"Community #{}: {}\".format(x, comm[x]))\n file.write(\"Community #{}: {}\\n\".format(x, comm[x]))\n file.flush()\n file.close()\n\n\nclean_movies(60)\nclean(30)\n\ngraph = createGraph()\nprint(nx.info(graph))\nprint(nx.info(PG))\n\n\n# To perform the analysis, uncomment the respective function(s); additionally, uncomment #convert_id_actor() for community_analysis.\n# centrality_analysis()\n# community_analysis()\n# convert_id_actor()\n# link_pred()\n", 
"step-ids": [ 7, 11, 17, 22, 26 ] }
<|reserved_special_token_0|> def scrape(event_id, event_cost): page = get(event_id, resource='events').json() venue = get(page['venue_id'], resource='venues').json() start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S') end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S') desc = '(' + venue['address']['region'] + ') ' + page['summary'] event_data = {'Event Name': page['name']['text'], 'Event Description': desc, 'Event Start Date': start.strftime('%Y-%m-%d'), 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date': end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S' ), 'All Day Event': 'False', 'Timezone': 'America/New_York', 'Event Venue Name': venue['name'], 'Event Organizers': 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol': '$', 'Event Category': get_category_name(page), 'Event Website': page['url'], 'Event Featured Image': ''} return event_data def get(api_id, resource, params={'token': EVENTBRITE_TOKEN}): url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else f'https://www.eventbriteapi.com/v3/{resource}/{api_id}') try: if resource != 'o': r = requests.get(url, params=params) else: r = requests.get(url) except Exception as e: msg = f'Exception making GET request to {url}: {e}' logger.critical(msg, exc_info=True) return if not r.ok: code = r.status_code msg = f'Non-200 status code of {code} making GET request to: {url}' logger.critical(msg, exc_info=True) return r def get_live_events(soup): live_events = soup.find('article', {'id': 'live_events'}) try: event_divs = live_events.find_all('div', {'class': 'list-card-v2'}) except AttributeError: return [] return event_divs def get_cost_events(soup): cost = soup.find('span', {'class': 'list-card__label'}).text cost = cost.lower() cost = cost.replace('free', '0') cost = re.sub('[^\\d]+', '', cost) if cost == '': cost = '0' return cost def main(): events_array = [] r = get(14506382808, 'o') soup = BeautifulSoup(r.content, 'html.parser') event_a_refs = get_live_events(soup) for events in event_a_refs: event_cost = get_cost_events(events) event_id = events.find('a').get('data-eid') events_array.append(scrape(event_id, event_cost)) return events_array <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def get_category_name(page): if page['category_id'] is None: category = '' elif page['subcategory_id'] is None: category = get(page['category_id'], 'categories/').json()['name'] else: category_name = get(page['category_id'], 'categories/') category_name = category_name.json()['name'] category_name = category_name.replace(',', '') subcategory_name = get(page['subcategory_id'], 'subcategories/') subcategory_name = subcategory_name.json()['name'] subcategory_name = subcategory_name.replace(',', '') category = category_name + ',' + subcategory_name return category def scrape(event_id, event_cost): page = get(event_id, resource='events').json() venue = get(page['venue_id'], resource='venues').json() start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S') end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S') desc = '(' + venue['address']['region'] + ') ' + page['summary'] event_data = {'Event Name': page['name']['text'], 'Event Description': desc, 'Event Start Date': start.strftime('%Y-%m-%d'), 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date': end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S' ), 'All Day Event': 'False', 'Timezone': 'America/New_York', 'Event Venue 
Name': venue['name'], 'Event Organizers': 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol': '$', 'Event Category': get_category_name(page), 'Event Website': page['url'], 'Event Featured Image': ''} return event_data def get(api_id, resource, params={'token': EVENTBRITE_TOKEN}): url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else f'https://www.eventbriteapi.com/v3/{resource}/{api_id}') try: if resource != 'o': r = requests.get(url, params=params) else: r = requests.get(url) except Exception as e: msg = f'Exception making GET request to {url}: {e}' logger.critical(msg, exc_info=True) return if not r.ok: code = r.status_code msg = f'Non-200 status code of {code} making GET request to: {url}' logger.critical(msg, exc_info=True) return r def get_live_events(soup): live_events = soup.find('article', {'id': 'live_events'}) try: event_divs = live_events.find_all('div', {'class': 'list-card-v2'}) except AttributeError: return [] return event_divs def get_cost_events(soup): cost = soup.find('span', {'class': 'list-card__label'}).text cost = cost.lower() cost = cost.replace('free', '0') cost = re.sub('[^\\d]+', '', cost) if cost == '': cost = '0' return cost def main(): events_array = [] r = get(14506382808, 'o') soup = BeautifulSoup(r.content, 'html.parser') event_a_refs = get_live_events(soup) for events in event_a_refs: event_cost = get_cost_events(events) event_id = events.find('a').get('data-eid') events_array.append(scrape(event_id, event_cost)) return events_array if __name__ == '__main__': logging.basicConfig(level=logging.INFO, format= '%(asctime)s - %(name)s - %(levelname)s - %(message)s') events = main() print(len(events)) <|reserved_special_token_1|> <|reserved_special_token_0|> logger = get_logger(os.path.basename(__file__)) EVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN'] def get_category_name(page): if page['category_id'] is None: category = '' elif page['subcategory_id'] is None: category = get(page['category_id'], 'categories/').json()['name'] else: category_name = get(page['category_id'], 'categories/') category_name = category_name.json()['name'] category_name = category_name.replace(',', '') subcategory_name = get(page['subcategory_id'], 'subcategories/') subcategory_name = subcategory_name.json()['name'] subcategory_name = subcategory_name.replace(',', '') category = category_name + ',' + subcategory_name return category def scrape(event_id, event_cost): page = get(event_id, resource='events').json() venue = get(page['venue_id'], resource='venues').json() start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S') end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S') desc = '(' + venue['address']['region'] + ') ' + page['summary'] event_data = {'Event Name': page['name']['text'], 'Event Description': desc, 'Event Start Date': start.strftime('%Y-%m-%d'), 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date': end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S' ), 'All Day Event': 'False', 'Timezone': 'America/New_York', 'Event Venue Name': venue['name'], 'Event Organizers': 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol': '$', 'Event Category': get_category_name(page), 'Event Website': page['url'], 'Event Featured Image': ''} return event_data def get(api_id, resource, params={'token': EVENTBRITE_TOKEN}): url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else f'https://www.eventbriteapi.com/v3/{resource}/{api_id}') try: if resource != 'o': r = 
requests.get(url, params=params) else: r = requests.get(url) except Exception as e: msg = f'Exception making GET request to {url}: {e}' logger.critical(msg, exc_info=True) return if not r.ok: code = r.status_code msg = f'Non-200 status code of {code} making GET request to: {url}' logger.critical(msg, exc_info=True) return r def get_live_events(soup): live_events = soup.find('article', {'id': 'live_events'}) try: event_divs = live_events.find_all('div', {'class': 'list-card-v2'}) except AttributeError: return [] return event_divs def get_cost_events(soup): cost = soup.find('span', {'class': 'list-card__label'}).text cost = cost.lower() cost = cost.replace('free', '0') cost = re.sub('[^\\d]+', '', cost) if cost == '': cost = '0' return cost def main(): events_array = [] r = get(14506382808, 'o') soup = BeautifulSoup(r.content, 'html.parser') event_a_refs = get_live_events(soup) for events in event_a_refs: event_cost = get_cost_events(events) event_id = events.find('a').get('data-eid') events_array.append(scrape(event_id, event_cost)) return events_array if __name__ == '__main__': logging.basicConfig(level=logging.INFO, format= '%(asctime)s - %(name)s - %(levelname)s - %(message)s') events = main() print(len(events)) <|reserved_special_token_1|> from datetime import datetime import logging import os import re from bs4 import BeautifulSoup import requests from .utils.log import get_logger logger = get_logger(os.path.basename(__file__)) EVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN'] def get_category_name(page): if page['category_id'] is None: category = '' elif page['subcategory_id'] is None: category = get(page['category_id'], 'categories/').json()['name'] else: category_name = get(page['category_id'], 'categories/') category_name = category_name.json()['name'] category_name = category_name.replace(',', '') subcategory_name = get(page['subcategory_id'], 'subcategories/') subcategory_name = subcategory_name.json()['name'] subcategory_name = subcategory_name.replace(',', '') category = category_name + ',' + subcategory_name return category def scrape(event_id, event_cost): page = get(event_id, resource='events').json() venue = get(page['venue_id'], resource='venues').json() start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S') end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S') desc = '(' + venue['address']['region'] + ') ' + page['summary'] event_data = {'Event Name': page['name']['text'], 'Event Description': desc, 'Event Start Date': start.strftime('%Y-%m-%d'), 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date': end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S' ), 'All Day Event': 'False', 'Timezone': 'America/New_York', 'Event Venue Name': venue['name'], 'Event Organizers': 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol': '$', 'Event Category': get_category_name(page), 'Event Website': page['url'], 'Event Featured Image': ''} return event_data def get(api_id, resource, params={'token': EVENTBRITE_TOKEN}): url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else f'https://www.eventbriteapi.com/v3/{resource}/{api_id}') try: if resource != 'o': r = requests.get(url, params=params) else: r = requests.get(url) except Exception as e: msg = f'Exception making GET request to {url}: {e}' logger.critical(msg, exc_info=True) return if not r.ok: code = r.status_code msg = f'Non-200 status code of {code} making GET request to: {url}' logger.critical(msg, exc_info=True) return r def get_live_events(soup): 
live_events = soup.find('article', {'id': 'live_events'}) try: event_divs = live_events.find_all('div', {'class': 'list-card-v2'}) except AttributeError: return [] return event_divs def get_cost_events(soup): cost = soup.find('span', {'class': 'list-card__label'}).text cost = cost.lower() cost = cost.replace('free', '0') cost = re.sub('[^\\d]+', '', cost) if cost == '': cost = '0' return cost def main(): events_array = [] r = get(14506382808, 'o') soup = BeautifulSoup(r.content, 'html.parser') event_a_refs = get_live_events(soup) for events in event_a_refs: event_cost = get_cost_events(events) event_id = events.find('a').get('data-eid') events_array.append(scrape(event_id, event_cost)) return events_array if __name__ == '__main__': logging.basicConfig(level=logging.INFO, format= '%(asctime)s - %(name)s - %(levelname)s - %(message)s') events = main() print(len(events)) <|reserved_special_token_1|> from datetime import datetime import logging import os import re from bs4 import BeautifulSoup import requests from .utils.log import get_logger logger = get_logger(os.path.basename(__file__)) EVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN'] def get_category_name(page): if page["category_id"] is None: category = '' else: if page["subcategory_id"] is None: category = get(page["category_id"], 'categories/').json()["name"] else: category_name = get(page["category_id"], 'categories/') category_name = category_name.json()["name"] category_name = category_name.replace(",", "") subcategory_name = get(page["subcategory_id"], 'subcategories/') subcategory_name = subcategory_name.json()["name"] subcategory_name = subcategory_name.replace(",", "") category = category_name + "," + subcategory_name return category def scrape(event_id, event_cost): page = get(event_id, resource='events').json() venue = get(page["venue_id"], resource='venues').json() start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S') end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S') desc = "(" + venue["address"]["region"] + ") " + page["summary"] event_data = { 'Event Name': page['name']['text'], 'Event Description': desc, 'Event Start Date': start.strftime('%Y-%m-%d'), 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date': end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S'), 'All Day Event': "False", 'Timezone': "America/New_York", 'Event Venue Name': venue["name"], 'Event Organizers': 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol': "$", # TODO: parse event data for optional category fields if present 'Event Category': get_category_name(page), 'Event Website': page['url'], 'Event Featured Image': "" } return event_data def get(api_id, resource, params={'token': EVENTBRITE_TOKEN}): url = f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' \ else f'https://www.eventbriteapi.com/v3/{resource}/{api_id}' try: if resource != 'o': r = requests.get(url, params=params) else: r = requests.get(url) except Exception as e: msg = f"Exception making GET request to {url}: {e}" logger.critical(msg, exc_info=True) return if not r.ok: code = r.status_code msg = f"Non-200 status code of {code} making GET request to: {url}" logger.critical(msg, exc_info=True) return r def get_live_events(soup): live_events = soup.find("article", {"id": "live_events"}) try: event_divs = live_events.find_all("div", {"class": "list-card-v2"}) except AttributeError: return [] return event_divs def get_cost_events(soup): cost = soup.find("span", {"class": "list-card__label"}).text cost = 
cost.lower() cost = cost.replace("free", "0") cost = re.sub(r'[^\d]+', '', cost) if cost == "": cost = "0" return cost def main(): events_array = [] r = get(14506382808, 'o') soup = BeautifulSoup(r.content, 'html.parser') event_a_refs = get_live_events(soup) for events in event_a_refs: event_cost = get_cost_events(events) event_id = events.find("a").get("data-eid") events_array.append(scrape(event_id, event_cost)) return events_array if __name__ == '__main__': logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' ) events = main() print(len(events))
flexible
{ "blob_id": "edfc8794fab2c95e01ae254f9f13d446faafe6fd", "index": 9213, "step-1": "<mask token>\n\n\ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page['venue_id'], resource='venues').json()\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = '(' + venue['address']['region'] + ') ' + page['summary']\n event_data = {'Event Name': page['name']['text'], 'Event Description':\n desc, 'Event Start Date': start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date':\n end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S'\n ), 'All Day Event': 'False', 'Timezone': 'America/New_York',\n 'Event Venue Name': venue['name'], 'Event Organizers':\n 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol':\n '$', 'Event Category': get_category_name(page), 'Event Website':\n page['url'], 'Event Featured Image': ''}\n return event_data\n\n\ndef get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):\n url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else\n f'https://www.eventbriteapi.com/v3/{resource}/{api_id}')\n try:\n if resource != 'o':\n r = requests.get(url, params=params)\n else:\n r = requests.get(url)\n except Exception as e:\n msg = f'Exception making GET request to {url}: {e}'\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f'Non-200 status code of {code} making GET request to: {url}'\n logger.critical(msg, exc_info=True)\n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find('article', {'id': 'live_events'})\n try:\n event_divs = live_events.find_all('div', {'class': 'list-card-v2'})\n except AttributeError:\n return []\n return event_divs\n\n\ndef get_cost_events(soup):\n cost = soup.find('span', {'class': 'list-card__label'}).text\n cost = cost.lower()\n cost = cost.replace('free', '0')\n cost = re.sub('[^\\\\d]+', '', cost)\n if cost == '':\n cost = '0'\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser')\n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find('a').get('data-eid')\n events_array.append(scrape(event_id, event_cost))\n return events_array\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_category_name(page):\n if page['category_id'] is None:\n category = ''\n elif page['subcategory_id'] is None:\n category = get(page['category_id'], 'categories/').json()['name']\n else:\n category_name = get(page['category_id'], 'categories/')\n category_name = category_name.json()['name']\n category_name = category_name.replace(',', '')\n subcategory_name = get(page['subcategory_id'], 'subcategories/')\n subcategory_name = subcategory_name.json()['name']\n subcategory_name = subcategory_name.replace(',', '')\n category = category_name + ',' + subcategory_name\n return category\n\n\ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page['venue_id'], resource='venues').json()\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = '(' + venue['address']['region'] + ') ' + page['summary']\n event_data = {'Event Name': page['name']['text'], 'Event Description':\n desc, 'Event Start Date': 
start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date':\n end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S'\n ), 'All Day Event': 'False', 'Timezone': 'America/New_York',\n 'Event Venue Name': venue['name'], 'Event Organizers':\n 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol':\n '$', 'Event Category': get_category_name(page), 'Event Website':\n page['url'], 'Event Featured Image': ''}\n return event_data\n\n\ndef get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):\n url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else\n f'https://www.eventbriteapi.com/v3/{resource}/{api_id}')\n try:\n if resource != 'o':\n r = requests.get(url, params=params)\n else:\n r = requests.get(url)\n except Exception as e:\n msg = f'Exception making GET request to {url}: {e}'\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f'Non-200 status code of {code} making GET request to: {url}'\n logger.critical(msg, exc_info=True)\n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find('article', {'id': 'live_events'})\n try:\n event_divs = live_events.find_all('div', {'class': 'list-card-v2'})\n except AttributeError:\n return []\n return event_divs\n\n\ndef get_cost_events(soup):\n cost = soup.find('span', {'class': 'list-card__label'}).text\n cost = cost.lower()\n cost = cost.replace('free', '0')\n cost = re.sub('[^\\\\d]+', '', cost)\n if cost == '':\n cost = '0'\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser')\n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find('a').get('data-eid')\n events_array.append(scrape(event_id, event_cost))\n return events_array\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n events = main()\n print(len(events))\n", "step-3": "<mask token>\nlogger = get_logger(os.path.basename(__file__))\nEVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN']\n\n\ndef get_category_name(page):\n if page['category_id'] is None:\n category = ''\n elif page['subcategory_id'] is None:\n category = get(page['category_id'], 'categories/').json()['name']\n else:\n category_name = get(page['category_id'], 'categories/')\n category_name = category_name.json()['name']\n category_name = category_name.replace(',', '')\n subcategory_name = get(page['subcategory_id'], 'subcategories/')\n subcategory_name = subcategory_name.json()['name']\n subcategory_name = subcategory_name.replace(',', '')\n category = category_name + ',' + subcategory_name\n return category\n\n\ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page['venue_id'], resource='venues').json()\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = '(' + venue['address']['region'] + ') ' + page['summary']\n event_data = {'Event Name': page['name']['text'], 'Event Description':\n desc, 'Event Start Date': start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date':\n end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S'\n ), 'All Day Event': 'False', 'Timezone': 'America/New_York',\n 'Event Venue Name': venue['name'], 'Event Organizers':\n 'Sierra Club MD', 'Event Cost': 
event_cost, 'Event Currency Symbol':\n '$', 'Event Category': get_category_name(page), 'Event Website':\n page['url'], 'Event Featured Image': ''}\n return event_data\n\n\ndef get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):\n url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else\n f'https://www.eventbriteapi.com/v3/{resource}/{api_id}')\n try:\n if resource != 'o':\n r = requests.get(url, params=params)\n else:\n r = requests.get(url)\n except Exception as e:\n msg = f'Exception making GET request to {url}: {e}'\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f'Non-200 status code of {code} making GET request to: {url}'\n logger.critical(msg, exc_info=True)\n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find('article', {'id': 'live_events'})\n try:\n event_divs = live_events.find_all('div', {'class': 'list-card-v2'})\n except AttributeError:\n return []\n return event_divs\n\n\ndef get_cost_events(soup):\n cost = soup.find('span', {'class': 'list-card__label'}).text\n cost = cost.lower()\n cost = cost.replace('free', '0')\n cost = re.sub('[^\\\\d]+', '', cost)\n if cost == '':\n cost = '0'\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser')\n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find('a').get('data-eid')\n events_array.append(scrape(event_id, event_cost))\n return events_array\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n events = main()\n print(len(events))\n", "step-4": "from datetime import datetime\nimport logging\nimport os\nimport re\nfrom bs4 import BeautifulSoup\nimport requests\nfrom .utils.log import get_logger\nlogger = get_logger(os.path.basename(__file__))\nEVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN']\n\n\ndef get_category_name(page):\n if page['category_id'] is None:\n category = ''\n elif page['subcategory_id'] is None:\n category = get(page['category_id'], 'categories/').json()['name']\n else:\n category_name = get(page['category_id'], 'categories/')\n category_name = category_name.json()['name']\n category_name = category_name.replace(',', '')\n subcategory_name = get(page['subcategory_id'], 'subcategories/')\n subcategory_name = subcategory_name.json()['name']\n subcategory_name = subcategory_name.replace(',', '')\n category = category_name + ',' + subcategory_name\n return category\n\n\ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page['venue_id'], resource='venues').json()\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = '(' + venue['address']['region'] + ') ' + page['summary']\n event_data = {'Event Name': page['name']['text'], 'Event Description':\n desc, 'Event Start Date': start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'), 'Event End Date':\n end.strftime('%Y-%m-%d'), 'Event End Time': end.strftime('%H:%M:%S'\n ), 'All Day Event': 'False', 'Timezone': 'America/New_York',\n 'Event Venue Name': venue['name'], 'Event Organizers':\n 'Sierra Club MD', 'Event Cost': event_cost, 'Event Currency Symbol':\n '$', 'Event Category': get_category_name(page), 'Event Website':\n page['url'], 'Event Featured Image': ''}\n return event_data\n\n\ndef 
get(api_id, resource, params={'token': EVENTBRITE_TOKEN}):\n url = (f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' else\n f'https://www.eventbriteapi.com/v3/{resource}/{api_id}')\n try:\n if resource != 'o':\n r = requests.get(url, params=params)\n else:\n r = requests.get(url)\n except Exception as e:\n msg = f'Exception making GET request to {url}: {e}'\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f'Non-200 status code of {code} making GET request to: {url}'\n logger.critical(msg, exc_info=True)\n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find('article', {'id': 'live_events'})\n try:\n event_divs = live_events.find_all('div', {'class': 'list-card-v2'})\n except AttributeError:\n return []\n return event_divs\n\n\ndef get_cost_events(soup):\n cost = soup.find('span', {'class': 'list-card__label'}).text\n cost = cost.lower()\n cost = cost.replace('free', '0')\n cost = re.sub('[^\\\\d]+', '', cost)\n if cost == '':\n cost = '0'\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser')\n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find('a').get('data-eid')\n events_array.append(scrape(event_id, event_cost))\n return events_array\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n events = main()\n print(len(events))\n", "step-5": "from datetime import datetime\nimport logging\nimport os\nimport re\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nfrom .utils.log import get_logger\n\nlogger = get_logger(os.path.basename(__file__))\n\nEVENTBRITE_TOKEN = os.environ['EVENTBRITE_TOKEN']\n\n\ndef get_category_name(page):\n if page[\"category_id\"] is None:\n category = ''\n else:\n if page[\"subcategory_id\"] is None:\n category = get(page[\"category_id\"], 'categories/').json()[\"name\"]\n else:\n category_name = get(page[\"category_id\"], 'categories/')\n category_name = category_name.json()[\"name\"]\n category_name = category_name.replace(\",\", \"\")\n subcategory_name = get(page[\"subcategory_id\"], 'subcategories/')\n subcategory_name = subcategory_name.json()[\"name\"]\n subcategory_name = subcategory_name.replace(\",\", \"\")\n category = category_name + \",\" + subcategory_name\n return category\n\n \ndef scrape(event_id, event_cost):\n page = get(event_id, resource='events').json()\n venue = get(page[\"venue_id\"], resource='venues').json()\n\n start = datetime.strptime(page['start']['local'], '%Y-%m-%dT%H:%M:%S')\n end = datetime.strptime(page['end']['local'], '%Y-%m-%dT%H:%M:%S')\n desc = \"(\" + venue[\"address\"][\"region\"] + \") \" + page[\"summary\"]\n event_data = {\n 'Event Name': page['name']['text'],\n 'Event Description': desc,\n 'Event Start Date': start.strftime('%Y-%m-%d'),\n 'Event Start Time': start.strftime('%H:%M:%S'),\n 'Event End Date': end.strftime('%Y-%m-%d'),\n 'Event End Time': end.strftime('%H:%M:%S'),\n 'All Day Event': \"False\",\n 'Timezone': \"America/New_York\",\n 'Event Venue Name': venue[\"name\"],\n 'Event Organizers': 'Sierra Club MD',\n 'Event Cost': event_cost,\n 'Event Currency Symbol': \"$\",\n # TODO: parse event data for optional category fields if present\n 'Event Category': get_category_name(page), \n 'Event Website': page['url'],\n 'Event Featured Image': \"\"\n }\n return event_data\n\n\ndef get(api_id, resource, 
params={'token': EVENTBRITE_TOKEN}):\n url = f'https://www.eventbrite.com/o/{api_id}' if resource == 'o' \\\n else f'https://www.eventbriteapi.com/v3/{resource}/{api_id}' \n \n try:\n if resource != 'o':\n r = requests.get(url, params=params) \n else:\n r = requests.get(url)\n except Exception as e:\n msg = f\"Exception making GET request to {url}: {e}\"\n logger.critical(msg, exc_info=True)\n return\n if not r.ok:\n code = r.status_code\n msg = f\"Non-200 status code of {code} making GET request to: {url}\"\n logger.critical(msg, exc_info=True)\n \n return r\n\n\ndef get_live_events(soup):\n live_events = soup.find(\"article\", {\"id\": \"live_events\"})\n try:\n event_divs = live_events.find_all(\"div\", {\"class\": \"list-card-v2\"})\n except AttributeError:\n return []\n \n return event_divs\n \n\ndef get_cost_events(soup):\n cost = soup.find(\"span\", {\"class\": \"list-card__label\"}).text\n cost = cost.lower()\n cost = cost.replace(\"free\", \"0\")\n cost = re.sub(r'[^\\d]+', '', cost)\n if cost == \"\":\n cost = \"0\"\n return cost\n\n\ndef main():\n events_array = []\n r = get(14506382808, 'o')\n soup = BeautifulSoup(r.content, 'html.parser') \n event_a_refs = get_live_events(soup)\n for events in event_a_refs:\n event_cost = get_cost_events(events)\n event_id = events.find(\"a\").get(\"data-eid\")\n events_array.append(scrape(event_id, event_cost))\n \n return events_array\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n events = main()\n print(len(events))", "step-ids": [ 5, 7, 8, 9, 10 ] }
import thumt.utils.bleu as bleu
import argparse

parser = argparse.ArgumentParser("Compute sentence bleu.")
parser.add_argument("-pred_path", type=str, required=True)
parser.add_argument("-n_list_path", type=str, required=True)
parser.add_argument("-refer_path", type=str, required=True)

args = parser.parse_args()

n_list = []
with open(args.pred_path, 'r') as f:
    preds = f.readlines()
with open(args.n_list_path, 'r') as f:
    for line in f:
        n_list.append(int(line.strip()))

with open(args.refer_path, 'r') as f:
    golds = f.readlines()

f_summary = open(args.pred_path + ".sent-bleu", 'w')
gold_idx = 0
for idx, pred in enumerate(preds):
    # import ipdb; ipdb.set_trace()
    if idx == sum(n_list[:gold_idx + 1]):
        gold_idx += 1

    gold = golds[gold_idx].strip()  # remove `\n`
    # refs = [gold.split()]
    refs = [[gold.split()]]
    pred = [pred.strip().split()]
    # import ipdb; ipdb.set_trace()
    sent_bleu = bleu.bleu(pred, refs, smooth=True)
    print("%s : %s : %f" % (pred, refs, sent_bleu))
    f_summary.write(" ".join(pred[0]) + "|||" + str(sent_bleu) + "\n")
f_summary.close()
normal
{ "blob_id": "4437075901751adeaf3df63345e270a9b0090c14", "index": 1918, "step-1": "<mask token>\n", "step-2": "<mask token>\nparser.add_argument('-pred_path', type=str, required=True)\nparser.add_argument('-n_list_path', type=str, required=True)\nparser.add_argument('-refer_path', type=str, required=True)\n<mask token>\nwith open(args.pred_path, 'r') as f:\n preds = f.readlines()\nwith open(args.n_list_path, 'r') as f:\n for line in f:\n n_list.append(int(line.strip()))\nwith open(args.refer_path, 'r') as f:\n golds = f.readlines()\n<mask token>\nfor idx, pred in enumerate(preds):\n if idx == sum(n_list[:gold_idx + 1]):\n gold_idx += 1\n gold = golds[gold_idx].strip()\n refs = [[gold.split()]]\n pred = [pred.strip().split()]\n sent_bleu = bleu.bleu(pred, refs, smooth=True)\n print('%s : %s : %f' % (pred, refs, sent_bleu))\n f_summary.write(' '.join(pred[0]) + '|||' + str(sent_bleu) + '\\n')\nf_summary.close()\n", "step-3": "<mask token>\nparser = argparse.ArgumentParser('Compute sentence bleu.')\nparser.add_argument('-pred_path', type=str, required=True)\nparser.add_argument('-n_list_path', type=str, required=True)\nparser.add_argument('-refer_path', type=str, required=True)\nargs = parser.parse_args()\nn_list = []\nwith open(args.pred_path, 'r') as f:\n preds = f.readlines()\nwith open(args.n_list_path, 'r') as f:\n for line in f:\n n_list.append(int(line.strip()))\nwith open(args.refer_path, 'r') as f:\n golds = f.readlines()\nf_summary = open(args.pred_path + '.sent-bleu', 'w')\ngold_idx = 0\nfor idx, pred in enumerate(preds):\n if idx == sum(n_list[:gold_idx + 1]):\n gold_idx += 1\n gold = golds[gold_idx].strip()\n refs = [[gold.split()]]\n pred = [pred.strip().split()]\n sent_bleu = bleu.bleu(pred, refs, smooth=True)\n print('%s : %s : %f' % (pred, refs, sent_bleu))\n f_summary.write(' '.join(pred[0]) + '|||' + str(sent_bleu) + '\\n')\nf_summary.close()\n", "step-4": "import thumt.utils.bleu as bleu\nimport argparse\nparser = argparse.ArgumentParser('Compute sentence bleu.')\nparser.add_argument('-pred_path', type=str, required=True)\nparser.add_argument('-n_list_path', type=str, required=True)\nparser.add_argument('-refer_path', type=str, required=True)\nargs = parser.parse_args()\nn_list = []\nwith open(args.pred_path, 'r') as f:\n preds = f.readlines()\nwith open(args.n_list_path, 'r') as f:\n for line in f:\n n_list.append(int(line.strip()))\nwith open(args.refer_path, 'r') as f:\n golds = f.readlines()\nf_summary = open(args.pred_path + '.sent-bleu', 'w')\ngold_idx = 0\nfor idx, pred in enumerate(preds):\n if idx == sum(n_list[:gold_idx + 1]):\n gold_idx += 1\n gold = golds[gold_idx].strip()\n refs = [[gold.split()]]\n pred = [pred.strip().split()]\n sent_bleu = bleu.bleu(pred, refs, smooth=True)\n print('%s : %s : %f' % (pred, refs, sent_bleu))\n f_summary.write(' '.join(pred[0]) + '|||' + str(sent_bleu) + '\\n')\nf_summary.close()\n", "step-5": "import thumt.utils.bleu as bleu\nimport argparse\n\nparser = argparse.ArgumentParser(\"Compute sentence bleu.\")\nparser.add_argument(\"-pred_path\", type=str, required=True)\nparser.add_argument(\"-n_list_path\", type=str, required=True)\nparser.add_argument(\"-refer_path\", type=str, required=True)\n\nargs = parser.parse_args()\n\nn_list = []\nwith open(args.pred_path, 'r') as f:\n\tpreds = f.readlines()\nwith open(args.n_list_path, 'r') as f:\n for line in f:\n n_list.append(int(line.strip()))\n\nwith open(args.refer_path, 'r') as f:\n\tgolds = f.readlines()\n\nf_summary = open(args.pred_path + \".sent-bleu\", 'w')\ngold_idx = 0\nfor 
idx, pred in enumerate(preds):\n #import ipdb; ipdb.set_trace()\n if idx == sum(n_list[:gold_idx + 1]):\n gold_idx += 1\n\n gold = golds[gold_idx].strip()\t# remove `\\n`\n\t#refs = [gold.split()]\n refs = [[gold.split()]]\n pred = [pred.strip().split()]\n #import ipdb; ipdb.set_trace()\n sent_bleu = bleu.bleu(pred, refs, smooth=True)\n print(\"%s : %s : %f\" % (pred, refs, sent_bleu))\n f_summary.write(\" \".join(pred[0]) + \"|||\" + str(sent_bleu) + \"\\n\")\nf_summary.close()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
import logging

import terrestrial.config as config

logger = logging.getLogger(f'{__name__}.common')


def health():
    return 'OK', 200


def verify_token(token):
    """
    Verifies Token from Authorization header
    """
    if config.API_TOKEN is None:
        logger.error('API token is not configured, auth will fail!')
    return token == config.API_TOKEN
normal
{ "blob_id": "167bd2c405171443c11fbd13575f8c7b20877289", "index": 8470, "step-1": "<mask token>\n\n\ndef health():\n return 'OK', 200\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef health():\n return 'OK', 200\n\n\ndef verify_token(token):\n \"\"\"\n Verifies Token from Authorization header\n \"\"\"\n if config.API_TOKEN is None:\n logger.error('API token is not configured, auth will fail!')\n return token == config.API_TOKEN\n", "step-3": "<mask token>\nlogger = logging.getLogger(f'{__name__}.common')\n\n\ndef health():\n return 'OK', 200\n\n\ndef verify_token(token):\n \"\"\"\n Verifies Token from Authorization header\n \"\"\"\n if config.API_TOKEN is None:\n logger.error('API token is not configured, auth will fail!')\n return token == config.API_TOKEN\n", "step-4": "import logging\nimport terrestrial.config as config\nlogger = logging.getLogger(f'{__name__}.common')\n\n\ndef health():\n return 'OK', 200\n\n\ndef verify_token(token):\n \"\"\"\n Verifies Token from Authorization header\n \"\"\"\n if config.API_TOKEN is None:\n logger.error('API token is not configured, auth will fail!')\n return token == config.API_TOKEN\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
Easy = [["4 + 12 = ?", 16],
        ["45 -34 = ?", 11],
        ["27 + 12 -18 = ?", 21],
        ['25 - 5 * 4 = ?', 5],
        ["18 + 45 / 5 - 3 * 2 = ?", 21],
        ["5! = ?", 120],
        ["3! + 2! = ?", 8],
        ["7 + 5! / 4! - 6 / 3 = ?", 10],
        ["(25 + 5) / 6 * 4 = ?", 20],
        ["4(3+c)+c=c+4; c=?", -2],
        ["√121 = ?", 11],
        ["x = √81 - √64; x= ?", 1],
        ["x + y = 20; x - y = 4; y = ?", 8]]

Normal = [["8(10−k)=2k; k = ?", 8],
          ["−4n−8=4(−3n+2); n=?", 2],
          ["4(3+c)+c=c+4; c=?", -2],
          ["√121 = ?", 11],
          ["x = √81 - √64; x= ?", 1],
          ["y = √16 * √4 / √9; y=?", 2],
          ["y−3=2(x+1); x= -2, y=?", 1],
          [" y*y = 4x/5 − 11; y= 5, x = ?", 45],
          ["How many unique ways are there to arrange the letters in the word CANNON?", 120],
          ["How many numbers between 1 and 100(inclusive) are divisible by 10 or 7", 23],
          ["y=−4x+6; 3x+4y=-2 ; x=?", 2],
          ["−x+4y=−9; y=−2x-9; y=?", -3]]

Hard = [["Emily is packing her bags for her vacation. She has 6 unique Fabergé eggs, but only 3 fit in her bag. How many different groups of 3 Fabergé eggs can she take?", 20],
        ["You just got a free ticket for a boat ride, and you can bring along 2 friends! Unfortunately, you have 5 friends who want to come along. How many different groups of friends could you take with you?", 10],
        ["Omar is packing his bags for his vacation. He has 999 unique shirts, but only 5 fit in his bag. How many different groups of 5 shirts can he take?", 126],
        ["How many numbers between 1 and 100(inclusive) are divisible by 3 or 2?", 67],
        ["You need to put your reindeer, Gloopin, Quentin, Ezekiel, and Lancer, in a single-file line to pull your sleigh. However, Quentin and Gloopin are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?", 12],
        ["You need to put your reindeer, Gloopin, Balthazar, Bloopin, Prancer, and Quentin, in a single-file line to pull your sleigh. However, Prancer and Balthazar are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?", 2],
        ["y−3=2(x+1); x= -2, y=?", 48],
        ["How many unique ways are there to arrange the letters in the word CANNON?", 120],
        ["How many numbers between 1 and 100(inclusive) are divisible by 10 or 7", 23],
        ["−x+4y=−9; y=−2x-9; y=?", -3],
        ["x = √81 - √64; x= ?", 1],
        ["y = √16 * √4 / √9; y=?", 2],
        ["y−3=2(x+1); x= -2, y=?", 1],
        [" y*y = 4x/5 − 11; y= 5, x = ?", 45],
        ["y=−4x+6; 3x+4y=-2 ; x=?", 2],
        ["−x+4y=−9; y=−2x-9; y=?", -3]]
normal
{ "blob_id": "66edf0d2f7e25e166563bdb1063a1ed45ecda0e6", "index": 541, "step-1": "<mask token>\n", "step-2": "Easy = [['4 + 12 = ?', 16], ['45 -34 = ?', 11], ['27 + 12 -18 = ?', 21], [\n '25 - 5 * 4 = ?', 5], ['18 + 45 / 5 - 3 * 2 = ?', 21], ['5! = ?', 120],\n ['3! + 2! = ?', 8], ['7 + 5! / 4! - 6 / 3 = ?', 10], [\n '(25 + 5) / 6 * 4 = ?', 20], ['4(3+c)+c=c+4; c=?', -2], [\n '\\u200b√\\u200b121 = ?', 11], ['x = √\\u200b81 - √\\u200b64; x= ?', 1], [\n 'x + y = 20; x - y = 4; y = ?', 8]]\nNormal = [['8(10−k)=2k; k = ?', 8], ['−4n−8=4(−3n+2); n=?', 2], [\n '4(3+c)+c=c+4; c=?', -2], ['\\u200b√\\u200b121 = ?', 11], [\n 'x = √\\u200b81 - √\\u200b64; x= ?', 1], [\n 'y = √\\u200b16 * √\\u200b4 / √\\u200b9; y=?', 2], [\n 'y−3=2(x+1); x= -2, y=?', 1], [' y*y = 4x/5 − 11; y= 5, x = ?', 45], [\n 'How many unique ways are there to arrange the letters in the word CANNON?'\n , 120], [\n 'How many numbers between 1 and 100(inclusive) are divisible by 10 or 7',\n 23], ['y=−4x+6; \\u200b3x+4y=-2 ; x=?', 2], [\n '−x+4y=−9; \\u200by=−2x-9; y=?', -3]]\nHard = [[\n 'Emily is packing her bags for her vacation. She has 6 unique Fabergé eggs, but only 3 fit in her bag. How many different groups of 3 Fabergé eggs can she take?'\n , 20], [\n 'You just got a free ticket for a boat ride, and you can bring along 2 friends! Unfortunately, you have 5 friends who want to come along. How many different groups of friends could you take with you?'\n , 10], [\n 'Omar is packing his bags for his vacation. He has 999 unique shirts, but only 5 fit in his bag. How many different groups of 5 shirts can he take?'\n , 126], [\n 'How many numbers between 1 and 100(inclusive) are divisible by 3 or 2?',\n 67], [\n \"You need to put your reindeer, Gloopin, Quentin, Ezekiel, and Lancer, in a single-file line to pull your sleigh. However, Quentin and Gloopin are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\"\n , 12], [\n \"You need to put your reindeer, Gloopin, Balthazar, Bloopin, Prancer, and Quentin, in a single-file line to pull your sleigh. However, Prancer and Balthazar are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\"\n , 2], ['y−3=2(x+1); x= -2, y=?', 48], [\n 'How many unique ways are there to arrange the letters in the word CANNON?'\n , 120], [\n 'How many numbers between 1 and 100(inclusive) are divisible by 10 or 7',\n 23], ['−x+4y=−9; \\u200by=−2x-9; y=?', -3], [\n 'x = √\\u200b81 - √\\u200b64; x= ?', 1], [\n 'y = √\\u200b16 * √\\u200b4 / √\\u200b9; y=?', 2], [\n 'y−3=2(x+1); x= -2, y=?', 1], [' y*y = 4x/5 − 11; y= 5, x = ?', 45], [\n 'y=−4x+6; \\u200b3x+4y=-2 ; x=?', 2], ['−x+4y=−9; \\u200by=−2x-9; y=?', -3]]\n", "step-3": "Easy = [[\"4 + 12 = ?\", 16],\r\n [\"45 -34 = ?\", 11],\r\n [\"27 + 12 -18 = ?\", 21],\r\n ['25 - 5 * 4 = ?', 5],\r\n [\"18 + 45 / 5 - 3 * 2 = ?\", 21],\r\n [\"5! = ?\", 120],\r\n [\"3! + 2! = ?\", 8],\r\n [\"7 + 5! / 4! 
- 6 / 3 = ?\", 10],\r\n [\"(25 + 5) / 6 * 4 = ?\", 20],\r\n [\"4(3+c)+c=c+4; c=?\", -2],\r\n [\"​√​121 = ?\" ,11],\r\n [\"x = √​81 - √​64; x= ?\", 1],\r\n [\"x + y = 20; x - y = 4; y = ?\", 8]]\r\n\r\nNormal = [[\"8(10−k)=2k; k = ?\", 8],\r\n [\"−4n−8=4(−3n+2); n=?\", 2],\r\n [\"4(3+c)+c=c+4; c=?\", -2],\r\n [\"​√​121 = ?\" ,11],\r\n [\"x = √​81 - √​64; x= ?\", 1],\r\n [\"y = √​16 * √​4 / √​9; y=?\", 2],\r\n [\"y−3=2(x+1); x= -2, y=?\", 1],\r\n [\" y*y = 4x/5 − 11; y= 5, x = ?\", 45],\r\n [\"How many unique ways are there to arrange the letters in the word CANNON?\", 120],\r\n [\"How many numbers between 1 and 100(inclusive) are divisible by 10 or 7\", 23],\r\n [\"y=−4x+6; ​3x+4y=-2 ; x=?\", 2],\r\n [\"−x+4y=−9; ​y=−2x-9; y=?\", -3]]\r\n\r\nHard = [[\"Emily is packing her bags for her vacation. She has 6 unique Fabergé eggs, but only 3 fit in her bag. How many different groups of 3 Fabergé eggs can she take?\", 20],\r\n [\"You just got a free ticket for a boat ride, and you can bring along 2 friends! Unfortunately, you have 5 friends who want to come along. How many different groups of friends could you take with you?\", 10],\r\n [\"Omar is packing his bags for his vacation. He has 999 unique shirts, but only 5 fit in his bag. How many different groups of 5 shirts can he take?\", 126],\r\n [\"How many numbers between 1 and 100(inclusive) are divisible by 3 or 2?\" ,67],\r\n [\"You need to put your reindeer, Gloopin, Quentin, Ezekiel, and Lancer, in a single-file line to pull your sleigh. However, Quentin and Gloopin are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\", 12],\r\n [\"You need to put your reindeer, Gloopin, Balthazar, Bloopin, Prancer, and Quentin, in a single-file line to pull your sleigh. However, Prancer and Balthazar are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\", 2],\r\n [\"y−3=2(x+1); x= -2, y=?\", 48],\r\n [\"How many unique ways are there to arrange the letters in the word CANNON?\", 120],\r\n [\"How many numbers between 1 and 100(inclusive) are divisible by 10 or 7\", 23],\r\n [\"−x+4y=−9; ​y=−2x-9; y=?\", -3],\r\n [\"x = √​81 - √​64; x= ?\", 1],\r\n [\"y = √​16 * √​4 / √​9; y=?\", 2],\r\n [\"y−3=2(x+1); x= -2, y=?\", 1],\r\n [\" y*y = 4x/5 − 11; y= 5, x = ?\", 45],\r\n [\"y=−4x+6; ​3x+4y=-2 ; x=?\", 2],\r\n [\"−x+4y=−9; ​y=−2x-9; y=?\", -3]]\r\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
# -*- coding: utf-8 -*-

import luigi
from luigi import *
#from luigi import Task
import pandas as pd
from pset.tasks.embeddings.load_embeding import EmbedStudentData
from pset.tasks.data.load_dataset import HashedStudentData
import numpy as npy
import pickle
import os


class NearestStudents(Task):

    github_id = Parameter(default='b280302a', description='Github id to search nearby (not hashed)')
    n = IntParameter(default=5, description='Output top N')
    farthest = BoolParameter(default=False, description='Find farthest instead')

    def output(self):
        return luigi.LocalTarget("/Users/adcxdpf/Downloads/pset_03/sd.csv")

    def requires(self):
        return {
            'data': HashedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data'),
            'embedStudentData': EmbedStudentData(path='/Users/adcxdpf/Downloads/pset_03/pset/tasks/data')
        }
        #return self.clone(EmbedStudentData)

    def run(self):
        vectors_lookup_bytes = self.input()['embedStudentData'].open(mode='rb')
        vectors_lookup = pickle.load(vectors_lookup_bytes)

        vecs_list = pd.Series(vectors_lookup)
        vectors_df = pd.DataFrame(vectors_lookup, index=vecs_list.index)
        vectors_df.columns = ['vectors']
        print('##### vectors_df : ', vectors_df)
        print(' vectors_df shape is :: ', vectors_df.shape)

        print('github_id param : ', self.github_id)

        pd_xls_data = pd.read_excel(self.input()['data'].path, 0)
        idx = pd_xls_data.index[pd_xls_data['hashed_id'] == self.github_id]
        #print('######## idx.values ######### ', idx.values)

        my_vec = vectors_df.iloc[[idx.values[0]]]
        self.my_vec = my_vec.values[0][0]

        print('my_vec : ', self.my_vec)
        print(' my_vec shape is :: ', self.my_vec.shape)

        distances = vectors_df['vectors'].apply(self.my_distance)

        sortedDistance = distances.sort_values()
        print('###### sortedDistance : ', sortedDistance)

        # output data
        f = self.output().open('w')
        sortedDistance.str[0].to_csv(f)
        #df.to_csv(f, sep='\t', encoding='utf-8', index=None)
        f.close()

        nearDis = sortedDistance.head(self.n).index
        print('******** Nearest**********')
        for index in nearDis:
            print(pd_xls_data.iloc[index])

        farDis = sortedDistance.tail(5).index
        print('******** Farthest**********')
        for index in farDis:
            print(pd_xls_data.iloc[index])

    def cosine_similarity(self, a, b):
        # """Takes 2 vectors a, b and returns the cosine similarity according
        # to the definition of the dot product
        # """
        # dot_product = npy.dot(a, b)
        # norm_a = npy.linalg.norm(a)
        # norm_b = npy.linalg.norm(b)
        # return dot_product / (norm_a * norm_b)

        dot_product = npy.dot(a[0], b.T)
        norm_a = npy.linalg.norm(a)
        norm_b = npy.linalg.norm(b)

        return dot_product / (norm_a * norm_b)

    def my_distance(self, vec1):
        return 1 - self.cosine_similarity(vec1, self.my_vec)
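For reference, the ranking that NearestStudents.run() performs reduces to sorting the embedding table by 1 - cosine similarity against the query row. A minimal self-contained sketch of that logic on toy data (the ids other than the default 'b280302a' and all vector values here are illustrative only, not taken from the task's inputs):

import numpy as np
import pandas as pd

def cosine_distance(vec, query):
    # 1 - cosine similarity, mirroring NearestStudents.my_distance
    return 1 - np.dot(vec, query) / (np.linalg.norm(vec) * np.linalg.norm(query))

# toy embedding table keyed by hashed id (illustrative values only)
vectors_df = pd.DataFrame(
    {'vectors': [np.array([1.0, 0.0]),
                 np.array([0.9, 0.1]),
                 np.array([0.0, 1.0])]},
    index=['b280302a', 'a1b2c3d4', 'deadbeef'])

query = vectors_df.loc['b280302a', 'vectors']
distances = vectors_df['vectors'].apply(cosine_distance, query=query).sort_values()

print(distances.head(2))  # nearest ids (smallest distance; the query row itself sorts first at 0.0)
print(distances.tail(2))  # farthest ids (largest distance)

Because the metric is 1 - cos(theta), reading from the tail of the same sorted series is one way the task's currently unused `farthest` flag could be honoured.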
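The block that follows is an RMG-Py species/thermo listing; each thermo entry is a pair of 7-coefficient NASA polynomials over adjacent temperature ranges. As a quick way to read those coefficients, here is a minimal sketch of evaluating heat capacity from one polynomial, assuming the standard NASA-7 convention (coeffs = [a1..a7] with Cp/R = a1 + a2*T + a3*T^2 + a4*T^3 + a5*T^4; a6 and a7 enter only the enthalpy and entropy expressions). The coefficients used below are copied from the low-temperature polynomial of the CH2(S)(23) entry further down:

R = 8.314462618  # gas constant, J/(mol*K)

def nasa7_cp(T, coeffs):
    # Cp(T) in J/(mol*K); only a1..a5 contribute to heat capacity
    a1, a2, a3, a4, a5 = coeffs[:5]
    return R * (a1 + a2*T + a3*T**2 + a4*T**3 + a5*T**4)

# CH2(S)(23), low-temperature polynomial, valid 200-1000 K (values taken from the species entry below)
ch2s_low = [4.19195, -0.00230793, 8.0509e-06, -6.60123e-09, 1.95638e-12, 50484.3, -0.754589]

for T in (300.0, 500.0, 1000.0):
    print(T, 'K ->', round(nasa7_cp(T, ch2s_low), 2), 'J/(mol*K)')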
species( label = 'C=C([CH]C)C(=C)[CH]C(24182)', structure = SMILES('[CH2]C(=CC)C([CH2])=CC'), E0 = (249.687,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')), HinderedRotor(inertia=(0.735277,'amu*angstrom^2'), symmetry=1, barrier=(16.9055,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0632434,'amu*angstrom^2'), symmetry=1, barrier=(29.514,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.737545,'amu*angstrom^2'), symmetry=1, barrier=(16.9576,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.732781,'amu*angstrom^2'), symmetry=1, barrier=(16.8481,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.739219,'amu*angstrom^2'), symmetry=1, barrier=(16.9961,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005,0.0840749,-5.09991e-05,5.50851e-09,4.14197e-12,30198.9,28.4131], Tmin=(100,'K'), Tmax=(1039.09,'K')), NASAPolynomial(coeffs=[18.1326,0.0354522,-1.35159e-05,2.44392e-09,-1.69358e-13,25127.7,-67.5143], Tmin=(1039.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(249.687,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)"""), ) species( label = 'CH3CHCCH2(18175)', structure = SMILES('C=C=CC'), E0 = (145.615,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')), HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (54.0904,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label="""CH3CHCCH2""", comment="""Thermo library: DFT_QCI_thermo"""), ) species( label = '[CH2]C1([CH]C)CC1=CC(25275)', structure = SMILES('[CH2]C1([CH]C)CC1=CC'), E0 = (462.221,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.263258,0.0692237,-2.26363e-05,-1.35463e-08,8.13734e-12,55737.7,31.4039], 
Tmin=(100,'K'), Tmax=(1105.46,'K')), NASAPolynomial(coeffs=[15.171,0.0400578,-1.66801e-05,3.13624e-09,-2.2049e-13,50927.8,-48.8594], Tmin=(1105.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(462.221,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""), ) species( label = 'C=[C][CH]C(18176)', structure = SMILES('[CH2][C]=CC'), E0 = (361.056,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')), HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (54.0904,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)"""), ) species( label = '[CH2]C(=CC)C(C)=[C]C(25412)', structure = SMILES('[CH2]C(=CC)C(C)=[C]C'), E0 = (336.03,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')), HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + 
radical(Allyl_P)"""), ) species( label = '[CH2]C(=[C]C)C(C)=CC(25413)', structure = SMILES('[CH2]C(=[C]C)C(C)=CC'), E0 = (336.03,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')), HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)"""), ) species( label = '[CH2]C(=CC)[C](C)C=C(24605)', structure = SMILES('[CH2]C=C(C)C([CH2])=CC'), E0 = (216.244,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')), HinderedRotor(inertia=(0.712083,'amu*angstrom^2'), symmetry=1, barrier=(16.3722,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.555659,'amu*angstrom^2'), symmetry=1, barrier=(96.3851,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0202512,'amu*angstrom^2'), symmetry=1, barrier=(16.3711,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.712008,'amu*angstrom^2'), symmetry=1, barrier=(16.3705,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(4.19211,'amu*angstrom^2'), symmetry=1, barrier=(96.3849,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + 
group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""), ) species( label = '[CH2][C](C=C)C(C)=CC(24606)', structure = SMILES('[CH2]C=C([CH2])C(C)=CC'), E0 = (216.244,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)"""), ) species( label = '[CH2]C(=CC)[C]1CC1C(25414)', structure = SMILES('[CH2]C(=CC)[C]1CC1C'), E0 = (289.9,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.71289,0.0520158,3.84829e-05,-8.55933e-08,3.61457e-11,35003.5,26.4903], Tmin=(100,'K'), Tmax=(968.714,'K')), NASAPolynomial(coeffs=[16.7686,0.0352996,-1.24057e-05,2.26286e-09,-1.62921e-13,29566.5,-62.466], Tmin=(968.714,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.9,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)"""), ) species( label = '[CH2][C]1C(=CC)CC1C(25415)', structure = SMILES('[CH2]C1=C([CH]C)CC1C'), E0 = (304.572,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.583091,0.0531885,4.0938e-05,-9.08388e-08,3.83549e-11,36774.2,26.4705], Tmin=(100,'K'), Tmax=(972.301,'K')), NASAPolynomial(coeffs=[18.2947,0.0339462,-1.21014e-05,2.24934e-09,-1.64353e-13,30795.4,-71.5147], Tmin=(972.301,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(304.572,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_S)"""), ) species( label = 'CH2(S)(23)', structure = SMILES('[CH2]'), E0 = (419.862,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')), ], spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (14.0266,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, 
comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""), ) species( label = '[CH2]C(=C)C([CH2])=CC(25416)', structure = SMILES('[CH2]C(=C)C([CH2])=CC'), E0 = (285.713,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655,311.383],'cm^-1')), HinderedRotor(inertia=(0.327475,'amu*angstrom^2'), symmetry=1, barrier=(22.5291,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.327466,'amu*angstrom^2'), symmetry=1, barrier=(22.5294,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.327318,'amu*angstrom^2'), symmetry=1, barrier=(22.5272,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.327483,'amu*angstrom^2'), symmetry=1, barrier=(22.5297,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (94.1543,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.335271,0.0676667,-2.76626e-05,-1.62749e-08,1.21982e-11,34506.8,24.024], Tmin=(100,'K'), Tmax=(980.594,'K')), NASAPolynomial(coeffs=[17.5531,0.0266059,-9.47854e-06,1.70194e-09,-1.19937e-13,29727.4,-65.8563], Tmin=(980.594,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(285.713,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)"""), ) species( label = 'C=C([CH]C)C[C]=CC(24184)', structure = SMILES('[CH2]C(=CC)C[C]=CC'), E0 = (366.985,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,579.702],'cm^-1')), HinderedRotor(inertia=(0.147406,'amu*angstrom^2'), symmetry=1, barrier=(3.38916,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.64226,'amu*angstrom^2'), symmetry=1, barrier=(14.7668,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.64164,'amu*angstrom^2'), symmetry=1, barrier=(14.7526,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.643937,'amu*angstrom^2'), symmetry=1, barrier=(14.8054,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.145327,'amu*angstrom^2'), symmetry=1, barrier=(3.34136,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(3683.66,'J/mol'), sigma=(6.4482,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=575.38 K, 
Pc=31.18 bar (from Joback method)"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29648,0.0786067,-5.42868e-05,1.96375e-08,-2.97459e-12,44273.2,31.2372], Tmin=(100,'K'), Tmax=(1490.43,'K')), NASAPolynomial(coeffs=[13.9025,0.0420909,-1.75363e-05,3.199e-09,-2.17227e-13,40217.5,-39.8334], Tmin=(1490.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.985,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)"""), ) species( label = 'CC=C1CCC1=CC(25269)', structure = SMILES('CC=C1CCC1=CC'), E0 = (114.107,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.677799,0.0585738,5.80411e-06,-4.1598e-08,1.78951e-11,13856,25.5085], Tmin=(100,'K'), Tmax=(1034.79,'K')), NASAPolynomial(coeffs=[13.4814,0.0415234,-1.65073e-05,3.07348e-09,-2.16896e-13,9469.28,-45.0922], Tmin=(1034.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(114.107,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)"""), ) species( label = 'CH2(19)', structure = SMILES('[CH2]'), E0 = (381.563,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (14.0266,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""), ) species( label = '[CH2]C([C]=CC)=CC(25417)', structure = SMILES('[CH2]C([C]=CC)=CC'), E0 = (334.774,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')), HinderedRotor(inertia=(0.7606,'amu*angstrom^2'), symmetry=1, barrier=(17.4877,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.760854,'amu*angstrom^2'), symmetry=1, barrier=(17.4935,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.760586,'amu*angstrom^2'), symmetry=1, barrier=(17.4874,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(2.15146,'amu*angstrom^2'), symmetry=1, 
barrier=(49.4663,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (94.1543,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.352604,0.0734369,-5.91187e-05,2.57941e-08,-4.60694e-12,40400.9,25.1788], Tmin=(100,'K'), Tmax=(1327.42,'K')), NASAPolynomial(coeffs=[14.2321,0.0316126,-1.18565e-05,2.05761e-09,-1.36512e-13,36716.1,-45.7131], Tmin=(1327.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(334.774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)"""), ) species( label = '[CH2]C1([CH]C)C(=C)C1C(25296)', structure = SMILES('[CH2]C1([CH]C)C(=C)C1C'), E0 = (466.494,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29276,0.0655305,-4.50464e-06,-3.74661e-08,1.7759e-11,56253.7,30.0992], Tmin=(100,'K'), Tmax=(1027.4,'K')), NASAPolynomial(coeffs=[16.6435,0.0372633,-1.49065e-05,2.81296e-09,-2.01072e-13,51026,-58.316], Tmin=(1027.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(466.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)"""), ) species( label = 'H(3)', structure = SMILES('[H]'), E0 = (211.792,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (1.00794,'amu'), collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""), ) species( label = '[CH2]C(=CC)C(=C)C=C(24604)', structure = SMILES('[CH2]C(=CC)C(=C)C=C'), E0 = (242.677,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,181.962,683.313],'cm^-1')), HinderedRotor(inertia=(0.669842,'amu*angstrom^2'), symmetry=1, barrier=(19.1337,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0582339,'amu*angstrom^2'), symmetry=1, barrier=(19.1767,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.83204,'amu*angstrom^2'), symmetry=1, barrier=(19.1302,'kJ/mol'), semiclassical=False), 
HinderedRotor(inertia=(4.52237,'amu*angstrom^2'), symmetry=1, barrier=(104.569,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 2, opticalIsomers = 1, molecularWeight = (107.173,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.293043,0.0682771,-2.00337e-05,-2.05401e-08,1.21516e-11,29332.3,27.0261], Tmin=(100,'K'), Tmax=(1018.57,'K')), NASAPolynomial(coeffs=[15.7386,0.0358123,-1.37404e-05,2.51366e-09,-1.76142e-13,24723.4,-54.9529], Tmin=(1018.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(242.677,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)"""), ) species( label = '[CH2]CC(=C)C([CH2])=CC(25418)', structure = SMILES('[CH2]CC(=C)C([CH2])=CC'), E0 = (316.814,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,180,180],'cm^-1')), HinderedRotor(inertia=(0.0368535,'amu*angstrom^2'), symmetry=1, barrier=(17.9864,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.00736317,'amu*angstrom^2'), symmetry=1, barrier=(3.60618,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.781153,'amu*angstrom^2'), symmetry=1, barrier=(17.9602,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.779478,'amu*angstrom^2'), symmetry=1, barrier=(17.9217,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.781104,'amu*angstrom^2'), symmetry=1, barrier=(17.9591,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925,0.0836004,-5.1879e-05,7.14877e-09,3.44908e-12,38270.9,31.5928], Tmin=(100,'K'), Tmax=(1044.14,'K')), NASAPolynomial(coeffs=[17.9255,0.0352115,-1.34219e-05,2.42456e-09,-1.67785e-13,33276.3,-63.0036], Tmin=(1044.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.814,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)"""), ) species( label = '[CH]=C(CC)C([CH2])=CC(25419)', structure = SMILES('[CH]=C(CC)C([CH2])=CC'), E0 = (358.664,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180],'cm^-1')), HinderedRotor(inertia=(0.701639,'amu*angstrom^2'), symmetry=1, barrier=(16.1321,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.344302,'amu*angstrom^2'), symmetry=1, barrier=(16.1602,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0492932,'amu*angstrom^2'), symmetry=1, barrier=(16.1378,'kJ/mol'), semiclassical=False), 
HinderedRotor(inertia=(0.702005,'amu*angstrom^2'), symmetry=1, barrier=(16.1405,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.702379,'amu*angstrom^2'), symmetry=1, barrier=(16.1491,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616,0.0864938,-5.84569e-05,1.27697e-08,1.75707e-12,43308.4,30.6389], Tmin=(100,'K'), Tmax=(1047.28,'K')), NASAPolynomial(coeffs=[18.4195,0.034593,-1.31104e-05,2.35762e-09,-1.62637e-13,38242.2,-66.6572], Tmin=(1047.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(358.664,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)"""), ) species( label = '[CH2]C(=[C]C)C(=C)CC(25420)', structure = SMILES('[CH2]C(=[C]C)C(=C)CC'), E0 = (349.41,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,180],'cm^-1')), HinderedRotor(inertia=(0.159905,'amu*angstrom^2'), symmetry=1, barrier=(15.9368,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.693159,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.693127,'amu*angstrom^2'), symmetry=1, barrier=(15.9364,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.693165,'amu*angstrom^2'), symmetry=1, barrier=(15.9372,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.0150632,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231,0.089245,-7.16619e-05,3.00631e-08,-5.07891e-12,42198.9,31.1306], Tmin=(100,'K'), Tmax=(1412.15,'K')), NASAPolynomial(coeffs=[19.0319,0.0336833,-1.2643e-05,2.20036e-09,-1.46165e-13,36659.1,-70.2702], Tmin=(1412.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.41,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""), ) species( label = '[CH]=C([CH]C)C(C)=CC(25421)', structure = SMILES('[CH]C(=CC)C(C)=CC'), E0 = (317.373,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), 
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.247945,0.0873521,-6.16843e-05,2.31486e-08,-3.62747e-12,38328.8,29.1665], Tmin=(100,'K'), Tmax=(1460.93,'K')), NASAPolynomial(coeffs=[15.297,0.0447902,-1.7984e-05,3.20673e-09,-2.14924e-13,33786.8,-51.7212], Tmin=(1460.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(317.373,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)"""), ) species( label = '[CH2][C](C=C)C(=C)CC(24623)', structure = SMILES('[CH2]C(C=C)=C([CH2])CC'), E0 = (228.159,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728,0.0733281,-1.6094e-05,-3.35123e-08,1.88363e-11,27601.1,30.4448], Tmin=(100,'K'), Tmax=(975.095,'K')), NASAPolynomial(coeffs=[18.3695,0.0342638,-1.21408e-05,2.16747e-09,-1.52112e-13,22274,-66.8493], Tmin=(975.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(228.159,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)"""), ) species( label = 'C[CH][C]1CCC1=CC(25422)', structure = SMILES('C[CH]C1CCC=1[CH]C'), E0 = (303.292,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.788866,0.0500701,4.22235e-05,-8.64809e-08,3.53174e-11,36611.5,25.2586], Tmin=(100,'K'), Tmax=(987.239,'K')), NASAPolynomial(coeffs=[16.2187,0.0373502,-1.4111e-05,2.65357e-09,-1.92503e-13,31138.2,-61.2734], Tmin=(987.239,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(303.292,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)"""), ) species( label = '[CH2][C]1C(=C)C(C)C1C(25423)', structure = SMILES('[CH2]C1=C([CH2])C(C)C1C'), E0 = (305.852,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[0.377097,0.0563026,3.9705e-05,-9.53284e-08,4.14811e-11,36937,26.2973], Tmin=(100,'K'), Tmax=(959.735,'K')), NASAPolynomial(coeffs=[20.4056,0.0304853,-1.006e-05,1.83774e-09,-1.35603e-13,30437.2,-83.3398], Tmin=(959.735,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.852,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)"""), ) species( label = 'C=CC(=C)C(C)=CC(24616)', structure = SMILES('C=CC(=C)C(C)=CC'), E0 = (91.1774,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.236638,0.0713806,-3.04205e-05,-5.26762e-09,5.54498e-12,11111.2,26.9518], Tmin=(100,'K'), Tmax=(1093.32,'K')), NASAPolynomial(coeffs=[14.1536,0.040705,-1.6104e-05,2.93544e-09,-2.02595e-13,6858.32,-46.9636], Tmin=(1093.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(91.1774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)"""), ) species( label = 'C=[C]C(C)C(=C)[CH]C(24183)', structure = SMILES('[CH2]C(=CC)C(C)[C]=C'), E0 = (369.44,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1685,370,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,345.333,347.343],'cm^-1')), HinderedRotor(inertia=(0.119405,'amu*angstrom^2'), symmetry=1, barrier=(9.93037,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.281457,'amu*angstrom^2'), symmetry=1, barrier=(24.022,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.116909,'amu*angstrom^2'), symmetry=1, barrier=(9.94809,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.117447,'amu*angstrom^2'), symmetry=1, barrier=(9.9744,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.116555,'amu*angstrom^2'), symmetry=1, barrier=(9.93684,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), collisionModel = TransportData(shapeIndex=2, epsilon=(3625.33,'J/mol'), sigma=(6.4092,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.299693,0.0839308,-6.74533e-05,3.06742e-08,-6.02582e-12,44564.4,29.0122], Tmin=(100,'K'), Tmax=(1163.73,'K')), NASAPolynomial(coeffs=[10.857,0.0476425,-2.06788e-05,3.8782e-09,-2.69295e-13,42107.3,-23.5217], Tmin=(1163.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(369.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + 
group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)"""), ) species( label = 'C=C1C(=CC)CC1C(25265)', structure = SMILES('C=C1C(=CC)CC1C'), E0 = (118.381,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.689924,0.0550304,2.3689e-05,-6.56265e-08,2.77602e-11,14372.8,24.9628], Tmin=(100,'K'), Tmax=(993.204,'K')), NASAPolynomial(coeffs=[15.3775,0.0380508,-1.43595e-05,2.66472e-09,-1.90565e-13,9375.16,-56.2678], Tmin=(993.204,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.381,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""), ) species( label = 'CHCH3(T)(95)', structure = SMILES('[CH]C'), E0 = (343.893,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')), HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (28.0532,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""CHCH3(T)""", comment="""Thermo library: DFT_QCI_thermo"""), ) species( label = '[CH2]C([C]=C)=CC(24774)', structure = SMILES('[CH2]C([C]=C)=CC'), E0 = (370.8,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,350,440,435,1725,3000,3100,440,815,1455,1000,180],'cm^-1')), HinderedRotor(inertia=(1.17315,'amu*angstrom^2'), symmetry=1, barrier=(26.9731,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(1.17496,'amu*angstrom^2'), symmetry=1, barrier=(27.0146,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(1.1727,'amu*angstrom^2'), symmetry=1, barrier=(26.9626,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (80.1277,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.0818,0.0569416,-3.56598e-05,4.1841e-09,3.20998e-12,44708.4,20.7527], Tmin=(100,'K'), Tmax=(982.69,'K')), NASAPolynomial(coeffs=[12.9204,0.0239405,-8.46845e-06,1.46434e-09,-9.91425e-14,41648.3,-39.886], Tmin=(982.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(370.8,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + 
radical(Allyl_P)"""), ) species( label = '[CH]=C([CH]C)C(=C)CC(25424)', structure = SMILES('[CH]C(=CC)C(=C)CC'), E0 = (330.753,'kJ/mol'), modes = [ HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,200,800,1066.67,1333.33,1600],'cm^-1')), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False), ], spinMultiplicity = 3, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.442166,0.0858934,-5.1432e-05,9.5936e-09,1.54315e-12,39950.3,30.9724], Tmin=(100,'K'), Tmax=(1106.5,'K')), NASAPolynomial(coeffs=[16.3579,0.0427111,-1.66841e-05,2.99222e-09,-2.04007e-13,35158.1,-56.633], Tmin=(1106.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.753,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)"""), ) species( label = 'C=CC(=C)C(=C)CC(24630)', structure = SMILES('C=CC(=C)C(=C)CC'), E0 = (104.558,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.296747,0.0670054,-1.0269e-05,-3.13536e-08,1.59568e-11,12721.3,27.8384], Tmin=(100,'K'), Tmax=(1010.3,'K')), NASAPolynomial(coeffs=[15.6889,0.0379462,-1.44599e-05,2.64736e-09,-1.86033e-13,7984.11,-54.6302], Tmin=(1010.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.558,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)"""), ) species( label = 'C=C1C(=C)C(C)C1C(25274)', structure = SMILES('C=C1C(=C)C(C)C1C'), E0 = (122.654,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (108.181,'amu'), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.691732,0.0515838,4.13669e-05,-8.96066e-08,3.77135e-11,14890,23.0693], Tmin=(100,'K'), Tmax=(969.873,'K')), NASAPolynomial(coeffs=[17.4573,0.0342784,-1.20439e-05,2.21718e-09,-1.61071e-13,9199.74,-69.8715], Tmin=(969.873,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(122.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) 
+ group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)"""), ) species( label = 'N2', structure = SMILES('N#N'), E0 = (-8.69489,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (28.0135,'amu'), collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""), ) species( label = 'Ne', structure = SMILES('[Ne]'), E0 = (-6.19738,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, molecularWeight = (20.1797,'amu'), collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""), energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85), thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""), ) transitionState( label = 'TS1', E0 = (291.23,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS2', E0 = (462.221,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS3', E0 = (538.699,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS4', E0 = (497.951,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS5', E0 = (380.338,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS6', E0 = (399.474,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS7', E0 = (350.103,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS8', E0 = (722.113,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS9', E0 = (343.259,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS10', E0 = (380.132,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS11', E0 = (705.575,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS12', E0 = (537.022,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS13', E0 = (257.971,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS14', E0 = (716.337,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS15', E0 = (466.494,'kJ/mol'), spinMultiplicity = 1, 
opticalIsomers = 1, ) transitionState( label = 'TS16', E0 = (454.469,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS17', E0 = (430.619,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS18', E0 = (503.849,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS19', E0 = (393.718,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS20', E0 = (361.682,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS21', E0 = (350.103,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS22', E0 = (380.132,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS23', E0 = (375.044,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS24', E0 = (274.66,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS25', E0 = (463.915,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS26', E0 = (257.971,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS27', E0 = (714.692,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS28', E0 = (375.062,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS29', E0 = (258.055,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) transitionState( label = 'TS30', E0 = (257.971,'kJ/mol'), spinMultiplicity = 1, opticalIsomers = 1, ) reaction( label = 'reaction1', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'], transitionState = 'TS1', kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(41.5431,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ] Euclidian distance = 0 family: 1,4_Linear_birad_scission Ea raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction."""), ) reaction( label = 'reaction2', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['[CH2]C1([CH]C)CC1=CC(25275)'], transitionState = 'TS2', kinetics = Arrhenius(A=(3.36e+09,'s^-1'), n=0.84, Ea=(212.534,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H] Euclidian distance = 2.0 Multiplied by reaction path degeneracy 2.0 family: Intra_R_Add_Exocyclic Ea raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction."""), ) reaction( label = 'reaction3', reactants = ['CH3CHCCH2(18175)', 'C=[C][CH]C(18176)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS3', kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Ca_Cds-HH;CJ] Euclidian distance = 0 family: R_Addition_MultipleBond"""), ) reaction( label = 'reaction4', reactants = ['[CH2]C(=CC)C(C)=[C]C(25412)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS4', kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H Exact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H] Euclidian distance = 0 Multiplied by reaction path degeneracy 3.0 family: intra_H_migration"""), ) reaction( label = 'reaction5', reactants = 
['[CH2]C(=[C]C)C(C)=CC(25413)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS5', kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H] Euclidian distance = 2.2360679775 Multiplied by reaction path degeneracy 3.0 family: intra_H_migration"""), ) reaction( label = 'reaction6', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['[CH2]C(=CC)[C](C)C=C(24605)'], transitionState = 'TS6', kinetics = Arrhenius(A=(1.6e+06,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H Exact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H] Euclidian distance = 0 Multiplied by reaction path degeneracy 6.0 family: intra_H_migration"""), ) reaction( label = 'reaction7', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['[CH2][C](C=C)C(C)=CC(24606)'], transitionState = 'TS7', kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H Exact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H] Euclidian distance = 0 Multiplied by reaction path degeneracy 6.0 family: intra_H_migration"""), ) reaction( label = 'reaction8', reactants = ['C=[C][CH]C(18176)', 'C=[C][CH]C(18176)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS8', kinetics = Arrhenius(A=(3.73038e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad] Euclidian distance = 0 family: R_Recombination Ea raised from -14.4 to 0 kJ/mol."""), ) reaction( label = 'reaction9', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['[CH2]C(=CC)[C]1CC1C(25414)'], transitionState = 'TS9', kinetics = Arrhenius(A=(7.36786e+12,'s^-1'), n=-0.105173, Ea=(93.5715,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H] Euclidian distance = 2.0 Multiplied by reaction path degeneracy 2.0 family: Intra_R_Add_Endocyclic"""), ) reaction( label = 'reaction10', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['[CH2][C]1C(=CC)CC1C(25415)'], transitionState = 'TS10', kinetics = Arrhenius(A=(6.43734e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: Intra_R_Add_Endocyclic"""), ) reaction( label = 'reaction11', reactants = ['CH2(S)(23)', '[CH2]C(=C)C([CH2])=CC(25416)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS11', kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 4 used for carbene;Cd_pri Exact match found for rate rule [carbene;Cd_pri] Euclidian distance = 0 Multiplied by reaction path degeneracy 4.0 family: 1,2_Insertion_carbene Ea raised from -3.9 to 0 kJ/mol."""), ) reaction( label = 'reaction23', reactants = ['C=C([CH]C)C[C]=CC(24184)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS12', kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [cCsCJ;CdsJ;C] + 
[cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C] Euclidian distance = 1.0 family: 1,2_shiftC"""), ) reaction( label = 'reaction13', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['CC=C1CCC1=CC(25269)'], transitionState = 'TS13', kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H Exact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H] Euclidian distance = 0 family: Birad_recombination"""), ) reaction( label = 'reaction14', reactants = ['CH2(19)', '[CH2]C([C]=CC)=CC(25417)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS14', kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad] Euclidian distance = 3.0 family: Birad_R_Recombination Ea raised from -3.5 to 0 kJ/mol."""), ) reaction( label = 'reaction15', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['[CH2]C1([CH]C)C(=C)C1C(25296)'], transitionState = 'TS15', kinetics = Arrhenius(A=(6.72658e+10,'s^-1'), n=0.535608, Ea=(216.807,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd] Euclidian distance = 2.2360679775 Multiplied by reaction path degeneracy 2.0 family: Intra_R_Add_Exocyclic Ea raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction."""), ) reaction( label = 'reaction16', reactants = ['H(3)', '[CH2]C(=CC)C(=C)C=C(24604)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS16', kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 2544 used for Cds-HH_Cds-CdH;HJ Exact match found for rate rule [Cds-HH_Cds-CdH;HJ] Euclidian distance = 0 family: R_Addition_MultipleBond Ea raised from -2.0 to 0 kJ/mol."""), ) reaction( label = 'reaction17', reactants = ['[CH2]CC(=C)C([CH2])=CC(25418)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS17', kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction18', reactants = ['[CH]=C(CC)C([CH2])=CC(25419)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS18', kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC Exact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC] Euclidian distance = 0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction19', reactants = ['[CH2]C(=[C]C)C(=C)CC(25420)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS19', kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC] Euclidian distance = 2.2360679775 Multiplied by 
reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction20', reactants = ['[CH]=C([CH]C)C(C)=CC(25421)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS20', kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 3.0 family: intra_H_migration"""), ) reaction( label = 'reaction21', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['[CH2][C](C=C)C(=C)CC(24623)'], transitionState = 'TS21', kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H] Euclidian distance = 2.0 Multiplied by reaction path degeneracy 6.0 family: intra_H_migration"""), ) reaction( label = 'reaction22', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['C[CH][C]1CCC1=CC(25422)'], transitionState = 'TS22', kinetics = Arrhenius(A=(3.21867e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H] Euclidian distance = 0 family: Intra_R_Add_Endocyclic"""), ) reaction( label = 'reaction23', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['[CH2][C]1C(=C)C(C)C1C(25423)'], transitionState = 'TS23', kinetics = Arrhenius(A=(5.16207e+08,'s^-1'), n=0.911389, Ea=(125.357,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs] Euclidian distance = 0 family: Intra_R_Add_Endocyclic"""), ) reaction( label = 'reaction24', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['C=CC(=C)C(C)=CC(24616)'], transitionState = 'TS24', kinetics = Arrhenius(A=(1.27566e+10,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 6.0 family: Intra_Disproportionation"""), ) reaction( label = 'reaction24', reactants = ['C=[C]C(C)C(=C)[CH]C(24183)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS25', kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 5 used for cCs(-HC)CJ;CdsJ;C Exact match found for rate rule [cCs(-HC)CJ;CdsJ;C] Euclidian distance = 0 family: 1,2_shiftC"""), ) reaction( label = 'reaction26', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['C=C1C(=CC)CC1C(25265)'], transitionState = 'TS26', kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC] Euclidian distance = 2.0 Multiplied by reaction path degeneracy 2.0 family: Birad_recombination"""), ) reaction( label = 'reaction27', reactants = ['CHCH3(T)(95)', '[CH2]C([C]=C)=CC(24774)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS27', kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad] Euclidian distance = 3.0 family: Birad_R_Recombination Ea raised from -3.5 to 0 kJ/mol."""), ) reaction( 
label = 'reaction28', reactants = ['[CH]=C([CH]C)C(=C)CC(25424)'], products = ['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState = 'TS28', kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 2.0 family: intra_H_migration"""), ) reaction( label = 'reaction29', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['C=CC(=C)C(=C)CC(24630)'], transitionState = 'TS29', kinetics = Arrhenius(A=(1.926e+10,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad] Euclidian distance = 1.0 Multiplied by reaction path degeneracy 6.0 family: Intra_Disproportionation"""), ) reaction( label = 'reaction30', reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'], products = ['C=C1C(=C)C(C)C1C(25274)'], transitionState = 'TS30', kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC] Euclidian distance = 2.82842712475 family: Birad_recombination"""), ) network( label = '4267', isomers = [ 'C=C([CH]C)C(=C)[CH]C(24182)', ], reactants = [ ('CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'), ], bathGas = { 'N2': 0.5, 'Ne': 0.5, }, ) pressureDependence( label = '4267', Tmin = (300,'K'), Tmax = (2000,'K'), Tcount = 8, Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'), Pmin = (0.01,'bar'), Pmax = (100,'bar'), Pcount = 5, Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'), maximumGrainSize = (0.5,'kcal/mol'), minimumGrainCount = 250, method = 'modified strong collision', interpolationModel = ('Chebyshev', 6, 4), activeKRotor = True, activeJRotor = True, rmgmode = True, )
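# ---------------------------------------------------------------------------
# Illustrative post-processing sketch (not part of the Arkane/RMG input above;
# run it separately). It shows how two quantities encoded in this file can be
# evaluated by hand: a modified-Arrhenius high-pressure-limit rate coefficient,
#   k(T) = A * (T/T0)^n * exp(-Ea / (R*T)),
# and the heat capacity from the first five coefficients of one 7-term NASA
# polynomial segment, Cp(T)/R = a1 + a2*T + a3*T^2 + a4*T^3 + a5*T^4.
# The helper names and the choice of T = 1000 K are illustrative assumptions;
# the numerical parameters are copied from 'reaction1' and the N2 thermo entry.

import math

R_KJ = 8.314462618e-3   # gas constant in kJ/(mol*K), matching the Ea units above
R_J = 8.314462618       # gas constant in J/(mol*K)

def arrhenius_k(T, A, n, Ea, T0=1.0):
    # Units of k follow A (s^-1 for unimolecular, m^3/(mol*s) for bimolecular).
    return A * (T / T0) ** n * math.exp(-Ea / (R_KJ * T))

def nasa_cp(T, coeffs):
    # Heat capacity in J/(mol*K); coeffs[:5] are a1..a5 of one NASA segment,
    # valid only inside that segment's (Tmin, Tmax) window.
    a1, a2, a3, a4, a5 = coeffs[:5]
    return R_J * (a1 + a2 * T + a3 * T**2 + a4 * T**3 + a5 * T**4)

if __name__ == '__main__':
    # reaction1 (TS1): A = 5e12 s^-1, n = 0, Ea = 41.5431 kJ/mol, T0 = 1 K
    print('k_reaction1(1000 K) = %.3e s^-1' % arrhenius_k(1000.0, 5e12, 0.0, 41.5431))
    # Low-temperature NASA segment of N2 (valid 100-1817.04 K)
    n2_low = [3.61263, -0.00100893, 2.49898e-06, -1.43376e-09, 2.58636e-13]
    print('Cp_N2(1000 K) = %.2f J/(mol*K)' % nasa_cp(1000.0, n2_low))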
{ "blob_id": "63093190ee20e10698bd99dcea94ccf5d076a006", "index": 8921, "step-1": "<mask token>\n", "step-2": "species(label='C=C([CH]C)C(=C)[CH]C(24182)', structure=SMILES(\n '[CH2]C(=CC)C([CH2])=CC'), E0=(249.687, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700, \n 1750, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450, 1500, 700, \n 800, 1000, 1100, 1350, 1400, 900, 1100, 3000, 3033.33, 3066.67, 3100, \n 415, 465, 780, 850, 1435, 1475, 900, 1100, 2995, 3025, 975, 1000, 1300,\n 1375, 400, 500, 1630, 1680, 180], 'cm^-1')), HinderedRotor(inertia=(\n 0.735277, 'amu*angstrom^2'), symmetry=1, barrier=(16.9055, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.0632434,\n 'amu*angstrom^2'), symmetry=1, barrier=(29.514, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.737545, 'amu*angstrom^2'\n ), symmetry=1, barrier=(16.9576, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.732781, 'amu*angstrom^2'), symmetry=1, barrier\n =(16.8481, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.739219, 'amu*angstrom^2'), symmetry=1, barrier=(16.9961, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005, 0.0840749, -\n 5.09991e-05, 5.50851e-09, 4.14197e-12, 30198.9, 28.4131], Tmin=(100,\n 'K'), Tmax=(1039.09, 'K')), NASAPolynomial(coeffs=[18.1326, 0.0354522, \n -1.35159e-05, 2.44392e-09, -1.69358e-13, 25127.7, -67.5143], Tmin=(\n 1039.09, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(249.687, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)'\n ))\nspecies(label='CH3CHCCH2(18175)', structure=SMILES('C=C=CC'), E0=(145.615,\n 'kJ/mol'), modes=[HarmonicOscillator(frequencies=([2950, 3100, 1380, \n 975, 1025, 1650, 540, 610, 2055, 2750, 2800, 2850, 1350, 1500, 750, \n 1050, 1375, 1000, 3010, 987.5, 1337.5, 450, 1655], 'cm^-1')),\n HinderedRotor(inertia=(0.759584, 'amu*angstrom^2'), symmetry=1, barrier\n =(17.4643, 'kJ/mol'), semiclassical=False)], spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(54.0904, 'amu'), collisionModel=\n TransportData(shapeIndex=2, epsilon=(2996.71, 'J/mol'), sigma=(5.18551,\n 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(0, 'angstroms^3'\n ), rotrelaxcollnum=0, comment=\n 'Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)'\n ), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),\n T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=\n [2.74635, 0.0218189, 8.22353e-06, -2.14768e-08, 8.55624e-12, 17563.6, \n 12.7381], Tmin=(100, 'K'), Tmax=(1025.6, 'K')), NASAPolynomial(coeffs=[\n 6.82078, 0.0192338, -7.45622e-06, 1.36536e-09, -9.53195e-14, 16028, -\n 10.4333], Tmin=(1025.6, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax\n =(5000, 'K'), E0=(145.615, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf\n =(228.648, 'J/(mol*K)'), label='CH3CHCCH2', comment=\n 'Thermo library: DFT_QCI_thermo'))\nspecies(label='[CH2]C1([CH]C)CC1=CC(25275)', structure=SMILES(\n '[CH2]C1([CH]C)CC1=CC'), E0=(462.221, 'kJ/mol'), 
spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.263258, 0.0692237,\n -2.26363e-05, -1.35463e-08, 8.13734e-12, 55737.7, 31.4039], Tmin=(100,\n 'K'), Tmax=(1105.46, 'K')), NASAPolynomial(coeffs=[15.171, 0.0400578, -\n 1.66801e-05, 3.13624e-09, -2.2049e-13, 50927.8, -48.8594], Tmin=(\n 1105.46, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(462.221, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)'\n ))\nspecies(label='C=[C][CH]C(18176)', structure=SMILES('[CH2][C]=CC'), E0=(\n 361.056, 'kJ/mol'), modes=[HarmonicOscillator(frequencies=([1685, 370, \n 2750, 2800, 2850, 1350, 1500, 750, 1050, 1375, 1000, 3000, 3100, 440, \n 815, 1455, 1000, 3010, 987.5, 1337.5, 450, 1655], 'cm^-1')),\n HinderedRotor(inertia=(0.352622, 'amu*angstrom^2'), symmetry=1, barrier\n =(8.10748, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.828631, 'amu*angstrom^2'), symmetry=1, barrier=(19.0519, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(54.0904, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[2.42015, 0.030446, -\n 1.69076e-05, 4.64684e-09, -5.12013e-13, 43485.7, 14.8304], Tmin=(100,\n 'K'), Tmax=(2065.83, 'K')), NASAPolynomial(coeffs=[10.7464, 0.014324, -\n 5.20136e-06, 8.69079e-10, -5.48385e-14, 40045.6, -31.3799], Tmin=(\n 2065.83, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(361.056, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(274.378,\n 'J/(mol*K)'), comment=\n 'Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)'))\nspecies(label='[CH2]C(=CC)C(C)=[C]C(25412)', structure=SMILES(\n '[CH2]C(=CC)C(C)=[C]C'), E0=(336.03, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700, \n 1750, 1685, 370, 2750, 2762.5, 2775, 2787.5, 2800, 2812.5, 2825, 2837.5,\n 2850, 1350, 1380, 1410, 1440, 1470, 1500, 700, 750, 800, 1000, 1050, \n 1100, 1350, 1375, 1400, 900, 1000, 1100, 3000, 3100, 440, 815, 1455, \n 1000, 3010, 987.5, 1337.5, 450, 1655, 222.04], 'cm^-1')), HinderedRotor\n (inertia=(0.395973, 'amu*angstrom^2'), symmetry=1, barrier=(13.8694,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.396086,\n 'amu*angstrom^2'), symmetry=1, barrier=(13.8683, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.395737, 'amu*angstrom^2'\n ), symmetry=1, barrier=(13.8691, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395039, 'amu*angstrom^2'), symmetry=1, barrier\n =(13.8689, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.395901, 'amu*angstrom^2'), symmetry=1, barrier=(13.8689, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365, 0.0876489, -\n 7.20737e-05, 3.21805e-08, -5.96317e-12, 40565.5, 28.3373], Tmin=(100,\n 'K'), Tmax=(1264.63, 'K')), 
NASAPolynomial(coeffs=[14.5979, 0.041109, -\n 1.68732e-05, 3.08148e-09, -2.10818e-13, 36843.8, -46.1055], Tmin=(\n 1264.63, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(336.03, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)'\n ))\nspecies(label='[CH2]C(=[C]C)C(C)=CC(25413)', structure=SMILES(\n '[CH2]C(=[C]C)C(C)=CC'), E0=(336.03, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700, \n 1750, 1685, 370, 2750, 2762.5, 2775, 2787.5, 2800, 2812.5, 2825, 2837.5,\n 2850, 1350, 1380, 1410, 1440, 1470, 1500, 700, 750, 800, 1000, 1050, \n 1100, 1350, 1375, 1400, 900, 1000, 1100, 3000, 3100, 440, 815, 1455, \n 1000, 3010, 987.5, 1337.5, 450, 1655, 222.04], 'cm^-1')), HinderedRotor\n (inertia=(0.395973, 'amu*angstrom^2'), symmetry=1, barrier=(13.8694,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.396086,\n 'amu*angstrom^2'), symmetry=1, barrier=(13.8683, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.395737, 'amu*angstrom^2'\n ), symmetry=1, barrier=(13.8691, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395039, 'amu*angstrom^2'), symmetry=1, barrier\n =(13.8689, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.395901, 'amu*angstrom^2'), symmetry=1, barrier=(13.8689, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365, 0.0876489, -\n 7.20737e-05, 3.21805e-08, -5.96317e-12, 40565.5, 28.3373], Tmin=(100,\n 'K'), Tmax=(1264.63, 'K')), NASAPolynomial(coeffs=[14.5979, 0.041109, -\n 1.68732e-05, 3.08148e-09, -2.10818e-13, 36843.8, -46.1055], Tmin=(\n 1264.63, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(336.03, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)'\n ))\nspecies(label='[CH2]C(=CC)[C](C)C=C(24605)', structure=SMILES(\n '[CH2]C=C(C)C([CH2])=CC'), E0=(216.244, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700, \n 1750, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450, 1500, 700, \n 800, 1000, 1100, 1350, 1400, 900, 1100, 3000, 3033.33, 3066.67, 3100, \n 415, 465, 780, 850, 1435, 1475, 900, 1100, 2995, 3025, 975, 1000, 1300,\n 1375, 400, 500, 1630, 1680, 180], 'cm^-1')), HinderedRotor(inertia=(\n 0.712083, 'amu*angstrom^2'), symmetry=1, barrier=(16.3722, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.555659, 'amu*angstrom^2'\n ), symmetry=1, barrier=(96.3851, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0202512, 'amu*angstrom^2'), symmetry=1,\n barrier=(16.3711, 'kJ/mol'), semiclassical=False), HinderedRotor(\n inertia=(0.712008, 'amu*angstrom^2'), symmetry=1, barrier=(16.3705,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(4.19211,\n 'amu*angstrom^2'), symmetry=1, barrier=(96.3849, 
'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175, 0.0775021, \n -3.58132e-05, -7.55711e-09, 8.27771e-12, 26166.1, 29.3215], Tmin=(100,\n 'K'), Tmax=(1017.17, 'K')), NASAPolynomial(coeffs=[16.4341, 0.0376674, \n -1.41425e-05, 2.53759e-09, -1.75328e-13, 21504.4, -57.0638], Tmin=(\n 1017.17, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(216.244, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)'\n ))\nspecies(label='[CH2][C](C=C)C(C)=CC(24606)', structure=SMILES(\n '[CH2]C=C([CH2])C(C)=CC'), E0=(216.244, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175, \n 0.0775021, -3.58132e-05, -7.55711e-09, 8.27771e-12, 26166.1, 29.3215],\n Tmin=(100, 'K'), Tmax=(1017.17, 'K')), NASAPolynomial(coeffs=[16.4341, \n 0.0376674, -1.41425e-05, 2.53759e-09, -1.75328e-13, 21504.4, -57.0638],\n Tmin=(1017.17, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000,\n 'K'), E0=(216.244, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(\n 461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)'\n ))\nspecies(label='[CH2]C(=CC)[C]1CC1C(25414)', structure=SMILES(\n '[CH2]C(=CC)[C]1CC1C'), E0=(289.9, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.71289, 0.0520158, \n 3.84829e-05, -8.55933e-08, 3.61457e-11, 35003.5, 26.4903], Tmin=(100,\n 'K'), Tmax=(968.714, 'K')), NASAPolynomial(coeffs=[16.7686, 0.0352996, \n -1.24057e-05, 2.26286e-09, -1.62921e-13, 29566.5, -62.466], Tmin=(\n 968.714, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(289.9, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)'\n ))\nspecies(label='[CH2][C]1C(=CC)CC1C(25415)', structure=SMILES(\n '[CH2]C1=C([CH]C)CC1C'), E0=(304.572, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.583091, 0.0531885,\n 4.0938e-05, -9.08388e-08, 3.83549e-11, 36774.2, 26.4705], Tmin=(100,\n 'K'), Tmax=(972.301, 'K')), NASAPolynomial(coeffs=[18.2947, 0.0339462, \n -1.21014e-05, 2.24934e-09, -1.64353e-13, 30795.4, -71.5147], Tmin=(\n 
972.301, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(304.572, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_S)'\n ))\nspecies(label='CH2(S)(23)', structure=SMILES('[CH2]'), E0=(419.862,\n 'kJ/mol'), modes=[HarmonicOscillator(frequencies=([1369.36, 2789.41, \n 2993.36], 'cm^-1'))], spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(14.0266, 'amu'), collisionModel=TransportData(\n shapeIndex=2, epsilon=(1197.29, 'J/mol'), sigma=(3.8, 'angstroms'),\n dipoleMoment=(0, 'C*m'), polarizability=(0, 'angstroms^3'),\n rotrelaxcollnum=0.0, comment='GRI-Mech'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[4.19195, -0.00230793, \n 8.0509e-06, -6.60123e-09, 1.95638e-12, 50484.3, -0.754589], Tmin=(200,\n 'K'), Tmax=(1000, 'K')), NASAPolynomial(coeffs=[2.28556, 0.00460255, -\n 1.97412e-06, 4.09548e-10, -3.34695e-14, 50922.4, 8.67684], Tmin=(1000,\n 'K'), Tmax=(3000, 'K'))], Tmin=(200, 'K'), Tmax=(3000, 'K'), E0=(\n 419.862, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(58.2013,\n 'J/(mol*K)'), label='CH2(S)', comment=\n 'Thermo library: Klippenstein_Glarborg2016'))\nspecies(label='[CH2]C(=C)C([CH2])=CC(25416)', structure=SMILES(\n '[CH2]C(=C)C([CH2])=CC'), E0=(285.713, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700, \n 1750, 2950, 3100, 1380, 975, 1025, 1650, 2750, 2800, 2850, 1350, 1500, \n 750, 1050, 1375, 1000, 3000, 3033.33, 3066.67, 3100, 415, 465, 780, 850,\n 1435, 1475, 900, 1100, 3010, 987.5, 1337.5, 450, 1655, 311.383],\n 'cm^-1')), HinderedRotor(inertia=(0.327475, 'amu*angstrom^2'), symmetry\n =1, barrier=(22.5291, 'kJ/mol'), semiclassical=False), HinderedRotor(\n inertia=(0.327466, 'amu*angstrom^2'), symmetry=1, barrier=(22.5294,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.327318,\n 'amu*angstrom^2'), symmetry=1, barrier=(22.5272, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.327483, 'amu*angstrom^2'\n ), symmetry=1, barrier=(22.5297, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(94.1543, 'amu'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0\n =(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[\n 0.335271, 0.0676667, -2.76626e-05, -1.62749e-08, 1.21982e-11, 34506.8, \n 24.024], Tmin=(100, 'K'), Tmax=(980.594, 'K')), NASAPolynomial(coeffs=[\n 17.5531, 0.0266059, -9.47854e-06, 1.70194e-09, -1.19937e-13, 29727.4, -\n 65.8563], Tmin=(980.594, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(285.713, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),\n CpInf=(390.78, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)'\n ))\nspecies(label='C=C([CH]C)C[C]=CC(24184)', structure=SMILES(\n '[CH2]C(=CC)C[C]=CC'), E0=(366.985, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([2995, 3025, 975, 1000, 1300, 1375, 400,\n 500, 1630, 1680, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450, \n 
1500, 700, 800, 1000, 1100, 1350, 1400, 900, 1100, 1685, 370, 350, 440,\n 435, 1725, 2750, 2850, 1437.5, 1250, 1305, 750, 350, 3000, 3100, 440, \n 815, 1455, 1000, 180, 579.702], 'cm^-1')), HinderedRotor(inertia=(\n 0.147406, 'amu*angstrom^2'), symmetry=1, barrier=(3.38916, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.64226, 'amu*angstrom^2'),\n symmetry=1, barrier=(14.7668, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.64164, 'amu*angstrom^2'), symmetry=1, barrier=\n (14.7526, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.643937, 'amu*angstrom^2'), symmetry=1, barrier=(14.8054, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.145327, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.34136, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(108.181, 'amu'),\n collisionModel=TransportData(shapeIndex=2, epsilon=(3683.66, 'J/mol'),\n sigma=(6.4482, 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(0,\n 'angstroms^3'), rotrelaxcollnum=0, comment=\n 'Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)'\n ), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),\n T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=\n [0.29648, 0.0786067, -5.42868e-05, 1.96375e-08, -2.97459e-12, 44273.2, \n 31.2372], Tmin=(100, 'K'), Tmax=(1490.43, 'K')), NASAPolynomial(coeffs=\n [13.9025, 0.0420909, -1.75363e-05, 3.199e-09, -2.17227e-13, 40217.5, -\n 39.8334], Tmin=(1490.43, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(366.985, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),\n CpInf=(461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)'\n ))\nspecies(label='CC=C1CCC1=CC(25269)', structure=SMILES('CC=C1CCC1=CC'), E0=(\n 114.107, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.677799, 0.0585738, \n 5.80411e-06, -4.1598e-08, 1.78951e-11, 13856, 25.5085], Tmin=(100, 'K'),\n Tmax=(1034.79, 'K')), NASAPolynomial(coeffs=[13.4814, 0.0415234, -\n 1.65073e-05, 3.07348e-09, -2.16896e-13, 9469.28, -45.0922], Tmin=(\n 1034.79, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(114.107, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(473.925,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)'\n ))\nspecies(label='CH2(19)', structure=SMILES('[CH2]'), E0=(381.563, 'kJ/mol'),\n modes=[HarmonicOscillator(frequencies=([1032.72, 2936.3, 3459], 'cm^-1'\n ))], spinMultiplicity=3, opticalIsomers=1, molecularWeight=(14.0266,\n 'amu'), collisionModel=TransportData(shapeIndex=2, epsilon=(1197.29,\n 'J/mol'), sigma=(3.8, 'angstroms'), dipoleMoment=(0, 'C*m'),\n polarizability=(0, 'angstroms^3'), rotrelaxcollnum=0.0, comment=\n 'GRI-Mech'), energyTransferModel=SingleExponentialDown(alpha0=(3.5886,\n 'kJ/mol'), T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[\n NASAPolynomial(coeffs=[3.8328, 0.000224446, 
4.68033e-06, -6.04743e-09, \n 2.59009e-12, 45920.8, 1.40666], Tmin=(200, 'K'), Tmax=(1000, 'K')),\n NASAPolynomial(coeffs=[3.16229, 0.00281798, -7.56235e-07, 5.05446e-11, \n 5.65236e-15, 46099.1, 4.77656], Tmin=(1000, 'K'), Tmax=(3000, 'K'))],\n Tmin=(200, 'K'), Tmax=(3000, 'K'), E0=(381.563, 'kJ/mol'), Cp0=(33.2579,\n 'J/(mol*K)'), CpInf=(58.2013, 'J/(mol*K)'), label='CH2', comment=\n 'Thermo library: Klippenstein_Glarborg2016'))\nspecies(label='[CH2]C([C]=CC)=CC(25417)', structure=SMILES(\n '[CH2]C([C]=CC)=CC'), E0=(334.774, 'kJ/mol'), modes=[HarmonicOscillator\n (frequencies=([350, 440, 435, 1725, 1685, 370, 2750, 2770, 2790, 2810, \n 2830, 2850, 1350, 1400, 1450, 1500, 700, 800, 1000, 1100, 1350, 1400, \n 900, 1100, 3000, 3100, 440, 815, 1455, 1000, 2995, 3025, 975, 1000, \n 1300, 1375, 400, 500, 1630, 1680, 180], 'cm^-1')), HinderedRotor(\n inertia=(0.7606, 'amu*angstrom^2'), symmetry=1, barrier=(17.4877,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.760854,\n 'amu*angstrom^2'), symmetry=1, barrier=(17.4935, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.760586, 'amu*angstrom^2'\n ), symmetry=1, barrier=(17.4874, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(2.15146, 'amu*angstrom^2'), symmetry=1, barrier=\n (49.4663, 'kJ/mol'), semiclassical=False)], spinMultiplicity=3,\n opticalIsomers=1, molecularWeight=(94.1543, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.352604, 0.0734369,\n -5.91187e-05, 2.57941e-08, -4.60694e-12, 40400.9, 25.1788], Tmin=(100,\n 'K'), Tmax=(1327.42, 'K')), NASAPolynomial(coeffs=[14.2321, 0.0316126, \n -1.18565e-05, 2.05761e-09, -1.36512e-13, 36716.1, -45.7131], Tmin=(\n 1327.42, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(334.774, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(390.78,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)'\n ))\nspecies(label='[CH2]C1([CH]C)C(=C)C1C(25296)', structure=SMILES(\n '[CH2]C1([CH]C)C(=C)C1C'), E0=(466.494, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.29276, 0.0655305, \n -4.50464e-06, -3.74661e-08, 1.7759e-11, 56253.7, 30.0992], Tmin=(100,\n 'K'), Tmax=(1027.4, 'K')), NASAPolynomial(coeffs=[16.6435, 0.0372633, -\n 1.49065e-05, 2.81296e-09, -2.01072e-13, 51026, -58.316], Tmin=(1027.4,\n 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(\n 466.494, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)'\n ))\nspecies(label='H(3)', structure=SMILES('[H]'), E0=(211.792, 'kJ/mol'),\n spinMultiplicity=1, opticalIsomers=1, molecularWeight=(1.00794, 'amu'),\n collisionModel=TransportData(shapeIndex=0, epsilon=(1205.6, 'J/mol'),\n sigma=(2.05, 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(0,\n 'angstroms^3'), rotrelaxcollnum=0.0, 
comment='GRI-Mech'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0\n =(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[\n 2.5, 9.24385e-15, -1.3678e-17, 6.66185e-21, -1.00107e-24, 25472.7, -\n 0.459566], Tmin=(100, 'K'), Tmax=(3459.6, 'K')), NASAPolynomial(coeffs=\n [2.5, 9.20456e-12, -3.58608e-15, 6.15199e-19, -3.92042e-23, 25472.7, -\n 0.459566], Tmin=(3459.6, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(211.792, 'kJ/mol'), Cp0=(20.7862, 'J/(mol*K)'),\n CpInf=(20.7862, 'J/(mol*K)'), label='H', comment=\n 'Thermo library: BurkeH2O2'))\nspecies(label='[CH2]C(=CC)C(=C)C=C(24604)', structure=SMILES(\n '[CH2]C(=CC)C(=C)C=C'), E0=(242.677, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([325, 375, 415, 465, 420, 450, 1700, \n 1750, 2950, 3000, 3050, 3100, 1330, 1430, 900, 1050, 1000, 1050, 1600, \n 1700, 2750, 2800, 2850, 1350, 1500, 750, 1050, 1375, 1000, 3000, 3100, \n 440, 815, 1455, 1000, 2995, 3025, 975, 1000, 1300, 1375, 400, 500, 1630,\n 1680, 181.962, 683.313], 'cm^-1')), HinderedRotor(inertia=(0.669842,\n 'amu*angstrom^2'), symmetry=1, barrier=(19.1337, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.0582339,\n 'amu*angstrom^2'), symmetry=1, barrier=(19.1767, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.83204, 'amu*angstrom^2'),\n symmetry=1, barrier=(19.1302, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(4.52237, 'amu*angstrom^2'), symmetry=1, barrier=\n (104.569, 'kJ/mol'), semiclassical=False)], spinMultiplicity=2,\n opticalIsomers=1, molecularWeight=(107.173, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.293043, 0.0682771,\n -2.00337e-05, -2.05401e-08, 1.21516e-11, 29332.3, 27.0261], Tmin=(100,\n 'K'), Tmax=(1018.57, 'K')), NASAPolynomial(coeffs=[15.7386, 0.0358123, \n -1.37404e-05, 2.51366e-09, -1.76142e-13, 24723.4, -54.9529], Tmin=(\n 1018.57, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(242.677, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(440.667,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)'\n ))\nspecies(label='[CH2]CC(=C)C([CH2])=CC(25418)', structure=SMILES(\n '[CH2]CC(=C)C([CH2])=CC'), E0=(316.814, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([3010, 987.5, 1337.5, 450, 1655, 2750, \n 2800, 2850, 1350, 1500, 750, 1050, 1375, 1000, 2950, 3100, 1380, 975, \n 1025, 1650, 325, 375, 415, 465, 420, 450, 1700, 1750, 2750, 2850, \n 1437.5, 1250, 1305, 750, 350, 3000, 3033.33, 3066.67, 3100, 415, 465, \n 780, 850, 1435, 1475, 900, 1100, 180, 180], 'cm^-1')), HinderedRotor(\n inertia=(0.0368535, 'amu*angstrom^2'), symmetry=1, barrier=(17.9864,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.00736317,\n 'amu*angstrom^2'), symmetry=1, barrier=(3.60618, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.781153, 'amu*angstrom^2'\n ), symmetry=1, barrier=(17.9602, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.779478, 'amu*angstrom^2'), symmetry=1, barrier\n =(17.9217, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.781104, 'amu*angstrom^2'), symmetry=1, barrier=(17.9591, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n 
molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925, 0.0836004, -\n 5.1879e-05, 7.14877e-09, 3.44908e-12, 38270.9, 31.5928], Tmin=(100, 'K'\n ), Tmax=(1044.14, 'K')), NASAPolynomial(coeffs=[17.9255, 0.0352115, -\n 1.34219e-05, 2.42456e-09, -1.67785e-13, 33276.3, -63.0036], Tmin=(\n 1044.14, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(316.814, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)'\n ))\nspecies(label='[CH]=C(CC)C([CH2])=CC(25419)', structure=SMILES(\n '[CH]=C(CC)C([CH2])=CC'), E0=(358.664, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([3120, 650, 792.5, 1650, 3010, 987.5, \n 1337.5, 450, 1655, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450,\n 1500, 700, 800, 1000, 1100, 1350, 1400, 900, 1100, 325, 375, 415, 465, \n 420, 450, 1700, 1750, 2750, 2850, 1437.5, 1250, 1305, 750, 350, 3000, \n 3100, 440, 815, 1455, 1000, 180], 'cm^-1')), HinderedRotor(inertia=(\n 0.701639, 'amu*angstrom^2'), symmetry=1, barrier=(16.1321, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.344302, 'amu*angstrom^2'\n ), symmetry=1, barrier=(16.1602, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0492932, 'amu*angstrom^2'), symmetry=1,\n barrier=(16.1378, 'kJ/mol'), semiclassical=False), HinderedRotor(\n inertia=(0.702005, 'amu*angstrom^2'), symmetry=1, barrier=(16.1405,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.702379,\n 'amu*angstrom^2'), symmetry=1, barrier=(16.1491, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616, 0.0864938, -\n 5.84569e-05, 1.27697e-08, 1.75707e-12, 43308.4, 30.6389], Tmin=(100,\n 'K'), Tmax=(1047.28, 'K')), NASAPolynomial(coeffs=[18.4195, 0.034593, -\n 1.31104e-05, 2.35762e-09, -1.62637e-13, 38242.2, -66.6572], Tmin=(\n 1047.28, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(358.664, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)'\n ))\nspecies(label='[CH2]C(=[C]C)C(=C)CC(25420)', structure=SMILES(\n '[CH2]C(=[C]C)C(=C)CC'), E0=(349.41, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([1685, 370, 2750, 2770, 2790, 2810, \n 2830, 2850, 1350, 1400, 1450, 1500, 700, 800, 1000, 1100, 1350, 1400, \n 900, 1100, 2950, 3100, 1380, 975, 1025, 1650, 325, 375, 415, 465, 420, \n 450, 1700, 1750, 2750, 2850, 1437.5, 1250, 1305, 750, 350, 3000, 3100, \n 440, 815, 1455, 1000, 180, 180], 'cm^-1')), HinderedRotor(inertia=(\n 0.159905, 'amu*angstrom^2'), symmetry=1, barrier=(15.9368, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.693159, 'amu*angstrom^2'\n ), symmetry=1, barrier=(15.9371, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693127, 
'amu*angstrom^2'), symmetry=1, barrier\n =(15.9364, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.693165, 'amu*angstrom^2'), symmetry=1, barrier=(15.9372, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.0150632,\n 'amu*angstrom^2'), symmetry=1, barrier=(15.9371, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231, 0.089245, -\n 7.16619e-05, 3.00631e-08, -5.07891e-12, 42198.9, 31.1306], Tmin=(100,\n 'K'), Tmax=(1412.15, 'K')), NASAPolynomial(coeffs=[19.0319, 0.0336833, \n -1.2643e-05, 2.20036e-09, -1.46165e-13, 36659.1, -70.2702], Tmin=(\n 1412.15, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(349.41, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(461.453,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)'\n ))\nspecies(label='[CH]=C([CH]C)C(C)=CC(25421)', structure=SMILES(\n '[CH]C(=CC)C(C)=CC'), E0=(317.373, 'kJ/mol'), modes=[HarmonicOscillator\n (frequencies=([325, 375, 415, 465, 420, 450, 1700, 1750, 2750, 2762.5, \n 2775, 2787.5, 2800, 2812.5, 2825, 2837.5, 2850, 1350, 1380, 1410, 1440,\n 1470, 1500, 700, 750, 800, 1000, 1050, 1100, 1350, 1375, 1400, 900, \n 1000, 1100, 2995, 3025, 975, 1000, 1300, 1375, 400, 500, 1630, 1680, \n 200, 800, 1200, 1600], 'cm^-1')), HinderedRotor(inertia=(0.156089,\n 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'), symmetry=1, barrier\n =(3.5888, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.156089, 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(108.181, 'amu'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0\n =(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-\n 0.247945, 0.0873521, -6.16843e-05, 2.31486e-08, -3.62747e-12, 38328.8, \n 29.1665], Tmin=(100, 'K'), Tmax=(1460.93, 'K')), NASAPolynomial(coeffs=\n [15.297, 0.0447902, -1.7984e-05, 3.20673e-09, -2.14924e-13, 33786.8, -\n 51.7212], Tmin=(1460.93, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(317.373, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),\n CpInf=(461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)'\n ))\nspecies(label='[CH2][C](C=C)C(=C)CC(24623)', structure=SMILES(\n '[CH2]C(C=C)=C([CH2])CC'), E0=(228.159, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728, \n 0.0733281, -1.6094e-05, 
-3.35123e-08, 1.88363e-11, 27601.1, 30.4448],\n Tmin=(100, 'K'), Tmax=(975.095, 'K')), NASAPolynomial(coeffs=[18.3695, \n 0.0342638, -1.21408e-05, 2.16747e-09, -1.52112e-13, 22274, -66.8493],\n Tmin=(975.095, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000,\n 'K'), E0=(228.159, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(\n 461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)'\n ))\nspecies(label='C[CH][C]1CCC1=CC(25422)', structure=SMILES(\n 'C[CH]C1CCC=1[CH]C'), E0=(303.292, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.788866, 0.0500701,\n 4.22235e-05, -8.64809e-08, 3.53174e-11, 36611.5, 25.2586], Tmin=(100,\n 'K'), Tmax=(987.239, 'K')), NASAPolynomial(coeffs=[16.2187, 0.0373502, \n -1.4111e-05, 2.65357e-09, -1.92503e-13, 31138.2, -61.2734], Tmin=(\n 987.239, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(303.292, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)'\n ))\nspecies(label='[CH2][C]1C(=C)C(C)C1C(25423)', structure=SMILES(\n '[CH2]C1=C([CH2])C(C)C1C'), E0=(305.852, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.377097, 0.0563026,\n 3.9705e-05, -9.53284e-08, 4.14811e-11, 36937, 26.2973], Tmin=(100, 'K'),\n Tmax=(959.735, 'K')), NASAPolynomial(coeffs=[20.4056, 0.0304853, -\n 1.006e-05, 1.83774e-09, -1.35603e-13, 30437.2, -83.3398], Tmin=(959.735,\n 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(\n 305.852, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)'\n ))\nspecies(label='C=CC(=C)C(C)=CC(24616)', structure=SMILES('C=CC(=C)C(C)=CC'),\n E0=(91.1774, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.236638, 0.0713806, -\n 3.04205e-05, -5.26762e-09, 5.54498e-12, 11111.2, 26.9518], Tmin=(100,\n 'K'), Tmax=(1093.32, 'K')), NASAPolynomial(coeffs=[14.1536, 0.040705, -\n 1.6104e-05, 2.93544e-09, -2.02595e-13, 6858.32, -46.9636], Tmin=(\n 1093.32, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(91.1774, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + 
group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)'\n ))\nspecies(label='C=[C]C(C)C(=C)[CH]C(24183)', structure=SMILES(\n '[CH2]C(=CC)C(C)[C]=C'), E0=(369.44, 'kJ/mol'), modes=[\n HarmonicOscillator(frequencies=([1685, 370, 3010, 987.5, 1337.5, 450, \n 1655, 2750, 2770, 2790, 2810, 2830, 2850, 1350, 1400, 1450, 1500, 700, \n 800, 1000, 1100, 1350, 1400, 900, 1100, 2950, 3100, 1380, 975, 1025, \n 1650, 1380, 1390, 370, 380, 2900, 435, 350, 440, 435, 1725, 3000, 3100,\n 440, 815, 1455, 1000, 345.333, 347.343], 'cm^-1')), HinderedRotor(\n inertia=(0.119405, 'amu*angstrom^2'), symmetry=1, barrier=(9.93037,\n 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(0.281457,\n 'amu*angstrom^2'), symmetry=1, barrier=(24.022, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.116909, 'amu*angstrom^2'\n ), symmetry=1, barrier=(9.94809, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.117447, 'amu*angstrom^2'), symmetry=1, barrier\n =(9.9744, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.116555, 'amu*angstrom^2'), symmetry=1, barrier=(9.93684, 'kJ/mol'),\n semiclassical=False)], spinMultiplicity=3, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), collisionModel=TransportData(\n shapeIndex=2, epsilon=(3625.33, 'J/mol'), sigma=(6.4092, 'angstroms'),\n dipoleMoment=(0, 'C*m'), polarizability=(0, 'angstroms^3'),\n rotrelaxcollnum=0, comment=\n 'Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)'\n ), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),\n T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=\n [0.299693, 0.0839308, -6.74533e-05, 3.06742e-08, -6.02582e-12, 44564.4,\n 29.0122], Tmin=(100, 'K'), Tmax=(1163.73, 'K')), NASAPolynomial(coeffs=\n [10.857, 0.0476425, -2.06788e-05, 3.8782e-09, -2.69295e-13, 42107.3, -\n 23.5217], Tmin=(1163.73, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'),\n Tmax=(5000, 'K'), E0=(369.44, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'),\n CpInf=(461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)'\n ))\nspecies(label='C=C1C(=CC)CC1C(25265)', structure=SMILES('C=C1C(=CC)CC1C'),\n E0=(118.381, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.689924, 0.0550304, \n 2.3689e-05, -6.56265e-08, 2.77602e-11, 14372.8, 24.9628], Tmin=(100,\n 'K'), Tmax=(993.204, 'K')), NASAPolynomial(coeffs=[15.3775, 0.0380508, \n -1.43595e-05, 2.66472e-09, -1.90565e-13, 9375.16, -56.2678], Tmin=(\n 993.204, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(118.381, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(473.925,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)'\n ))\nspecies(label='CHCH3(T)(95)', structure=SMILES('[CH]C'), E0=(343.893,\n 'kJ/mol'), modes=[HarmonicOscillator(frequencies=([2750, 2800, 2850, \n 1350, 1500, 750, 1050, 1375, 1000, 592.414, 4000], 'cm^-1')),\n HinderedRotor(inertia=(0.00438699, 
'amu*angstrom^2'), symmetry=1,\n barrier=(26.7685, 'kJ/mol'), semiclassical=False)], spinMultiplicity=3,\n opticalIsomers=1, molecularWeight=(28.0532, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[3.82363, -\n 0.000909515, 3.2138e-05, -3.7348e-08, 1.3309e-11, 41371.4, 7.10948],\n Tmin=(100, 'K'), Tmax=(960.812, 'K')), NASAPolynomial(coeffs=[4.30487, \n 0.00943069, -3.27559e-06, 5.95121e-10, -4.27307e-14, 40709.1, 1.84202],\n Tmin=(960.812, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000,\n 'K'), E0=(343.893, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(\n 128.874, 'J/(mol*K)'), label='CHCH3(T)', comment=\n 'Thermo library: DFT_QCI_thermo'))\nspecies(label='[CH2]C([C]=C)=CC(24774)', structure=SMILES(\n '[CH2]C([C]=C)=CC'), E0=(370.8, 'kJ/mol'), modes=[HarmonicOscillator(\n frequencies=([1685, 370, 2750, 2800, 2850, 1350, 1500, 750, 1050, 1375,\n 1000, 3010, 987.5, 1337.5, 450, 1655, 2950, 3100, 1380, 975, 1025, 1650,\n 350, 440, 435, 1725, 3000, 3100, 440, 815, 1455, 1000, 180], 'cm^-1')),\n HinderedRotor(inertia=(1.17315, 'amu*angstrom^2'), symmetry=1, barrier=\n (26.9731, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 1.17496, 'amu*angstrom^2'), symmetry=1, barrier=(27.0146, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(1.1727, 'amu*angstrom^2'),\n symmetry=1, barrier=(26.9626, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(80.1277, 'amu'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0\n =(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[\n 1.0818, 0.0569416, -3.56598e-05, 4.1841e-09, 3.20998e-12, 44708.4, \n 20.7527], Tmin=(100, 'K'), Tmax=(982.69, 'K')), NASAPolynomial(coeffs=[\n 12.9204, 0.0239405, -8.46845e-06, 1.46434e-09, -9.91425e-14, 41648.3, -\n 39.886], Tmin=(982.69, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=\n (5000, 'K'), E0=(370.8, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(\n 320.107, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)'\n ))\nspecies(label='[CH]=C([CH]C)C(=C)CC(25424)', structure=SMILES(\n '[CH]C(=CC)C(=C)CC'), E0=(330.753, 'kJ/mol'), modes=[HarmonicOscillator\n (frequencies=([2750, 2850, 1437.5, 1250, 1305, 750, 350, 2950, 3100, \n 1380, 975, 1025, 1650, 3010, 987.5, 1337.5, 450, 1655, 2750, 2770, 2790,\n 2810, 2830, 2850, 1350, 1400, 1450, 1500, 700, 800, 1000, 1100, 1350, \n 1400, 900, 1100, 325, 375, 415, 465, 420, 450, 1700, 1750, 200, 800, \n 1066.67, 1333.33, 1600], 'cm^-1')), HinderedRotor(inertia=(0.156089,\n 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'), symmetry=1, barrier\n =(3.5888, 'kJ/mol'), semiclassical=False), HinderedRotor(inertia=(\n 0.156089, 'amu*angstrom^2'), symmetry=1, barrier=(3.5888, 'kJ/mol'),\n semiclassical=False), HinderedRotor(inertia=(0.156089, 'amu*angstrom^2'\n ), symmetry=1, barrier=(3.5888, 'kJ/mol'), semiclassical=False)],\n spinMultiplicity=3, opticalIsomers=1, molecularWeight=(108.181, 'amu'),\n energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0\n =(300, 'K'), n=0.85), 
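# --- Illustrative sketch (not part of the mechanism file) ---------------------------------
# Every species here carries the same collisional energy-transfer model,
# SingleExponentialDown(alpha0=3.5886 kJ/mol, T0=300 K, n=0.85), in which the average energy
# transferred per deactivating collision is taken to scale as alpha0*(T/T0)**n. The helper
# name below is my own and is only meant to make the three parameters concrete.
def delta_e_down(T, alpha0=3.5886, T0=300.0, n=0.85):
    """Average <dE>_down in kJ/mol at temperature T (K) for the parameters quoted above."""
    return alpha0 * (T / T0)**n

print(delta_e_down(1000.0))   # roughly 10.0 kJ/mol
# -------------------------------------------------------------------------------------------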
thermo=NASA(polynomials=[NASAPolynomial(coeffs=[-\n 0.442166, 0.0858934, -5.1432e-05, 9.5936e-09, 1.54315e-12, 39950.3, \n 30.9724], Tmin=(100, 'K'), Tmax=(1106.5, 'K')), NASAPolynomial(coeffs=[\n 16.3579, 0.0427111, -1.66841e-05, 2.99222e-09, -2.04007e-13, 35158.1, -\n 56.633], Tmin=(1106.5, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=\n (5000, 'K'), E0=(330.753, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=\n (461.453, 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)'\n ))\nspecies(label='C=CC(=C)C(=C)CC(24630)', structure=SMILES('C=CC(=C)C(=C)CC'),\n E0=(104.558, 'kJ/mol'), spinMultiplicity=1, opticalIsomers=1,\n molecularWeight=(108.181, 'amu'), energyTransferModel=\n SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85),\n thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.296747, 0.0670054, -\n 1.0269e-05, -3.13536e-08, 1.59568e-11, 12721.3, 27.8384], Tmin=(100,\n 'K'), Tmax=(1010.3, 'K')), NASAPolynomial(coeffs=[15.6889, 0.0379462, -\n 1.44599e-05, 2.64736e-09, -1.86033e-13, 7984.11, -54.6302], Tmin=(\n 1010.3, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0\n =(104.558, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(465.61,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)'\n ))\nspecies(label='C=C1C(=C)C(C)C1C(25274)', structure=SMILES(\n 'C=C1C(=C)C(C)C1C'), E0=(122.654, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1, molecularWeight=(108.181, 'amu'), energyTransferModel\n =SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85\n ), thermo=NASA(polynomials=[NASAPolynomial(coeffs=[0.691732, 0.0515838,\n 4.13669e-05, -8.96066e-08, 3.77135e-11, 14890, 23.0693], Tmin=(100, 'K'\n ), Tmax=(969.873, 'K')), NASAPolynomial(coeffs=[17.4573, 0.0342784, -\n 1.20439e-05, 2.21718e-09, -1.61071e-13, 9199.74, -69.8715], Tmin=(\n 969.873, 'K'), Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'),\n E0=(122.654, 'kJ/mol'), Cp0=(33.2579, 'J/(mol*K)'), CpInf=(473.925,\n 'J/(mol*K)'), comment=\n 'Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)'\n ))\nspecies(label='N2', structure=SMILES('N#N'), E0=(-8.69489, 'kJ/mol'),\n spinMultiplicity=1, opticalIsomers=1, molecularWeight=(28.0135, 'amu'),\n collisionModel=TransportData(shapeIndex=1, epsilon=(810.913, 'J/mol'),\n sigma=(3.621, 'angstroms'), dipoleMoment=(0, 'C*m'), polarizability=(\n 1.76, 'angstroms^3'), rotrelaxcollnum=4.0, comment=\n 'PrimaryTransportLibrary'), energyTransferModel=SingleExponentialDown(\n alpha0=(3.5886, 'kJ/mol'), T0=(300, 'K'), n=0.85), thermo=NASA(\n polynomials=[NASAPolynomial(coeffs=[3.61263, -0.00100893, 2.49898e-06, \n -1.43376e-09, 2.58636e-13, -1051.1, 2.6527], Tmin=(100, 'K'), Tmax=(\n 1817.04, 'K')), NASAPolynomial(coeffs=[2.9759, 0.00164141, -7.19722e-07,\n 1.25378e-10, -7.91526e-15, -1025.84, 5.53757], Tmin=(1817.04, 'K'),\n Tmax=(5000, 'K'))], Tmin=(100, 'K'), Tmax=(5000, 'K'), E0=(-8.69489,\n 'kJ/mol'), Cp0=(29.1007, 'J/(mol*K)'), 
CpInf=(37.4151, 'J/(mol*K)'),\n label='N2', comment='Thermo library: BurkeH2O2'))\nspecies(label='Ne', structure=SMILES('[Ne]'), E0=(-6.19738, 'kJ/mol'),\n spinMultiplicity=1, opticalIsomers=1, molecularWeight=(20.1797, 'amu'),\n collisionModel=TransportData(shapeIndex=0, epsilon=(1235.53, 'J/mol'),\n sigma=(3.758e-10, 'm'), dipoleMoment=(0, 'C*m'), polarizability=(0,\n 'angstroms^3'), rotrelaxcollnum=0, comment=\n 'Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!'\n ), energyTransferModel=SingleExponentialDown(alpha0=(3.5886, 'kJ/mol'),\n T0=(300, 'K'), n=0.85), thermo=NASA(polynomials=[NASAPolynomial(coeffs=\n [2.5, 0, 0, 0, 0, -745.375, 3.35532], Tmin=(200, 'K'), Tmax=(1000, 'K')\n ), NASAPolynomial(coeffs=[2.5, 0, 0, 0, 0, -745.375, 3.35532], Tmin=(\n 1000, 'K'), Tmax=(6000, 'K'))], Tmin=(200, 'K'), Tmax=(6000, 'K'), E0=(\n -6.19738, 'kJ/mol'), Cp0=(20.7862, 'J/(mol*K)'), CpInf=(20.7862,\n 'J/(mol*K)'), label='Ne', comment='Thermo library: primaryThermoLibrary'))\ntransitionState(label='TS1', E0=(291.23, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS2', E0=(462.221, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS3', E0=(538.699, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS4', E0=(497.951, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS5', E0=(380.338, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS6', E0=(399.474, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS7', E0=(350.103, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS8', E0=(722.113, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS9', E0=(343.259, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS10', E0=(380.132, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS11', E0=(705.575, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS12', E0=(537.022, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS13', E0=(257.971, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS14', E0=(716.337, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS15', E0=(466.494, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS16', E0=(454.469, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS17', E0=(430.619, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS18', E0=(503.849, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS19', E0=(393.718, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS20', E0=(361.682, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS21', E0=(350.103, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS22', E0=(380.132, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS23', E0=(375.044, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS24', E0=(274.66, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS25', E0=(463.915, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS26', E0=(257.971, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS27', 
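# --- Illustrative sketch (not part of the mechanism file) ---------------------------------
# The thermo=NASA(polynomials=[NASAPolynomial(coeffs=[...]), ...]) entries above store
# standard 7-coefficient NASA polynomials. The evaluator below shows how Cp, H and S follow
# from those coefficients; in practice RMG/Arkane does this internally. The coefficients
# used in the check are copied from the low-temperature polynomial of the H-atom entry
# above; function and variable names are my own.
import math

R_GAS = 8.314462618  # J/(mol*K)

def nasa_cp(c, T):
    """Cp(T) in J/(mol*K) from NASA-7 coefficients a1..a7."""
    a1, a2, a3, a4, a5, _, _ = c
    return R_GAS * (a1 + a2*T + a3*T**2 + a4*T**3 + a5*T**4)

def nasa_h(c, T):
    """H(T) in J/mol."""
    a1, a2, a3, a4, a5, a6, _ = c
    return R_GAS * T * (a1 + a2*T/2 + a3*T**2/3 + a4*T**3/4 + a5*T**4/5 + a6/T)

def nasa_s(c, T):
    """S(T) in J/(mol*K)."""
    a1, a2, a3, a4, a5, _, a7 = c
    return R_GAS * (a1*math.log(T) + a2*T + a3*T**2/2 + a4*T**3/3 + a5*T**4/4 + a7)

# Low-T polynomial of species 'H' above (valid 100-3459.6 K):
h_atom = [2.5, 9.24385e-15, -1.3678e-17, 6.66185e-21, -1.00107e-24, 25472.7, -0.459566]
print(nasa_cp(h_atom, 1000.0))   # ~20.79 J/(mol*K), i.e. 2.5*R, as expected for an atom
# -------------------------------------------------------------------------------------------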
E0=(714.692, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS28', E0=(375.062, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS29', E0=(258.055, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\ntransitionState(label='TS30', E0=(257.971, 'kJ/mol'), spinMultiplicity=1,\n opticalIsomers=1)\nreaction(label='reaction1', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'], transitionState=\n 'TS1', kinetics=Arrhenius(A=(5000000000000.0, 's^-1'), n=0, Ea=(41.5431,\n 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"Exact match found for rate rule [RJJ]\nEuclidian distance = 0\nfamily: 1,4_Linear_birad_scission\nEa raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction.\"\"\"\n ))\nreaction(label='reaction2', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C1([CH]C)CC1=CC(25275)'], transitionState='TS2',\n kinetics=Arrhenius(A=(3360000000.0, 's^-1'), n=0.84, Ea=(212.534,\n 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(2500, 'K'), comment=\n \"\"\"Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction.\"\"\"\n ))\nreaction(label='reaction3', reactants=['CH3CHCCH2(18175)',\n 'C=[C][CH]C(18176)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS3', kinetics=Arrhenius(A=(0.00086947, 'm^3/(mol*s)'),\n n=2.67356, Ea=(32.0272, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [Ca_Cds-HH;CJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\"\"\"\n ))\nreaction(label='reaction4', reactants=['[CH2]C(=CC)C(C)=[C]C(25412)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS4',\n kinetics=Arrhenius(A=(7740000000.0, 's^-1'), n=1.08, Ea=(161.921,\n 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H\nExact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction5', reactants=['[CH2]C(=[C]C)C(C)=CC(25413)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS5',\n kinetics=Arrhenius(A=(111300, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction6', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C(=CC)[C](C)C=C(24605)'], transitionState='TS6',\n kinetics=Arrhenius(A=(1600000.0, 's^-1'), n=1.81, Ea=(149.787, 'kJ/mol'\n ), T0=(1, 'K'), comment=\n \"\"\"From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction7', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C](C=C)C(C)=CC(24606)'], transitionState='TS7',\n kinetics=Arrhenius(A=(6660000.0, 's^-1'), n=1.64, Ea=(100.416, 'kJ/mol'\n ), T0=(1, 'K'), 
comment=\n \"\"\"From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction8', reactants=['C=[C][CH]C(18176)',\n 'C=[C][CH]C(18176)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS8', kinetics=Arrhenius(A=(3730380.0, 'm^3/(mol*s)'),\n n=0.027223, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [Y_rad;Y_rad]\nEuclidian distance = 0\nfamily: R_Recombination\nEa raised from -14.4 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction9', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C(=CC)[C]1CC1C(25414)'], transitionState='TS9',\n kinetics=Arrhenius(A=(7367860000000.0, 's^-1'), n=-0.105173, Ea=(\n 93.5715, 'kJ/mol'), T0=(1, 'K'), Tmin=(303.03, 'K'), Tmax=(2000, 'K'),\n comment=\n \"\"\"Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction10', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C]1C(=CC)CC1C(25415)'], transitionState='TS10',\n kinetics=Arrhenius(A=(643734000.0, 's^-1'), n=0.926191, Ea=(130.445,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction11', reactants=['CH2(S)(23)',\n '[CH2]C(=C)C([CH2])=CC(25416)'], products=[\n 'C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS11', kinetics=\n Arrhenius(A=(79400000000000.0, 'cm^3/(mol*s)', '*|/', 0.25), n=-0.324,\n Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"From training reaction 4 used for carbene;Cd_pri\nExact match found for rate rule [carbene;Cd_pri]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 4.0\nfamily: 1,2_Insertion_carbene\nEa raised from -3.9 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction23', reactants=['C=C([CH]C)C[C]=CC(24184)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS12',\n kinetics=Arrhenius(A=(1748420000.0, 's^-1'), n=1.084, Ea=(170.038,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using average of templates [cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]\nEuclidian distance = 1.0\nfamily: 1,2_shiftC\"\"\"\n ))\nreaction(label='reaction13', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['CC=C1CCC1=CC(25269)'], transitionState='TS13', kinetics=\n Arrhenius(A=(1620000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),\n T0=(1, 'K'), Tmin=(600, 'K'), Tmax=(2000, 'K'), comment=\n \"\"\"From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H\nExact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]\nEuclidian distance = 0\nfamily: Birad_recombination\"\"\"\n ))\nreaction(label='reaction14', reactants=['CH2(19)',\n '[CH2]C([C]=CC)=CC(25417)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS14', kinetics=Arrhenius(A=(1067320.0, 'm^3/(mol*s)'),\n n=0.472793, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"\n 
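# --- Illustrative sketch (not part of the mechanism file) ---------------------------------
# The kinetics=Arrhenius(A=..., n=..., Ea=..., T0=(1,'K')) entries in the reactions here
# follow the modified Arrhenius form k(T) = A * (T/T0)**n * exp(-Ea/(R*T)). The helper below
# evaluates it for the parameters quoted for reaction1 (A in s^-1, Ea converted from kJ/mol
# to J/mol); the function name and the example temperature are my own.
import math

R_GAS = 8.314462618  # J/(mol*K)

def arrhenius(A, n, Ea_kJ, T, T0=1.0):
    """Modified Arrhenius rate coefficient; units follow those of A."""
    return A * (T / T0)**n * math.exp(-Ea_kJ * 1e3 / (R_GAS * T))

# reaction1 above: A = 5.0e12 s^-1, n = 0, Ea = 41.5431 kJ/mol, T0 = 1 K
print(f"k(1000 K) = {arrhenius(5.0e12, 0.0, 41.5431, 1000.0):.3e} s^-1")   # ~3.4e10 s^-1
# -------------------------------------------------------------------------------------------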
))\nreaction(label='reaction15', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2]C1([CH]C)C(=C)C1C(25296)'], transitionState='TS15',\n kinetics=Arrhenius(A=(67265800000.0, 's^-1'), n=0.535608, Ea=(216.807,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction.\"\"\"\n ))\nreaction(label='reaction16', reactants=['H(3)',\n '[CH2]C(=CC)C(=C)C=C(24604)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS16', kinetics=Arrhenius(A=(231000000.0,\n 'cm^3/(mol*s)'), n=1.64, Ea=(0, 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'),\n Tmax=(1500, 'K'), comment=\n \"\"\"From training reaction 2544 used for Cds-HH_Cds-CdH;HJ\nExact match found for rate rule [Cds-HH_Cds-CdH;HJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\nEa raised from -2.0 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction17', reactants=['[CH2]CC(=C)C([CH2])=CC(25418)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS17',\n kinetics=Arrhenius(A=(1720000.0, 's^-1'), n=1.99, Ea=(113.805, 'kJ/mol'\n ), T0=(1, 'K'), comment=\n \"\"\"From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd\nExact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction18', reactants=['[CH]=C(CC)C([CH2])=CC(25419)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS18',\n kinetics=Arrhenius(A=(18460000000.0, 's^-1'), n=0.74, Ea=(145.185,\n 'kJ/mol'), T0=(1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC\nExact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction19', reactants=['[CH2]C(=[C]C)C(=C)CC(25420)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS19',\n kinetics=Arrhenius(A=(74200, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction20', reactants=['[CH]=C([CH]C)C(C)=CC(25421)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS20',\n kinetics=Arrhenius(A=(111300, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction21', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C](C=C)C(=C)CC(24623)'], transitionState='TS21',\n kinetics=Arrhenius(A=(6660000.0, 's^-1'), n=1.64, Ea=(100.416, 'kJ/mol'\n ), T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]\nEuclidian 
distance = 2.0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction22', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C[CH][C]1CCC1=CC(25422)'], transitionState='TS22', kinetics=\n Arrhenius(A=(321867000.0, 's^-1'), n=0.926191, Ea=(130.445, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction23', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['[CH2][C]1C(=C)C(C)C1C(25423)'], transitionState='TS23',\n kinetics=Arrhenius(A=(516207000.0, 's^-1'), n=0.911389, Ea=(125.357,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"\n ))\nreaction(label='reaction24', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=CC(=C)C(C)=CC(24616)'], transitionState='TS24', kinetics=\n Arrhenius(A=(12756600000.0, 's^-1'), n=0.137, Ea=(24.9733, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"\n ))\nreaction(label='reaction24', reactants=['C=[C]C(C)C(=C)[CH]C(24183)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS25',\n kinetics=Arrhenius(A=(866000000000.0, 's^-1'), n=0.438, Ea=(94.4747,\n 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"From training reaction 5 used for cCs(-HC)CJ;CdsJ;C\nExact match found for rate rule [cCs(-HC)CJ;CdsJ;C]\nEuclidian distance = 0\nfamily: 1,2_shiftC\"\"\"\n ))\nreaction(label='reaction26', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=C1C(=CC)CC1C(25265)'], transitionState='TS26', kinetics=\n Arrhenius(A=(3240000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),\n T0=(1, 'K'), Tmin=(600, 'K'), Tmax=(2000, 'K'), comment=\n \"\"\"Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Birad_recombination\"\"\"\n ))\nreaction(label='reaction27', reactants=['CHCH3(T)(95)',\n '[CH2]C([C]=C)=CC(24774)'], products=['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState='TS27', kinetics=Arrhenius(A=(1067320.0, 'm^3/(mol*s)'),\n n=0.472793, Ea=(0, 'kJ/mol'), T0=(1, 'K'), comment=\n \"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"\n ))\nreaction(label='reaction28', reactants=['[CH]=C([CH]C)C(=C)CC(25424)'],\n products=['C=C([CH]C)C(=C)[CH]C(24182)'], transitionState='TS28',\n kinetics=Arrhenius(A=(74200, 's^-1'), n=2.23, Ea=(44.3086, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"\n ))\nreaction(label='reaction29', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=CC(=C)C(=C)CC(24630)'], transitionState='TS29', kinetics=\n Arrhenius(A=(19260000000.0, 's^-1'), n=0.137, Ea=(8.368, 'kJ/mol'), T0=\n (1, 'K'), Tmin=(300, 'K'), Tmax=(1500, 'K'), comment=\n \"\"\"Estimated using template [R5;Y_rad_NDe;XH_Rrad] for 
rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"\n ))\nreaction(label='reaction30', reactants=['C=C([CH]C)C(=C)[CH]C(24182)'],\n products=['C=C1C(=C)C(C)C1C(25274)'], transitionState='TS30', kinetics=\n Arrhenius(A=(1620000000000.0, 's^-1'), n=-0.305, Ea=(8.28432, 'kJ/mol'),\n T0=(1, 'K'), comment=\n \"\"\"Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.82842712475\nfamily: Birad_recombination\"\"\"\n ))\nnetwork(label='4267', isomers=['C=C([CH]C)C(=C)[CH]C(24182)'], reactants=[(\n 'CH3CHCCH2(18175)', 'CH3CHCCH2(18175)')], bathGas={'N2': 0.5, 'Ne': 0.5})\npressureDependence(label='4267', Tmin=(300, 'K'), Tmax=(2000, 'K'), Tcount=\n 8, Tlist=([302.47, 323.145, 369.86, 455.987, 609.649, 885.262, 1353.64,\n 1896.74], 'K'), Pmin=(0.01, 'bar'), Pmax=(100, 'bar'), Pcount=5, Plist=\n ([0.0125282, 0.0667467, 1, 14.982, 79.8202], 'bar'), maximumGrainSize=(\n 0.5, 'kcal/mol'), minimumGrainCount=250, method=\n 'modified strong collision', interpolationModel=('Chebyshev', 6, 4),\n activeKRotor=True, activeJRotor=True, rmgmode=True)\n", "step-3": "species(\n label = 'C=C([CH]C)C(=C)[CH]C(24182)',\n structure = SMILES('[CH2]C(=CC)C([CH2])=CC'),\n E0 = (249.687,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),\n HinderedRotor(inertia=(0.735277,'amu*angstrom^2'), symmetry=1, barrier=(16.9055,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0632434,'amu*angstrom^2'), symmetry=1, barrier=(29.514,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.737545,'amu*angstrom^2'), symmetry=1, barrier=(16.9576,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.732781,'amu*angstrom^2'), symmetry=1, barrier=(16.8481,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.739219,'amu*angstrom^2'), symmetry=1, barrier=(16.9961,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.384005,0.0840749,-5.09991e-05,5.50851e-09,4.14197e-12,30198.9,28.4131], Tmin=(100,'K'), Tmax=(1039.09,'K')), NASAPolynomial(coeffs=[18.1326,0.0354522,-1.35159e-05,2.44392e-09,-1.69358e-13,25127.7,-67.5143], Tmin=(1039.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(249.687,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'CH3CHCCH2(18175)',\n structure = SMILES('C=C=CC'),\n E0 = (145.615,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,540,610,2055,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655],'cm^-1')),\n HinderedRotor(inertia=(0.759584,'amu*angstrom^2'), symmetry=1, barrier=(17.4643,'kJ/mol'), semiclassical=False),\n ],\n 
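# --- Illustrative sketch (not part of the mechanism file) ---------------------------------
# The pressureDependence(..., interpolationModel=('Chebyshev', 6, 4), ...) block just above
# asks for the k(T,P) of this network to be fitted to a 6 x 4 Chebyshev series in reduced
# temperature and pressure. The evaluator below shows the standard Chebyshev rate form used
# by RMG/CHEMKIN-style tools; the coefficient matrix is produced by the pressure-dependence
# run itself and is not reproduced here, so `coeffs` is only a placeholder argument, and the
# function names are my own. T in K, P in bar, limits taken from the block above.
import math

def cheb(n, x):
    """Chebyshev polynomial of the first kind, T_n(x), with x clamped to [-1, 1]."""
    return math.cos(n * math.acos(max(-1.0, min(1.0, x))))

def chebyshev_log10k(coeffs, T, P, Tmin=300.0, Tmax=2000.0, Pmin=0.01, Pmax=100.0):
    """log10 k(T, P) from an NT x NP coefficient matrix (here 6 x 4)."""
    Tr = (2.0 / T - 1.0 / Tmin - 1.0 / Tmax) / (1.0 / Tmax - 1.0 / Tmin)
    Pr = (2.0 * math.log10(P) - math.log10(Pmin) - math.log10(Pmax)) / (
        math.log10(Pmax) - math.log10(Pmin))
    return sum(coeffs[t][p] * cheb(t, Tr) * cheb(p, Pr)
               for t in range(len(coeffs)) for p in range(len(coeffs[0])))

# usage once the fit exists: chebyshev_log10k(alpha, 1000.0, 1.0) with alpha the 6x4 matrix
# -------------------------------------------------------------------------------------------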
spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (54.0904,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(2996.71,'J/mol'), sigma=(5.18551,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with Tc=468.08 K, Pc=48.77 bar (from Joback method)\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.74635,0.0218189,8.22353e-06,-2.14768e-08,8.55624e-12,17563.6,12.7381], Tmin=(100,'K'), Tmax=(1025.6,'K')), NASAPolynomial(coeffs=[6.82078,0.0192338,-7.45622e-06,1.36536e-09,-9.53195e-14,16028,-10.4333], Tmin=(1025.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(145.615,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(228.648,'J/(mol*K)'), label=\"\"\"CH3CHCCH2\"\"\", comment=\"\"\"Thermo library: DFT_QCI_thermo\"\"\"),\n)\n\nspecies(\n label = '[CH2]C1([CH]C)CC1=CC(25275)',\n structure = SMILES('[CH2]C1([CH]C)CC1=CC'),\n E0 = (462.221,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.263258,0.0692237,-2.26363e-05,-1.35463e-08,8.13734e-12,55737.7,31.4039], Tmin=(100,'K'), Tmax=(1105.46,'K')), NASAPolynomial(coeffs=[15.171,0.0400578,-1.66801e-05,3.13624e-09,-2.2049e-13,50927.8,-48.8594], Tmin=(1105.46,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(462.221,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)\"\"\"),\n)\n\nspecies(\n label = 'C=[C][CH]C(18176)',\n structure = SMILES('[CH2][C]=CC'),\n E0 = (361.056,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655],'cm^-1')),\n HinderedRotor(inertia=(0.352622,'amu*angstrom^2'), symmetry=1, barrier=(8.10748,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.828631,'amu*angstrom^2'), symmetry=1, barrier=(19.0519,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (54.0904,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.42015,0.030446,-1.69076e-05,4.64684e-09,-5.12013e-13,43485.7,14.8304], Tmin=(100,'K'), Tmax=(2065.83,'K')), NASAPolynomial(coeffs=[10.7464,0.014324,-5.20136e-06,8.69079e-10,-5.48385e-14,40045.6,-31.3799], Tmin=(2065.83,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(361.056,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment=\"\"\"Thermo library: DFT_QCI_thermo + radical(Cds_S) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)C(C)=[C]C(25412)',\n structure = SMILES('[CH2]C(=CC)C(C)=[C]C'),\n E0 = (336.03,'kJ/mol'),\n modes = [\n 
HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),\n HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=[C]C)C(C)=CC(25413)',\n structure = SMILES('[CH2]C(=[C]C)C(C)=CC'),\n E0 = (336.03,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,1685,370,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,222.04],'cm^-1')),\n HinderedRotor(inertia=(0.395973,'amu*angstrom^2'), symmetry=1, barrier=(13.8694,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.396086,'amu*angstrom^2'), symmetry=1, barrier=(13.8683,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395737,'amu*angstrom^2'), symmetry=1, barrier=(13.8691,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395039,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.395901,'amu*angstrom^2'), symmetry=1, barrier=(13.8689,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.116365,0.0876489,-7.20737e-05,3.21805e-08,-5.96317e-12,40565.5,28.3373], Tmin=(100,'K'), Tmax=(1264.63,'K')), NASAPolynomial(coeffs=[14.5979,0.041109,-1.68732e-05,3.08148e-09,-2.10818e-13,36843.8,-46.1055], Tmin=(1264.63,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(336.03,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + 
group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(Cds_S)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)[C](C)C=C(24605)',\n structure = SMILES('[CH2]C=C(C)C([CH2])=CC'),\n E0 = (216.244,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),\n HinderedRotor(inertia=(0.712083,'amu*angstrom^2'), symmetry=1, barrier=(16.3722,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.555659,'amu*angstrom^2'), symmetry=1, barrier=(96.3851,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0202512,'amu*angstrom^2'), symmetry=1, barrier=(16.3711,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.712008,'amu*angstrom^2'), symmetry=1, barrier=(16.3705,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(4.19211,'amu*angstrom^2'), symmetry=1, barrier=(96.3849,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C](C=C)C(C)=CC(24606)',\n structure = SMILES('[CH2]C=C([CH2])C(C)=CC'),\n E0 = (216.244,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0883175,0.0775021,-3.58132e-05,-7.55711e-09,8.27771e-12,26166.1,29.3215], Tmin=(100,'K'), Tmax=(1017.17,'K')), NASAPolynomial(coeffs=[16.4341,0.0376674,-1.41425e-05,2.53759e-09,-1.75328e-13,21504.4,-57.0638], Tmin=(1017.17,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(216.244,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Allyl_P) + radical(C=CC=CCJ)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)[C]1CC1C(25414)',\n structure = SMILES('[CH2]C(=CC)[C]1CC1C'),\n E0 = (289.9,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.71289,0.0520158,3.84829e-05,-8.55933e-08,3.61457e-11,35003.5,26.4903], Tmin=(100,'K'), Tmax=(968.714,'K')), NASAPolynomial(coeffs=[16.7686,0.0352996,-1.24057e-05,2.26286e-09,-1.62921e-13,29566.5,-62.466], 
Tmin=(968.714,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(289.9,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-CsCsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + ring(Cyclopropane) + radical(Allyl_T) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C]1C(=CC)CC1C(25415)',\n structure = SMILES('[CH2]C1=C([CH]C)CC1C'),\n E0 = (304.572,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.583091,0.0531885,4.0938e-05,-9.08388e-08,3.83549e-11,36774.2,26.4705], Tmin=(100,'K'), Tmax=(972.301,'K')), NASAPolynomial(coeffs=[18.2947,0.0339462,-1.21014e-05,2.24934e-09,-1.64353e-13,30795.4,-71.5147], Tmin=(972.301,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(304.572,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_S)\"\"\"),\n)\n\nspecies(\n label = 'CH2(S)(23)',\n structure = SMILES('[CH2]'),\n E0 = (419.862,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),\n ],\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (14.0266,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment=\"\"\"GRI-Mech\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label=\"\"\"CH2(S)\"\"\", comment=\"\"\"Thermo library: Klippenstein_Glarborg2016\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=C)C([CH2])=CC(25416)',\n structure = SMILES('[CH2]C(=C)C([CH2])=CC'),\n E0 = (285.713,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,3010,987.5,1337.5,450,1655,311.383],'cm^-1')),\n HinderedRotor(inertia=(0.327475,'amu*angstrom^2'), symmetry=1, barrier=(22.5291,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.327466,'amu*angstrom^2'), symmetry=1, barrier=(22.5294,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.327318,'amu*angstrom^2'), symmetry=1, barrier=(22.5272,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.327483,'amu*angstrom^2'), symmetry=1, barrier=(22.5297,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (94.1543,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), 
n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.335271,0.0676667,-2.76626e-05,-1.62749e-08,1.21982e-11,34506.8,24.024], Tmin=(100,'K'), Tmax=(980.594,'K')), NASAPolynomial(coeffs=[17.5531,0.0266059,-9.47854e-06,1.70194e-09,-1.19937e-13,29727.4,-65.8563], Tmin=(980.594,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(285.713,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'C=C([CH]C)C[C]=CC(24184)',\n structure = SMILES('[CH2]C(=CC)C[C]=CC'),\n E0 = (366.985,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2995,3025,975,1000,1300,1375,400,500,1630,1680,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,1685,370,350,440,435,1725,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,579.702],'cm^-1')),\n HinderedRotor(inertia=(0.147406,'amu*angstrom^2'), symmetry=1, barrier=(3.38916,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.64226,'amu*angstrom^2'), symmetry=1, barrier=(14.7668,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.64164,'amu*angstrom^2'), symmetry=1, barrier=(14.7526,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.643937,'amu*angstrom^2'), symmetry=1, barrier=(14.8054,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.145327,'amu*angstrom^2'), symmetry=1, barrier=(3.34136,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(3683.66,'J/mol'), sigma=(6.4482,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with Tc=575.38 K, Pc=31.18 bar (from Joback method)\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29648,0.0786067,-5.42868e-05,1.96375e-08,-2.97459e-12,44273.2,31.2372], Tmin=(100,'K'), Tmax=(1490.43,'K')), NASAPolynomial(coeffs=[13.9025,0.0420909,-1.75363e-05,3.199e-09,-2.17227e-13,40217.5,-39.8334], Tmin=(1490.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(366.985,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)HH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(Cds_S) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'CC=C1CCC1=CC(25269)',\n structure = SMILES('CC=C1CCC1=CC'),\n E0 = (114.107,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.677799,0.0585738,5.80411e-06,-4.1598e-08,1.78951e-11,13856,25.5085], Tmin=(100,'K'), Tmax=(1034.79,'K')), NASAPolynomial(coeffs=[13.4814,0.0415234,-1.65073e-05,3.07348e-09,-2.16896e-13,9469.28,-45.0922], Tmin=(1034.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(114.107,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment=\"\"\"Thermo group 
additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(12methylenecyclobutane)\"\"\"),\n)\n\nspecies(\n label = 'CH2(19)',\n structure = SMILES('[CH2]'),\n E0 = (381.563,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (14.0266,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment=\"\"\"GRI-Mech\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label=\"\"\"CH2\"\"\", comment=\"\"\"Thermo library: Klippenstein_Glarborg2016\"\"\"),\n)\n\nspecies(\n label = '[CH2]C([C]=CC)=CC(25417)',\n structure = SMILES('[CH2]C([C]=CC)=CC'),\n E0 = (334.774,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([350,440,435,1725,1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,180],'cm^-1')),\n HinderedRotor(inertia=(0.7606,'amu*angstrom^2'), symmetry=1, barrier=(17.4877,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.760854,'amu*angstrom^2'), symmetry=1, barrier=(17.4935,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.760586,'amu*angstrom^2'), symmetry=1, barrier=(17.4874,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(2.15146,'amu*angstrom^2'), symmetry=1, barrier=(49.4663,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (94.1543,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.352604,0.0734369,-5.91187e-05,2.57941e-08,-4.60694e-12,40400.9,25.1788], Tmin=(100,'K'), Tmax=(1327.42,'K')), NASAPolynomial(coeffs=[14.2321,0.0316126,-1.18565e-05,2.05761e-09,-1.36512e-13,36716.1,-45.7131], Tmin=(1327.42,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(334.774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(390.78,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + radical(C=CJC=C) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C1([CH]C)C(=C)C1C(25296)',\n structure = SMILES('[CH2]C1([CH]C)C(=C)C1C'),\n E0 = (466.494,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.29276,0.0655305,-4.50464e-06,-3.74661e-08,1.7759e-11,56253.7,30.0992], Tmin=(100,'K'), Tmax=(1027.4,'K')), 
NASAPolynomial(coeffs=[16.6435,0.0372633,-1.49065e-05,2.81296e-09,-2.01072e-13,51026,-58.316], Tmin=(1027.4,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(466.494,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsCs) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsHH) + ring(Methylene_cyclopropane) + radical(Neopentyl) + radical(Cs_S)\"\"\"),\n)\n\nspecies(\n label = 'H(3)',\n structure = SMILES('[H]'),\n E0 = (211.792,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (1.00794,'amu'),\n collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment=\"\"\"GRI-Mech\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label=\"\"\"H\"\"\", comment=\"\"\"Thermo library: BurkeH2O2\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=CC)C(=C)C=C(24604)',\n structure = SMILES('[CH2]C(=CC)C(=C)C=C'),\n E0 = (242.677,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,2995,3025,975,1000,1300,1375,400,500,1630,1680,181.962,683.313],'cm^-1')),\n HinderedRotor(inertia=(0.669842,'amu*angstrom^2'), symmetry=1, barrier=(19.1337,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0582339,'amu*angstrom^2'), symmetry=1, barrier=(19.1767,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.83204,'amu*angstrom^2'), symmetry=1, barrier=(19.1302,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(4.52237,'amu*angstrom^2'), symmetry=1, barrier=(104.569,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 2,\n opticalIsomers = 1,\n molecularWeight = (107.173,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.293043,0.0682771,-2.00337e-05,-2.05401e-08,1.21516e-11,29332.3,27.0261], Tmin=(100,'K'), Tmax=(1018.57,'K')), NASAPolynomial(coeffs=[15.7386,0.0358123,-1.37404e-05,2.51366e-09,-1.76142e-13,24723.4,-54.9529], Tmin=(1018.57,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(242.677,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(440.667,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]CC(=C)C([CH2])=CC(25418)',\n structure = SMILES('[CH2]CC(=C)C([CH2])=CC'),\n E0 = (316.814,'kJ/mol'),\n modes = [\n 
HarmonicOscillator(frequencies=([3010,987.5,1337.5,450,1655,2750,2800,2850,1350,1500,750,1050,1375,1000,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,180,180],'cm^-1')),\n HinderedRotor(inertia=(0.0368535,'amu*angstrom^2'), symmetry=1, barrier=(17.9864,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.00736317,'amu*angstrom^2'), symmetry=1, barrier=(3.60618,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.781153,'amu*angstrom^2'), symmetry=1, barrier=(17.9602,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.779478,'amu*angstrom^2'), symmetry=1, barrier=(17.9217,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.781104,'amu*angstrom^2'), symmetry=1, barrier=(17.9591,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.348925,0.0836004,-5.1879e-05,7.14877e-09,3.44908e-12,38270.9,31.5928], Tmin=(100,'K'), Tmax=(1044.14,'K')), NASAPolynomial(coeffs=[17.9255,0.0352115,-1.34219e-05,2.42456e-09,-1.67785e-13,33276.3,-63.0036], Tmin=(1044.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(316.814,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(RCCJ) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH]=C(CC)C([CH2])=CC(25419)',\n structure = SMILES('[CH]=C(CC)C([CH2])=CC'),\n E0 = (358.664,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([3120,650,792.5,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180],'cm^-1')),\n HinderedRotor(inertia=(0.701639,'amu*angstrom^2'), symmetry=1, barrier=(16.1321,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.344302,'amu*angstrom^2'), symmetry=1, barrier=(16.1602,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0492932,'amu*angstrom^2'), symmetry=1, barrier=(16.1378,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.702005,'amu*angstrom^2'), symmetry=1, barrier=(16.1405,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.702379,'amu*angstrom^2'), symmetry=1, barrier=(16.1491,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.468616,0.0864938,-5.84569e-05,1.27697e-08,1.75707e-12,43308.4,30.6389], Tmin=(100,'K'), Tmax=(1047.28,'K')), NASAPolynomial(coeffs=[18.4195,0.034593,-1.31104e-05,2.35762e-09,-1.62637e-13,38242.2,-66.6572], Tmin=(1047.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(358.664,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + 
group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_P)\"\"\"),\n)\n\nspecies(\n label = '[CH2]C(=[C]C)C(=C)CC(25420)',\n structure = SMILES('[CH2]C(=[C]C)C(=C)CC'),\n E0 = (349.41,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,325,375,415,465,420,450,1700,1750,2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,180,180],'cm^-1')),\n HinderedRotor(inertia=(0.159905,'amu*angstrom^2'), symmetry=1, barrier=(15.9368,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693159,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693127,'amu*angstrom^2'), symmetry=1, barrier=(15.9364,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.693165,'amu*angstrom^2'), symmetry=1, barrier=(15.9372,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.0150632,'amu*angstrom^2'), symmetry=1, barrier=(15.9371,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.583231,0.089245,-7.16619e-05,3.00631e-08,-5.07891e-12,42198.9,31.1306], Tmin=(100,'K'), Tmax=(1412.15,'K')), NASAPolynomial(coeffs=[19.0319,0.0336833,-1.2643e-05,2.20036e-09,-1.46165e-13,36659.1,-70.2702], Tmin=(1412.15,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(349.41,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)\"\"\"),\n)\n\nspecies(\n label = '[CH]=C([CH]C)C(C)=CC(25421)',\n structure = SMILES('[CH]C(=CC)C(C)=CC'),\n E0 = (317.373,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([325,375,415,465,420,450,1700,1750,2750,2762.5,2775,2787.5,2800,2812.5,2825,2837.5,2850,1350,1380,1410,1440,1470,1500,700,750,800,1000,1050,1100,1350,1375,1400,900,1000,1100,2995,3025,975,1000,1300,1375,400,500,1630,1680,200,800,1200,1600],'cm^-1')),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.247945,0.0873521,-6.16843e-05,2.31486e-08,-3.62747e-12,38328.8,29.1665], Tmin=(100,'K'), Tmax=(1460.93,'K')), NASAPolynomial(coeffs=[15.297,0.0447902,-1.7984e-05,3.20673e-09,-2.14924e-13,33786.8,-51.7212], Tmin=(1460.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(317.373,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity 
estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + radical(AllylJ2_triplet)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C](C=C)C(=C)CC(24623)',\n structure = SMILES('[CH2]C(C=C)=C([CH2])CC'),\n E0 = (228.159,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.0497728,0.0733281,-1.6094e-05,-3.35123e-08,1.88363e-11,27601.1,30.4448], Tmin=(100,'K'), Tmax=(975.095,'K')), NASAPolynomial(coeffs=[18.3695,0.0342638,-1.21408e-05,2.16747e-09,-1.52112e-13,22274,-66.8493], Tmin=(975.095,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(228.159,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CC=CCJ) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'C[CH][C]1CCC1=CC(25422)',\n structure = SMILES('C[CH]C1CCC=1[CH]C'),\n E0 = (303.292,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.788866,0.0500701,4.22235e-05,-8.64809e-08,3.53174e-11,36611.5,25.2586], Tmin=(100,'K'), Tmax=(987.239,'K')), NASAPolynomial(coeffs=[16.2187,0.0373502,-1.4111e-05,2.65357e-09,-1.92503e-13,31138.2,-61.2734], Tmin=(987.239,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(303.292,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_S) + radical(Allyl_S)\"\"\"),\n)\n\nspecies(\n label = '[CH2][C]1C(=C)C(C)C1C(25423)',\n structure = SMILES('[CH2]C1=C([CH2])C(C)C1C'),\n E0 = (305.852,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.377097,0.0563026,3.9705e-05,-9.53284e-08,4.14811e-11,36937,26.2973], Tmin=(100,'K'), Tmax=(959.735,'K')), NASAPolynomial(coeffs=[20.4056,0.0304853,-1.006e-05,1.83774e-09,-1.35603e-13,30437.2,-83.3398], Tmin=(959.735,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(305.852,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsCs) + ring(Cyclobutene) + radical(Allyl_P) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = 'C=CC(=C)C(C)=CC(24616)',\n structure = SMILES('C=CC(=C)C(C)=CC'),\n E0 = (91.1774,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), 
n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.236638,0.0713806,-3.04205e-05,-5.26762e-09,5.54498e-12,11111.2,26.9518], Tmin=(100,'K'), Tmax=(1093.32,'K')), NASAPolynomial(coeffs=[14.1536,0.040705,-1.6104e-05,2.93544e-09,-2.02595e-13,6858.32,-46.9636], Tmin=(1093.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(91.1774,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH)\"\"\"),\n)\n\nspecies(\n label = 'C=[C]C(C)C(=C)[CH]C(24183)',\n structure = SMILES('[CH2]C(=CC)C(C)[C]=C'),\n E0 = (369.44,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,350,440,435,1725,3000,3100,440,815,1455,1000,345.333,347.343],'cm^-1')),\n HinderedRotor(inertia=(0.119405,'amu*angstrom^2'), symmetry=1, barrier=(9.93037,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.281457,'amu*angstrom^2'), symmetry=1, barrier=(24.022,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.116909,'amu*angstrom^2'), symmetry=1, barrier=(9.94809,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.117447,'amu*angstrom^2'), symmetry=1, barrier=(9.9744,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.116555,'amu*angstrom^2'), symmetry=1, barrier=(9.93684,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n collisionModel = TransportData(shapeIndex=2, epsilon=(3625.33,'J/mol'), sigma=(6.4092,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with Tc=566.27 K, Pc=31.24 bar (from Joback method)\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.299693,0.0839308,-6.74533e-05,3.06742e-08,-6.02582e-12,44564.4,29.0122], Tmin=(100,'K'), Tmax=(1163.73,'K')), NASAPolynomial(coeffs=[10.857,0.0476425,-2.06788e-05,3.8782e-09,-2.69295e-13,42107.3,-23.5217], Tmin=(1163.73,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(369.44,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)(Cds-Cds)CsH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsCs) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Allyl_P) + radical(Cds_S)\"\"\"),\n)\n\nspecies(\n label = 'C=C1C(=CC)CC1C(25265)',\n structure = SMILES('C=C1C(=CC)CC1C'),\n E0 = (118.381,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.689924,0.0550304,2.3689e-05,-6.56265e-08,2.77602e-11,14372.8,24.9628], Tmin=(100,'K'), Tmax=(993.204,'K')), NASAPolynomial(coeffs=[15.3775,0.0380508,-1.43595e-05,2.66472e-09,-1.90565e-13,9375.16,-56.2678], Tmin=(993.204,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(118.381,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment=\"\"\"Thermo group 
additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)\"\"\"),\n)\n\nspecies(\n label = 'CHCH3(T)(95)',\n structure = SMILES('[CH]C'),\n E0 = (343.893,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,592.414,4000],'cm^-1')),\n HinderedRotor(inertia=(0.00438699,'amu*angstrom^2'), symmetry=1, barrier=(26.7685,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (28.0532,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.82363,-0.000909515,3.2138e-05,-3.7348e-08,1.3309e-11,41371.4,7.10948], Tmin=(100,'K'), Tmax=(960.812,'K')), NASAPolynomial(coeffs=[4.30487,0.00943069,-3.27559e-06,5.95121e-10,-4.27307e-14,40709.1,1.84202], Tmin=(960.812,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(343.893,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label=\"\"\"CHCH3(T)\"\"\", comment=\"\"\"Thermo library: DFT_QCI_thermo\"\"\"),\n)\n\nspecies(\n label = '[CH2]C([C]=C)=CC(24774)',\n structure = SMILES('[CH2]C([C]=C)=CC'),\n E0 = (370.8,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([1685,370,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,350,440,435,1725,3000,3100,440,815,1455,1000,180],'cm^-1')),\n HinderedRotor(inertia=(1.17315,'amu*angstrom^2'), symmetry=1, barrier=(26.9731,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(1.17496,'amu*angstrom^2'), symmetry=1, barrier=(27.0146,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(1.1727,'amu*angstrom^2'), symmetry=1, barrier=(26.9626,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (80.1277,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.0818,0.0569416,-3.56598e-05,4.1841e-09,3.20998e-12,44708.4,20.7527], Tmin=(100,'K'), Tmax=(982.69,'K')), NASAPolynomial(coeffs=[12.9204,0.0239405,-8.46845e-06,1.46434e-09,-9.91425e-14,41648.3,-39.886], Tmin=(982.69,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(370.8,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(Allyl_P)\"\"\"),\n)\n\nspecies(\n label = '[CH]=C([CH]C)C(=C)CC(25424)',\n structure = SMILES('[CH]C(=CC)C(=C)CC'),\n E0 = (330.753,'kJ/mol'),\n modes = [\n HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,325,375,415,465,420,450,1700,1750,200,800,1066.67,1333.33,1600],'cm^-1')),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n 
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),\n ],\n spinMultiplicity = 3,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.442166,0.0858934,-5.1432e-05,9.5936e-09,1.54315e-12,39950.3,30.9724], Tmin=(100,'K'), Tmax=(1106.5,'K')), NASAPolynomial(coeffs=[16.3579,0.0427111,-1.66841e-05,2.99222e-09,-2.04007e-13,35158.1,-56.633], Tmin=(1106.5,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(330.753,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(461.453,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(AllylJ2_triplet)\"\"\"),\n)\n\nspecies(\n label = 'C=CC(=C)C(=C)CC(24630)',\n structure = SMILES('C=CC(=C)C(=C)CC'),\n E0 = (104.558,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.296747,0.0670054,-1.0269e-05,-3.13536e-08,1.59568e-11,12721.3,27.8384], Tmin=(100,'K'), Tmax=(1010.3,'K')), NASAPolynomial(coeffs=[15.6889,0.0379462,-1.44599e-05,2.64736e-09,-1.86033e-13,7984.11,-54.6302], Tmin=(1010.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.558,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(465.61,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)(Cds-Cds)) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + group(Cds-CdsHH) + group(Cds-CdsHH)\"\"\"),\n)\n\nspecies(\n label = 'C=C1C(=C)C(C)C1C(25274)',\n structure = SMILES('C=C1C(=C)C(C)C1C'),\n E0 = (122.654,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (108.181,'amu'),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.691732,0.0515838,4.13669e-05,-8.96066e-08,3.77135e-11,14890,23.0693], Tmin=(100,'K'), Tmax=(969.873,'K')), NASAPolynomial(coeffs=[17.4573,0.0342784,-1.20439e-05,2.21718e-09,-1.61071e-13,9199.74,-69.8715], Tmin=(969.873,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(122.654,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(473.925,'J/(mol*K)'), comment=\"\"\"Thermo group additivity estimation: group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-Cds(Cds-Cds)Cs) + group(Cds-CdsHH) + group(Cds-CdsHH) + ring(12methylenecyclobutane)\"\"\"),\n)\n\nspecies(\n label = 'N2',\n structure = SMILES('N#N'),\n E0 = (-8.69489,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (28.0135,'amu'),\n collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment=\"\"\"PrimaryTransportLibrary\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = 
NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label=\"\"\"N2\"\"\", comment=\"\"\"Thermo library: BurkeH2O2\"\"\"),\n)\n\nspecies(\n label = 'Ne',\n structure = SMILES('[Ne]'),\n E0 = (-6.19738,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n molecularWeight = (20.1797,'amu'),\n collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment=\"\"\"Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!\"\"\"),\n energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),\n thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label=\"\"\"Ne\"\"\", comment=\"\"\"Thermo library: primaryThermoLibrary\"\"\"),\n)\n\ntransitionState(\n label = 'TS1',\n E0 = (291.23,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS2',\n E0 = (462.221,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS3',\n E0 = (538.699,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS4',\n E0 = (497.951,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS5',\n E0 = (380.338,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS6',\n E0 = (399.474,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS7',\n E0 = (350.103,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS8',\n E0 = (722.113,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS9',\n E0 = (343.259,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS10',\n E0 = (380.132,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS11',\n E0 = (705.575,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS12',\n E0 = (537.022,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS13',\n E0 = (257.971,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS14',\n E0 = (716.337,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS15',\n E0 = (466.494,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS16',\n E0 = (454.469,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS17',\n E0 = (430.619,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS18',\n E0 = (503.849,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS19',\n E0 = 
(393.718,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS20',\n E0 = (361.682,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS21',\n E0 = (350.103,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS22',\n E0 = (380.132,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS23',\n E0 = (375.044,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS24',\n E0 = (274.66,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS25',\n E0 = (463.915,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS26',\n E0 = (257.971,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS27',\n E0 = (714.692,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS28',\n E0 = (375.062,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS29',\n E0 = (258.055,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\ntransitionState(\n label = 'TS30',\n E0 = (257.971,'kJ/mol'),\n spinMultiplicity = 1,\n opticalIsomers = 1,\n)\n\nreaction(\n label = 'reaction1',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'],\n transitionState = 'TS1',\n kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(41.5431,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"Exact match found for rate rule [RJJ]\nEuclidian distance = 0\nfamily: 1,4_Linear_birad_scission\nEa raised from 0.0 to 41.5 kJ/mol to match endothermicity of reaction.\"\"\"),\n)\n\nreaction(\n label = 'reaction2',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C1([CH]C)CC1=CC(25275)'],\n transitionState = 'TS2',\n kinetics = Arrhenius(A=(3.36e+09,'s^-1'), n=0.84, Ea=(212.534,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment=\"\"\"Estimated using template [R4_S_D;doublebond_intra_HNd;radadd_intra_cs2H] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 210.2 to 212.5 kJ/mol to match endothermicity of reaction.\"\"\"),\n)\n\nreaction(\n label = 'reaction3',\n reactants = ['CH3CHCCH2(18175)', 'C=[C][CH]C(18176)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS3',\n kinetics = Arrhenius(A=(0.00086947,'m^3/(mol*s)'), n=2.67356, Ea=(32.0272,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [Ca_Cds-HH;CJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\"\"\"),\n)\n\nreaction(\n label = 'reaction4',\n reactants = ['[CH2]C(=CC)C(C)=[C]C(25412)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS4',\n kinetics = Arrhenius(A=(7.74e+09,'s^-1'), n=1.08, Ea=(161.921,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"From training reaction 198 used for R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H\nExact match found for rate rule [R3H_DS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction5',\n reactants = ['[CH2]C(=[C]C)C(C)=CC(25413)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS5',\n kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, 
Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_2H]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction6',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C(=CC)[C](C)C=C(24605)'],\n transitionState = 'TS6',\n kinetics = Arrhenius(A=(1.6e+06,'s^-1'), n=1.81, Ea=(149.787,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 101 used for R4H_SDS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R4H_SDS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction7',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C](C=C)C(C)=CC(24606)'],\n transitionState = 'TS7',\n kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 96 used for R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H\nExact match found for rate rule [R5H_SS(D)MS;C_rad_out_2H;Cs_H_out_2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction8',\n reactants = ['C=[C][CH]C(18176)', 'C=[C][CH]C(18176)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS8',\n kinetics = Arrhenius(A=(3.73038e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [Y_rad;Y_rad]\nEuclidian distance = 0\nfamily: R_Recombination\nEa raised from -14.4 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction9',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C(=CC)[C]1CC1C(25414)'],\n transitionState = 'TS9',\n kinetics = Arrhenius(A=(7.36786e+12,'s^-1'), n=-0.105173, Ea=(93.5715,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment=\"\"\"Estimated using template [R3_D;doublebond_intra;radadd_intra_cs2H] for rate rule [R3_D;doublebond_intra_secDe_HNd;radadd_intra_cs2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction10',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C]1C(=CC)CC1C(25415)'],\n transitionState = 'TS10',\n kinetics = Arrhenius(A=(6.43734e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction11',\n reactants = ['CH2(S)(23)', '[CH2]C(=C)C([CH2])=CC(25416)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS11',\n kinetics = Arrhenius(A=(7.94e+13,'cm^3/(mol*s)','*|/',0.25), n=-0.324, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 4 used for carbene;Cd_pri\nExact match found for rate rule [carbene;Cd_pri]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 4.0\nfamily: 1,2_Insertion_carbene\nEa raised from -3.9 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction23',\n reactants = ['C=C([CH]C)C[C]=CC(24184)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS12',\n kinetics = Arrhenius(A=(1.74842e+09,'s^-1'), n=1.084, Ea=(170.038,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using average of templates 
[cCsCJ;CdsJ;C] + [cCs(-HH)CJ;CJ;C] for rate rule [cCs(-HH)CJ;CdsJ;C]\nEuclidian distance = 1.0\nfamily: 1,2_shiftC\"\"\"),\n)\n\nreaction(\n label = 'reaction13',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['CC=C1CCC1=CC(25269)'],\n transitionState = 'TS13',\n kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment=\"\"\"From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H\nExact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]\nEuclidian distance = 0\nfamily: Birad_recombination\"\"\"),\n)\n\nreaction(\n label = 'reaction14',\n reactants = ['CH2(19)', '[CH2]C([C]=CC)=CC(25417)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS14',\n kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction15',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2]C1([CH]C)C(=C)C1C(25296)'],\n transitionState = 'TS15',\n kinetics = Arrhenius(A=(6.72658e+10,'s^-1'), n=0.535608, Ea=(216.807,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using average of templates [R4_S_D;doublebond_intra;radadd_intra_csHNd] + [R4_S_D;doublebond_intra_HNd;radadd_intra_cs] for rate rule [R4_S_(Cd)_D;doublebond_intra_HNd;radadd_intra_csHNd]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: Intra_R_Add_Exocyclic\nEa raised from 214.2 to 216.8 kJ/mol to match endothermicity of reaction.\"\"\"),\n)\n\nreaction(\n label = 'reaction16',\n reactants = ['H(3)', '[CH2]C(=CC)C(=C)C=C(24604)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS16',\n kinetics = Arrhenius(A=(2.31e+08,'cm^3/(mol*s)'), n=1.64, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"From training reaction 2544 used for Cds-HH_Cds-CdH;HJ\nExact match found for rate rule [Cds-HH_Cds-CdH;HJ]\nEuclidian distance = 0\nfamily: R_Addition_MultipleBond\nEa raised from -2.0 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction17',\n reactants = ['[CH2]CC(=C)C([CH2])=CC(25418)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS17',\n kinetics = Arrhenius(A=(1.72e+06,'s^-1'), n=1.99, Ea=(113.805,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 84 used for R2H_S;C_rad_out_2H;Cs_H_out_H/Cd\nExact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/Cd]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction18',\n reactants = ['[CH]=C(CC)C([CH2])=CC(25419)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS18',\n kinetics = Arrhenius(A=(1.846e+10,'s^-1'), n=0.74, Ea=(145.185,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"From training reaction 194 used for R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC\nExact match found for rate rule [R3H_DS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction19',\n reactants = ['[CH2]C(=[C]C)C(=C)CC(25420)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS19',\n kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), 
comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_single;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_Cs;Cs_H_out_H/NonDeC]\nEuclidian distance = 2.2360679775\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction20',\n reactants = ['[CH]=C([CH]C)C(C)=CC(25421)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS20',\n kinetics = Arrhenius(A=(111300,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_2H]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 3.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction21',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C](C=C)C(=C)CC(24623)'],\n transitionState = 'TS21',\n kinetics = Arrhenius(A=(6.66e+06,'s^-1'), n=1.64, Ea=(100.416,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R5H_SS(D)MS;C_rad_out_single;Cs_H_out_2H] for rate rule [R5H_SS(D)MS;C_rad_out_H/NonDeC;Cs_H_out_2H]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 6.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction22',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C[CH][C]1CCC1=CC(25422)'],\n transitionState = 'TS22',\n kinetics = Arrhenius(A=(3.21867e+08,'s^-1'), n=0.926191, Ea=(130.445,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_cs2H]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction23',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['[CH2][C]1C(=C)C(C)C1C(25423)'],\n transitionState = 'TS23',\n kinetics = Arrhenius(A=(5.16207e+08,'s^-1'), n=0.911389, Ea=(125.357,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using an average for rate rule [R4_S_D;doublebond_intra;radadd_intra_csHCs]\nEuclidian distance = 0\nfamily: Intra_R_Add_Endocyclic\"\"\"),\n)\n\nreaction(\n label = 'reaction24',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=CC(=C)C(C)=CC(24616)'],\n transitionState = 'TS24',\n kinetics = Arrhenius(A=(1.27566e+10,'s^-1'), n=0.137, Ea=(24.9733,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R5;Y_rad;XH_Rrad] for rate rule [R5radEndo;Y_rad;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"),\n)\n\nreaction(\n label = 'reaction24',\n reactants = ['C=[C]C(C)C(=C)[CH]C(24183)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS25',\n kinetics = Arrhenius(A=(8.66e+11,'s^-1'), n=0.438, Ea=(94.4747,'kJ/mol'), T0=(1,'K'), comment=\"\"\"From training reaction 5 used for cCs(-HC)CJ;CdsJ;C\nExact match found for rate rule [cCs(-HC)CJ;CdsJ;C]\nEuclidian distance = 0\nfamily: 1,2_shiftC\"\"\"),\n)\n\nreaction(\n label = 'reaction26',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=C1C(=CC)CC1C(25265)'],\n transitionState = 'TS26',\n kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment=\"\"\"Estimated using template [R4_SSS;C_rad_out_2H;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.0\nMultiplied by reaction path degeneracy 2.0\nfamily: Birad_recombination\"\"\"),\n)\n\nreaction(\n label = 'reaction27',\n reactants = ['CHCH3(T)(95)', 
'[CH2]C([C]=C)=CC(24774)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS27',\n kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [Y_rad;Birad] for rate rule [Cd_rad/OneDe;Birad]\nEuclidian distance = 3.0\nfamily: Birad_R_Recombination\nEa raised from -3.5 to 0 kJ/mol.\"\"\"),\n)\n\nreaction(\n label = 'reaction28',\n reactants = ['[CH]=C([CH]C)C(=C)CC(25424)'],\n products = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n transitionState = 'TS28',\n kinetics = Arrhenius(A=(74200,'s^-1'), n=2.23, Ea=(44.3086,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_1H] for rate rule [R4H_DSS;Cd_rad_out_singleH;Cs_H_out_H/NonDeC]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 2.0\nfamily: intra_H_migration\"\"\"),\n)\n\nreaction(\n label = 'reaction29',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=CC(=C)C(=C)CC(24630)'],\n transitionState = 'TS29',\n kinetics = Arrhenius(A=(1.926e+10,'s^-1'), n=0.137, Ea=(8.368,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment=\"\"\"Estimated using template [R5;Y_rad_NDe;XH_Rrad] for rate rule [R5radEndo;Y_rad_NDe;XH_Rrad]\nEuclidian distance = 1.0\nMultiplied by reaction path degeneracy 6.0\nfamily: Intra_Disproportionation\"\"\"),\n)\n\nreaction(\n label = 'reaction30',\n reactants = ['C=C([CH]C)C(=C)[CH]C(24182)'],\n products = ['C=C1C(=C)C(C)C1C(25274)'],\n transitionState = 'TS30',\n kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), comment=\"\"\"Estimated using template [R4_SSS;C_rad_out_single;Cpri_rad_out_single] for rate rule [R4_SSS;C_rad_out_H/NonDeC;Cpri_rad_out_H/NonDeC]\nEuclidian distance = 2.82842712475\nfamily: Birad_recombination\"\"\"),\n)\n\nnetwork(\n label = '4267',\n isomers = [\n 'C=C([CH]C)C(=C)[CH]C(24182)',\n ],\n reactants = [\n ('CH3CHCCH2(18175)', 'CH3CHCCH2(18175)'),\n ],\n bathGas = {\n 'N2': 0.5,\n 'Ne': 0.5,\n },\n)\n\npressureDependence(\n label = '4267',\n Tmin = (300,'K'),\n Tmax = (2000,'K'),\n Tcount = 8,\n Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),\n Pmin = (0.01,'bar'),\n Pmax = (100,'bar'),\n Pcount = 5,\n Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),\n maximumGrainSize = (0.5,'kcal/mol'),\n minimumGrainCount = 250,\n method = 'modified strong collision',\n interpolationModel = ('Chebyshev', 6, 4),\n activeKRotor = True,\n activeJRotor = True,\n rmgmode = True,\n)\n\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import pygame
# import random
# import text_scroll

from os import path
img_dir = path.join(path.dirname(__file__), 'img')

# define screen and refresh rate
WIDTH = 720
HEIGHT = 720
FPS = 30

# define colors
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BLACK = (0, 0, 0)
YELLOW = (255, 255, 0)
BROWN = (165, 42, 42)
WHITE = (255, 255, 255)

# define runtime settings
pygame.init()  # the display module must be initialized before set_mode is called
screen = pygame.display.set_mode((WIDTH, HEIGHT))
background = pygame.Surface(screen.get_size())
pygame.display.set_caption('Space Force Prime')
clock = pygame.time.Clock()
normal
{ "blob_id": "88dfb422b1c9f9a9a8f497e1dbba5598c2710e9b", "index": 5718, "step-1": "<mask token>\n", "step-2": "<mask token>\npygame.display.set_caption('Space Force Prime')\n<mask token>\n", "step-3": "<mask token>\nimg_dir = path.join(path.dirname(__file__), 'img')\nWIDTH = 720\nHEIGHT = 720\nFPS = 30\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nBLACK = 0, 0, 0\nYELLOW = 255, 255, 0\nBROWN = 165, 42, 42\nWHITE = 255, 255, 255\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nbackground = pygame.Surface(screen.get_size())\npygame.display.set_caption('Space Force Prime')\nclock = pygame.time.Clock()\n", "step-4": "import pygame\nfrom os import path\nimg_dir = path.join(path.dirname(__file__), 'img')\nWIDTH = 720\nHEIGHT = 720\nFPS = 30\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nBLACK = 0, 0, 0\nYELLOW = 255, 255, 0\nBROWN = 165, 42, 42\nWHITE = 255, 255, 255\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nbackground = pygame.Surface(screen.get_size())\npygame.display.set_caption('Space Force Prime')\nclock = pygame.time.Clock()\n", "step-5": "import pygame\n# import random\n# import text_scroll\n\nfrom os import path\nimg_dir = path.join(path.dirname(__file__), 'img')\n\n# define screen and refresh rate\nWIDTH = 720\nHEIGHT = 720\nFPS = 30\n\n# define colors\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nBLACK = (0, 0, 0)\nYELLOW = (255, 255, 0)\nBROWN = (165, 42, 42)\nWHITE = (255, 255, 255)\n\n# define runtime settings\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\nbackground = pygame.Surface(screen.get_size())\npygame.display.set_caption('Space Force Prime')\nclock = pygame.time.Clock()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from django.contrib import admin
from .models import Client, Adress

# Register your models here.

class ClientInline(admin.StackedInline):
    # show a client's Adress records inline on the Client admin page
    model = Adress
    can_delete = False
    extra = 1

class ClientAdmin(admin.ModelAdmin):
    inlines = [ClientInline]


admin.site.register(Client, ClientAdmin)
normal
{ "blob_id": "ffd7aef2e72e64ac5b9f85b9d12845479187d89b", "index": 2010, "step-1": "<mask token>\n\n\nclass ClientInline(admin.StackedInline):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\nadmin.site.register(Client, ClientAdmin)\n", "step-4": "from django.contrib import admin\nfrom .models import Client, Adress\n\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\nadmin.site.register(Client, ClientAdmin)\n", "step-5": "from django.contrib import admin\nfrom .models import Client, Adress\n\n# Register your models here.\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\nadmin.site.register(Client, ClientAdmin)", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
# #1 # def bi_search(l, r, arr, x): # # Code Here # if(l == r): # return arr[r] == x # mid = (l + r)//2 + 1 # if(arr[mid] > x): # return bi_search(l,mid-1,arr,x) # else: # return bi_search(mid,r,arr,x) # inp = input('Enter Input : ').split('/') # arr, k = list(map(int, inp[0].split())), int(inp[1]) # print(bi_search(0, len(arr) - 1, sorted(arr), k)) # #2 # def bi_search(l, r, arr, x): # if(l == r): # if arr[l] > x : # return arr[l] # else: # return None # mid = (l + r)//2 + 1 # res = None # if(arr[mid] > x): # res = bi_search(l,mid-1,arr,x) # else: # res = bi_search(mid,r,arr,x) # return res if res else (arr[mid] if arr[mid] > x else None) # inp = input('Enter Input : ').split('/') # arr, arr2 = sorted(list(map(int, inp[0].split()))), list(map(int, inp[1].split())) # for k in arr2: # res = bi_search(0, len(arr) - 1, arr, k) # print(res if res else "No First Greater Value") #3 # class Data: # def __init__(self, key, value): # self.key = key # self.value = value # def __str__(self): # return "({0}, {1})".format(self.key, self.value) # class hash: # def __init__(self,max,chain): # self.data = [None for i in range(max)] # self.limit= max # self.chain= chain # self.length = 0 # def code(self,a): # return sum([ord(i) for i in a]) # def isFull(self): # return self.length == self.limit # def insert(self,value): # key,val = value.split(" ") # s = self.code(key) # co = 0 # now = 0 # while(co <= self.chain): # if(co != 0): # print ("collision number",co,"at",now) # if(co == self.chain): # break # now = (s + (0 if not co else co*co) ) % self.limit # if(self.data[now] == None): # self.data[now] = Data(key,val) # self.length += 1 # break # co += 1 # if(co >= self.chain): # print("Max of collisionChain") # def __str__(self): # return "\n".join(list(map(str,[ "#{0} {1}".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + "\n---------------------------" # print(" ***** Fun with hashing *****") # val,arr = input("Enter Input : ").split("/") # h = hash(int(val.split(" ")[0]),int(val.split(" ")[1])) # arr = arr.split(",") # for i in arr: # h.insert(i) # print(h) # if(h.isFull()): # print("This table is full !!!!!!") # break #4 # import math # class Data: # def __init__(self, value): # self.value = value # def __str__(self): # return str(self.value) # class hash: # def __init__(self,max,chain,t): # self.data = [None for i in range(max)] # self.limit = max # self.chain = chain # self.length = 0 # self.threshold = t # self.bu = list() # def code(self,a): # # return sum([ord(i) for i in a]) # return int(a) # def isFull(self): # return self.length == self.limit # def findNearPrime(self): # i = self.limit * 2 # while(True): # c = True # for j in range(2, int(math.sqrt(i)) + 1): # if(not i % j): # i += 1 # c = False # break # if c : # break # return i # def handlerIllegal(self,co,value): # if(self.length * 100 // self.limit >= self.threshold): # print("****** Data over threshold - Rehash !!! ******") # self.resize() # self.Rehash() # elif (co >= self.chain): # print("****** Max collision - Rehash !!! 
******") # self.resize() # self.Rehash() # def resize(self): # self.data += [None for i in range(self.findNearPrime() - self.limit)] # self.limit = len(self.data) # def Rehash(self): # for i in range(self.limit): # self.data[i] = None # for i in self.bu: # self.insert(i,False) # def insert(self,value,Rehash = True): # s = self.code(value) # co = 0 # now = 0 # while(co <= self.chain): # if(co != 0): # print ("collision number",co,"at",now) # if(co == self.chain): # break # now = (s + (0 if not co else co*co) ) % self.limit # if(self.data[now] == None): # self.data[now] = Data(value) # if(Rehash): # self.length += 1 # break # co += 1 # if(Rehash): # self.handlerIllegal(co,value) # def addBuff(self,value): # self.bu.append(value) # def __str__(self): # return "\n".join(list(map(str,[ "#{0} {1}".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + "\n----------------------------------------" # print(" ***** Rehashing *****") # val,arr = input("Enter Input : ").split("/") # h = hash(int(val.split(" ")[0]),int(val.split(" ")[1]),int(val.split(" ")[2])) # arr = arr.split() # print("Initial Table :",h,sep="\n") # for i in arr: # print("Add :",i) # h.addBuff(i) # h.insert(i) # print(h) # if(h.isFull()): # print("This table is full !!!!!!") # break # 5 boxes = 0 ans = -1 def solve(dpArr,list,box,i): global boxes global ans if(box == boxes): s = 0 for j in list: s += len(j) if(s == len(dpArr)): mx = 0 for j in list: if(sum(j) > mx): mx = sum(j) if(mx < ans or ans == -1): ans = mx return for j in range(1,len(dpArr) + 1): if ( i + j > len(dpArr) ): break solve(dpArr,list + [dpArr[i:i + j]],box + 1 ,i + j) inp = input("Enter Input : ") inp,boxes = list(map(int,inp.split("/")[0].split() )) , int( inp.split("/")[1]) # for i in range(1,len(inp)): # inp[i] += inp[i-1] solve(dpArr = inp,list = [],box = 0,i = 0) print("Minimum weigth for",boxes,"box(es) =",ans)
normal
{ "blob_id": "883b4de18dddede97f850e3a184a0e1072bda99e", "index": 814, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef solve(dpArr, list, box, i):\n global boxes\n global ans\n if box == boxes:\n s = 0\n for j in list:\n s += len(j)\n if s == len(dpArr):\n mx = 0\n for j in list:\n if sum(j) > mx:\n mx = sum(j)\n if mx < ans or ans == -1:\n ans = mx\n return\n for j in range(1, len(dpArr) + 1):\n if i + j > len(dpArr):\n break\n solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef solve(dpArr, list, box, i):\n global boxes\n global ans\n if box == boxes:\n s = 0\n for j in list:\n s += len(j)\n if s == len(dpArr):\n mx = 0\n for j in list:\n if sum(j) > mx:\n mx = sum(j)\n if mx < ans or ans == -1:\n ans = mx\n return\n for j in range(1, len(dpArr) + 1):\n if i + j > len(dpArr):\n break\n solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)\n\n\n<mask token>\nsolve(dpArr=inp, list=[], box=0, i=0)\nprint('Minimum weigth for', boxes, 'box(es) =', ans)\n", "step-4": "boxes = 0\nans = -1\n\n\ndef solve(dpArr, list, box, i):\n global boxes\n global ans\n if box == boxes:\n s = 0\n for j in list:\n s += len(j)\n if s == len(dpArr):\n mx = 0\n for j in list:\n if sum(j) > mx:\n mx = sum(j)\n if mx < ans or ans == -1:\n ans = mx\n return\n for j in range(1, len(dpArr) + 1):\n if i + j > len(dpArr):\n break\n solve(dpArr, list + [dpArr[i:i + j]], box + 1, i + j)\n\n\ninp = input('Enter Input : ')\ninp, boxes = list(map(int, inp.split('/')[0].split())), int(inp.split('/')[1])\nsolve(dpArr=inp, list=[], box=0, i=0)\nprint('Minimum weigth for', boxes, 'box(es) =', ans)\n", "step-5": "# #1\n# def bi_search(l, r, arr, x):\n# # Code Here\n# if(l == r):\n# return arr[r] == x\n \n# mid = (l + r)//2 + 1\n# if(arr[mid] > x):\n# return bi_search(l,mid-1,arr,x)\n# else:\n# return bi_search(mid,r,arr,x)\n\n# inp = input('Enter Input : ').split('/')\n# arr, k = list(map(int, inp[0].split())), int(inp[1])\n# print(bi_search(0, len(arr) - 1, sorted(arr), k))\n\n# #2\n# def bi_search(l, r, arr, x):\n# if(l == r):\n# if arr[l] > x :\n# return arr[l]\n# else: \n# return None\n\n# mid = (l + r)//2 + 1\n# res = None\n# if(arr[mid] > x):\n# res = bi_search(l,mid-1,arr,x)\n# else:\n# res = bi_search(mid,r,arr,x)\n# return res if res else (arr[mid] if arr[mid] > x else None)\n\n\n# inp = input('Enter Input : ').split('/')\n# arr, arr2 = sorted(list(map(int, inp[0].split()))), list(map(int, inp[1].split()))\n# for k in arr2:\n# res = bi_search(0, len(arr) - 1, arr, k) \n# print(res if res else \"No First Greater Value\")\n\n#3\n# class Data:\n# def __init__(self, key, value):\n# self.key = key\n# self.value = value\n\n# def __str__(self):\n# return \"({0}, {1})\".format(self.key, self.value)\n\n# class hash:\n\n# def __init__(self,max,chain):\n# self.data = [None for i in range(max)]\n# self.limit= max\n# self.chain= chain\n# self.length = 0\n\n# def code(self,a):\n# return sum([ord(i) for i in a]) \n\n# def isFull(self):\n# return self.length == self.limit\n\n# def insert(self,value):\n# key,val = value.split(\" \")\n# s = self.code(key)\n# co = 0\n# now = 0\n# while(co <= self.chain):\n# if(co != 0):\n# print (\"collision number\",co,\"at\",now)\n# if(co == self.chain):\n# break\n# now = (s + (0 if not co else co*co) ) % self.limit \n \n\n# if(self.data[now] == None):\n# self.data[now] = Data(key,val)\n# self.length += 1\n# break\n# co += 1\n\n# if(co >= self.chain):\n# print(\"Max of collisionChain\")\n\n\n# def __str__(self):\n# return 
\"\\n\".join(list(map(str,[ \"#{0}\t{1}\".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + \"\\n---------------------------\"\n\n\n# print(\" ***** Fun with hashing *****\")\n\n# val,arr = input(\"Enter Input : \").split(\"/\")\n\n# h = hash(int(val.split(\" \")[0]),int(val.split(\" \")[1]))\n\n# arr = arr.split(\",\")\n\n# for i in arr:\n# h.insert(i)\n# print(h)\n# if(h.isFull()):\n# print(\"This table is full !!!!!!\")\n# break\n\n\n#4\n# import math\n# class Data:\n# def __init__(self, value):\n# self.value = value\n\n# def __str__(self):\n# return str(self.value)\n\n# class hash:\n\n# def __init__(self,max,chain,t):\n# self.data = [None for i in range(max)]\n# self.limit = max\n# self.chain = chain\n# self.length = 0\n# self.threshold = t\n# self.bu = list()\n\n# def code(self,a):\n# # return sum([ord(i) for i in a]) \n# return int(a)\n\n# def isFull(self):\n# return self.length == self.limit\n\n# def findNearPrime(self):\n# i = self.limit * 2\n# while(True):\n# c = True\n# for j in range(2, int(math.sqrt(i)) + 1):\n# if(not i % j):\n# i += 1\n# c = False\n# break\n# if c :\n# break\n\n# return i\n\n# def handlerIllegal(self,co,value):\n# if(self.length * 100 // self.limit >= self.threshold):\n# print(\"****** Data over threshold - Rehash !!! ******\")\n# self.resize()\n# self.Rehash()\n# elif (co >= self.chain):\n# print(\"****** Max collision - Rehash !!! ******\")\n# self.resize()\n# self.Rehash()\n\n# def resize(self):\n# self.data += [None for i in range(self.findNearPrime() - self.limit)]\n# self.limit = len(self.data)\n\n# def Rehash(self):\n# for i in range(self.limit):\n# self.data[i] = None\n# for i in self.bu:\n# self.insert(i,False)\n\n# def insert(self,value,Rehash = True):\n# s = self.code(value)\n# co = 0\n# now = 0\n# while(co <= self.chain):\n# if(co != 0):\n# print (\"collision number\",co,\"at\",now)\n# if(co == self.chain):\n# break\n# now = (s + (0 if not co else co*co) ) % self.limit \n\n# if(self.data[now] == None):\n# self.data[now] = Data(value)\n# if(Rehash):\n# self.length += 1\n# break\n# co += 1\n\n# if(Rehash):\n# self.handlerIllegal(co,value)\n\n# def addBuff(self,value):\n# self.bu.append(value)\n\n# def __str__(self):\n# return \"\\n\".join(list(map(str,[ \"#{0}\t{1}\".format(str(i+1),self.data[i]) for i in range( len(self.data) ) ] ) ) ) + \"\\n----------------------------------------\"\n\n\n# print(\" ***** Rehashing *****\")\n\n# val,arr = input(\"Enter Input : \").split(\"/\")\n\n# h = hash(int(val.split(\" \")[0]),int(val.split(\" \")[1]),int(val.split(\" \")[2]))\n\n# arr = arr.split()\n\n# print(\"Initial Table :\",h,sep=\"\\n\")\n\n# for i in arr:\n# print(\"Add :\",i)\n# h.addBuff(i)\n# h.insert(i)\n# print(h)\n# if(h.isFull()):\n# print(\"This table is full !!!!!!\")\n# break\n\n\n# 5\nboxes = 0\nans = -1\ndef solve(dpArr,list,box,i):\n global boxes \n global ans\n if(box == boxes):\n s = 0\n for j in list:\n s += len(j)\n \n if(s == len(dpArr)):\n mx = 0\n for j in list:\n if(sum(j) > mx):\n mx = sum(j)\n\n if(mx < ans or ans == -1):\n ans = mx \n return\n\n for j in range(1,len(dpArr) + 1):\n if ( i + j > len(dpArr) ):\n break\n solve(dpArr,list + [dpArr[i:i + j]],box + 1 ,i + j)\n\n\ninp = input(\"Enter Input : \")\n\ninp,boxes = list(map(int,inp.split(\"/\")[0].split() )) , int( inp.split(\"/\")[1])\n\n# for i in range(1,len(inp)):\n# inp[i] += inp[i-1]\n\nsolve(dpArr = inp,list = [],box = 0,i = 0)\nprint(\"Minimum weigth for\",boxes,\"box(es) =\",ans)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import torch import torch.nn as nn import torch.optim as optim import torchtext import absl.flags import absl.app import pickle import yaml import numpy as np from tqdm import tqdm from core import model import core.dnc.explanation from core import functions from core.config import ControllerConfig, MemoryConfig, TrainingConfig # user flags FLAGS = absl.flags.FLAGS absl.flags.DEFINE_string("path_model", None, "Path of the trained model") absl.flags.DEFINE_string("path_training", None, "Path where is stored the csv dataset") absl.flags.DEFINE_string("path_val", None, "Path where is stored the csv dataset") absl.flags.DEFINE_integer("top_k", 25, "Number of read cells considered for each step") absl.flags.DEFINE_boolean("use_surrogate", False, " Whether to extract surrogate ground truth for explanation") absl.flags.mark_flag_as_required("path_model") absl.flags.mark_flag_as_required("path_training") absl.flags.mark_flag_as_required("path_val") def run_explanations(network, explanation_module, data_iterator): network.eval() best_accuracy = 0 worst_accuracy = 0 best_correct = 0 worst_correct = 0 covered = 0 total = 0 #print stuff pbar = tqdm() pbar.reset(total=len(data_iterator)) for _, data in enumerate(data_iterator): (_, p1, p2, p3, p4, a1, a2), y = data y = y - 1 # gold index story = torch.cat((p1,p2,p3,p4),1) background = [p1,p2,p3,p4] answers = [a1,a2] total += y.size(0) #get output with torch.no_grad(): outcome, rh, wh = network(story,[a1,a2]) predicted = torch.argmax(outcome, 1) for index_elem in range(p1.shape[0]): elem_background = [p1[index_elem:index_elem+1,:], p2[index_elem:index_elem+1,:],p3[index_elem:index_elem+1,:],p4[index_elem:index_elem+1,:]] elem_answers = [a1[index_elem:index_elem+1,:], a2[index_elem:index_elem+1,:]] elem_predicted = predicted[index_elem] sgt = explanation_module.get_sgt(network, elem_background,elem_answers ) # case where there are contraddictory surrogate ground truth if len(set(sgt)) > 1: covered += 1 rank, _ = explanation_module.get_rank(elem_background,wh[0][0],rh[elem_predicted.item()+1][0] ) best_prediction = sgt[rank[0]-1] best_correct += (elem_predicted == best_prediction).sum().item() worst_prediction = sgt[rank[-1]-1] worst_correct += (elem_predicted == worst_prediction).sum().item() best_accuracy = float(best_correct / covered) if best_correct > 0 else 0 worst_accuracy = float(worst_correct / covered) if worst_correct > 0 else 0 #print pbar.set_postfix({'Best':best_accuracy,'Worst':worst_accuracy, 'cov':covered/total}) pbar.update() pbar.close() return best_accuracy, worst_accuracy def run_training_epoch(network, data_iterator, loss_function, optimizer, max_grad_norm): network.train() # init cumulative variables accuracy = 0 correct = 0 total = 0 losses = [] # print utility pbar = tqdm() pbar.reset(total=len(data_iterator)) #data_iterator.init_epoch() for _, data in enumerate(data_iterator): optimizer.zero_grad() (_, p1, p2, p3, p4, a1, a2), y = data y = y - 1 # gold index story = torch.cat((p1,p2,p3,p4),1) # get output outcome, _, _ = network(story,[a1,a2]) predicted = torch.argmax(outcome, 1) # get loss loss = loss_function(outcome,y) loss.backward() losses.append(loss.item()) # update metrics correct += (predicted == y).sum().item() total += y.size(0) accuracy = float(correct / total) if correct > 0 else 0 # update weights nn.utils.clip_grad_norm_(network.parameters(), max_norm=max_grad_norm) optimizer.step() pbar.set_postfix({'Acc':accuracy}) #print pbar.update() pbar.close() return accuracy, np.mean(losses) def run_val_epoch(network, 
data_iterator): network.eval() accuracy = 0 correct = 0 total = 0 pbar = tqdm() pbar.reset(total=len(data_iterator)) with torch.no_grad(): for _, data in enumerate(data_iterator): (_,p1, p2, p3, p4, a1, a2), y = data y = y - 1 # gold index story = torch.cat((p1,p2,p3,p4),1) outcome, _, _ = network(story,[a1,a2]) # update metrics predicted = torch.argmax(outcome, 1) correct += (predicted == y).sum().item() total += y.size(0) accuracy = float(correct / total) if correct > 0 else 0 #print pbar.set_postfix({'Acc':accuracy}) pbar.update() pbar.close() return accuracy def run_training(path_training, path_val, path_model, top_k, required_explanation): #get configuration from dict and user config_dict = yaml.safe_load(open("config.yaml", 'r')) controller_config = ControllerConfig(**config_dict['controller']) memory_config = MemoryConfig(**config_dict['memory']) training_parameters = TrainingConfig(**config_dict['training']) # get available device DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") train_dataset = functions.get_cloze_dataset(path_training) val_dataset = functions.get_cloze_dataset(path_val) train_iterator = torchtext.data.Iterator(train_dataset,batch_size=training_parameters.batch_size, train=True, shuffle=True, device=DEVICE) val_iterator = torchtext.data.Iterator(val_dataset,batch_size=training_parameters.batch_size, train=False, sort=False,device=DEVICE) # Get Embedding vocab = torch.load("dataset/vocab")['vocab'] embedding_pretrained_weights = vocab.vectors pre_trained_embeddings = torch.as_tensor(embedding_pretrained_weights).to(DEVICE) padding_index=1 embedding_dim = len(embedding_pretrained_weights[0]) #init model network = model.ClozeModel(controller_config, memory_config, embedding_dim,len(pre_trained_embeddings),dropout=training_parameters.dropout).to(DEVICE) network.embeddings.weight.data.copy_(pre_trained_embeddings) network.embeddings.weight.requires_grad = True explanation_mod = core.dnc.explanation.ExplanationModule(padding_value=padding_index,top_k=top_k) loss_function = nn.CrossEntropyLoss() optimizer = optim.Adam(network.parameters(), lr=training_parameters.learning_rate, eps=1e-7) # initialize variables top1_acc = 0.0 for epoch in range(1,11): print("Running epoch {}".format(epoch)) _,_ = run_training_epoch(network,train_iterator,loss_function,optimizer,training_parameters.max_grad_norm) print("Validation epoch {}".format(epoch)) accuracy = run_val_epoch(network,val_iterator) if required_explanation: print("Explaining training dataset") run_explanations(network,explanation_mod,train_iterator) print("Explain validation dataset") run_explanations(network,explanation_mod,val_iterator) if accuracy > top1_acc: top1_acc = accuracy print("saving model...") checkpoint = {'controller_config':config_dict['controller'], 'memory_config':config_dict['memory'], 'state_dict':network.state_dict(), 'len_embeddings':len(pre_trained_embeddings)} torch.save(checkpoint, path_model) def main(argv): path_model = FLAGS.path_model path_training = FLAGS.path_training path_val = FLAGS.path_val top_k = FLAGS.top_k use_surr = FLAGS.use_surrogate run_training(path_training,path_val, path_model, top_k, use_surr) print("Training process ended! The new model is stored on {}.".format(path_model)) if __name__ == '__main__': absl.app.run(main)
normal
{ "blob_id": "00dbcae2d3941c9ef4c8b6753b8f6f7a46417400", "index": 5110, "step-1": "<mask token>\n\n\ndef run_explanations(network, explanation_module, data_iterator):\n network.eval()\n best_accuracy = 0\n worst_accuracy = 0\n best_correct = 0\n worst_correct = 0\n covered = 0\n total = 0\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n for _, data in enumerate(data_iterator):\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n background = [p1, p2, p3, p4]\n answers = [a1, a2]\n total += y.size(0)\n with torch.no_grad():\n outcome, rh, wh = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n for index_elem in range(p1.shape[0]):\n elem_background = [p1[index_elem:index_elem + 1, :], p2[\n index_elem:index_elem + 1, :], p3[index_elem:index_elem +\n 1, :], p4[index_elem:index_elem + 1, :]]\n elem_answers = [a1[index_elem:index_elem + 1, :], a2[\n index_elem:index_elem + 1, :]]\n elem_predicted = predicted[index_elem]\n sgt = explanation_module.get_sgt(network, elem_background,\n elem_answers)\n if len(set(sgt)) > 1:\n covered += 1\n rank, _ = explanation_module.get_rank(elem_background,\n wh[0][0], rh[elem_predicted.item() + 1][0])\n best_prediction = sgt[rank[0] - 1]\n best_correct += (elem_predicted == best_prediction).sum(\n ).item()\n worst_prediction = sgt[rank[-1] - 1]\n worst_correct += (elem_predicted == worst_prediction).sum(\n ).item()\n best_accuracy = float(best_correct / covered\n ) if best_correct > 0 else 0\n worst_accuracy = float(worst_correct / covered\n ) if worst_correct > 0 else 0\n pbar.set_postfix({'Best': best_accuracy, 'Worst': worst_accuracy,\n 'cov': covered / total})\n pbar.update()\n pbar.close()\n return best_accuracy, worst_accuracy\n\n\n<mask token>\n\n\ndef run_val_epoch(network, data_iterator):\n network.eval()\n accuracy = 0\n correct = 0\n total = 0\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n with torch.no_grad():\n for _, data in enumerate(data_iterator):\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n outcome, _, _ = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n correct += (predicted == y).sum().item()\n total += y.size(0)\n accuracy = float(correct / total) if correct > 0 else 0\n pbar.set_postfix({'Acc': accuracy})\n pbar.update()\n pbar.close()\n return accuracy\n\n\n<mask token>\n\n\ndef main(argv):\n path_model = FLAGS.path_model\n path_training = FLAGS.path_training\n path_val = FLAGS.path_val\n top_k = FLAGS.top_k\n use_surr = FLAGS.use_surrogate\n run_training(path_training, path_val, path_model, top_k, use_surr)\n print('Training process ended! 
The new model is stored on {}.'.format(\n path_model))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef run_explanations(network, explanation_module, data_iterator):\n network.eval()\n best_accuracy = 0\n worst_accuracy = 0\n best_correct = 0\n worst_correct = 0\n covered = 0\n total = 0\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n for _, data in enumerate(data_iterator):\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n background = [p1, p2, p3, p4]\n answers = [a1, a2]\n total += y.size(0)\n with torch.no_grad():\n outcome, rh, wh = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n for index_elem in range(p1.shape[0]):\n elem_background = [p1[index_elem:index_elem + 1, :], p2[\n index_elem:index_elem + 1, :], p3[index_elem:index_elem +\n 1, :], p4[index_elem:index_elem + 1, :]]\n elem_answers = [a1[index_elem:index_elem + 1, :], a2[\n index_elem:index_elem + 1, :]]\n elem_predicted = predicted[index_elem]\n sgt = explanation_module.get_sgt(network, elem_background,\n elem_answers)\n if len(set(sgt)) > 1:\n covered += 1\n rank, _ = explanation_module.get_rank(elem_background,\n wh[0][0], rh[elem_predicted.item() + 1][0])\n best_prediction = sgt[rank[0] - 1]\n best_correct += (elem_predicted == best_prediction).sum(\n ).item()\n worst_prediction = sgt[rank[-1] - 1]\n worst_correct += (elem_predicted == worst_prediction).sum(\n ).item()\n best_accuracy = float(best_correct / covered\n ) if best_correct > 0 else 0\n worst_accuracy = float(worst_correct / covered\n ) if worst_correct > 0 else 0\n pbar.set_postfix({'Best': best_accuracy, 'Worst': worst_accuracy,\n 'cov': covered / total})\n pbar.update()\n pbar.close()\n return best_accuracy, worst_accuracy\n\n\ndef run_training_epoch(network, data_iterator, loss_function, optimizer,\n max_grad_norm):\n network.train()\n accuracy = 0\n correct = 0\n total = 0\n losses = []\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n for _, data in enumerate(data_iterator):\n optimizer.zero_grad()\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n outcome, _, _ = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n loss = loss_function(outcome, y)\n loss.backward()\n losses.append(loss.item())\n correct += (predicted == y).sum().item()\n total += y.size(0)\n accuracy = float(correct / total) if correct > 0 else 0\n nn.utils.clip_grad_norm_(network.parameters(), max_norm=max_grad_norm)\n optimizer.step()\n pbar.set_postfix({'Acc': accuracy})\n pbar.update()\n pbar.close()\n return accuracy, np.mean(losses)\n\n\ndef run_val_epoch(network, data_iterator):\n network.eval()\n accuracy = 0\n correct = 0\n total = 0\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n with torch.no_grad():\n for _, data in enumerate(data_iterator):\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n outcome, _, _ = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n correct += (predicted == y).sum().item()\n total += y.size(0)\n accuracy = float(correct / total) if correct > 0 else 0\n pbar.set_postfix({'Acc': accuracy})\n pbar.update()\n pbar.close()\n return accuracy\n\n\ndef run_training(path_training, path_val, path_model, top_k,\n required_explanation):\n config_dict = yaml.safe_load(open('config.yaml', 'r'))\n controller_config = ControllerConfig(**config_dict['controller'])\n memory_config = MemoryConfig(**config_dict['memory'])\n training_parameters = 
TrainingConfig(**config_dict['training'])\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n train_dataset = functions.get_cloze_dataset(path_training)\n val_dataset = functions.get_cloze_dataset(path_val)\n train_iterator = torchtext.data.Iterator(train_dataset, batch_size=\n training_parameters.batch_size, train=True, shuffle=True, device=DEVICE\n )\n val_iterator = torchtext.data.Iterator(val_dataset, batch_size=\n training_parameters.batch_size, train=False, sort=False, device=DEVICE)\n vocab = torch.load('dataset/vocab')['vocab']\n embedding_pretrained_weights = vocab.vectors\n pre_trained_embeddings = torch.as_tensor(embedding_pretrained_weights).to(\n DEVICE)\n padding_index = 1\n embedding_dim = len(embedding_pretrained_weights[0])\n network = model.ClozeModel(controller_config, memory_config,\n embedding_dim, len(pre_trained_embeddings), dropout=\n training_parameters.dropout).to(DEVICE)\n network.embeddings.weight.data.copy_(pre_trained_embeddings)\n network.embeddings.weight.requires_grad = True\n explanation_mod = core.dnc.explanation.ExplanationModule(padding_value=\n padding_index, top_k=top_k)\n loss_function = nn.CrossEntropyLoss()\n optimizer = optim.Adam(network.parameters(), lr=training_parameters.\n learning_rate, eps=1e-07)\n top1_acc = 0.0\n for epoch in range(1, 11):\n print('Running epoch {}'.format(epoch))\n _, _ = run_training_epoch(network, train_iterator, loss_function,\n optimizer, training_parameters.max_grad_norm)\n print('Validation epoch {}'.format(epoch))\n accuracy = run_val_epoch(network, val_iterator)\n if required_explanation:\n print('Explaining training dataset')\n run_explanations(network, explanation_mod, train_iterator)\n print('Explain validation dataset')\n run_explanations(network, explanation_mod, val_iterator)\n if accuracy > top1_acc:\n top1_acc = accuracy\n print('saving model...')\n checkpoint = {'controller_config': config_dict['controller'],\n 'memory_config': config_dict['memory'], 'state_dict':\n network.state_dict(), 'len_embeddings': len(\n pre_trained_embeddings)}\n torch.save(checkpoint, path_model)\n\n\ndef main(argv):\n path_model = FLAGS.path_model\n path_training = FLAGS.path_training\n path_val = FLAGS.path_val\n top_k = FLAGS.top_k\n use_surr = FLAGS.use_surrogate\n run_training(path_training, path_val, path_model, top_k, use_surr)\n print('Training process ended! 
The new model is stored on {}.'.format(\n path_model))\n\n\n<mask token>\n", "step-3": "<mask token>\nabsl.flags.DEFINE_string('path_model', None, 'Path of the trained model')\nabsl.flags.DEFINE_string('path_training', None,\n 'Path where is stored the csv dataset')\nabsl.flags.DEFINE_string('path_val', None,\n 'Path where is stored the csv dataset')\nabsl.flags.DEFINE_integer('top_k', 25,\n 'Number of read cells considered for each step')\nabsl.flags.DEFINE_boolean('use_surrogate', False,\n ' Whether to extract surrogate ground truth for explanation')\nabsl.flags.mark_flag_as_required('path_model')\nabsl.flags.mark_flag_as_required('path_training')\nabsl.flags.mark_flag_as_required('path_val')\n\n\ndef run_explanations(network, explanation_module, data_iterator):\n network.eval()\n best_accuracy = 0\n worst_accuracy = 0\n best_correct = 0\n worst_correct = 0\n covered = 0\n total = 0\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n for _, data in enumerate(data_iterator):\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n background = [p1, p2, p3, p4]\n answers = [a1, a2]\n total += y.size(0)\n with torch.no_grad():\n outcome, rh, wh = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n for index_elem in range(p1.shape[0]):\n elem_background = [p1[index_elem:index_elem + 1, :], p2[\n index_elem:index_elem + 1, :], p3[index_elem:index_elem +\n 1, :], p4[index_elem:index_elem + 1, :]]\n elem_answers = [a1[index_elem:index_elem + 1, :], a2[\n index_elem:index_elem + 1, :]]\n elem_predicted = predicted[index_elem]\n sgt = explanation_module.get_sgt(network, elem_background,\n elem_answers)\n if len(set(sgt)) > 1:\n covered += 1\n rank, _ = explanation_module.get_rank(elem_background,\n wh[0][0], rh[elem_predicted.item() + 1][0])\n best_prediction = sgt[rank[0] - 1]\n best_correct += (elem_predicted == best_prediction).sum(\n ).item()\n worst_prediction = sgt[rank[-1] - 1]\n worst_correct += (elem_predicted == worst_prediction).sum(\n ).item()\n best_accuracy = float(best_correct / covered\n ) if best_correct > 0 else 0\n worst_accuracy = float(worst_correct / covered\n ) if worst_correct > 0 else 0\n pbar.set_postfix({'Best': best_accuracy, 'Worst': worst_accuracy,\n 'cov': covered / total})\n pbar.update()\n pbar.close()\n return best_accuracy, worst_accuracy\n\n\ndef run_training_epoch(network, data_iterator, loss_function, optimizer,\n max_grad_norm):\n network.train()\n accuracy = 0\n correct = 0\n total = 0\n losses = []\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n for _, data in enumerate(data_iterator):\n optimizer.zero_grad()\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n outcome, _, _ = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n loss = loss_function(outcome, y)\n loss.backward()\n losses.append(loss.item())\n correct += (predicted == y).sum().item()\n total += y.size(0)\n accuracy = float(correct / total) if correct > 0 else 0\n nn.utils.clip_grad_norm_(network.parameters(), max_norm=max_grad_norm)\n optimizer.step()\n pbar.set_postfix({'Acc': accuracy})\n pbar.update()\n pbar.close()\n return accuracy, np.mean(losses)\n\n\ndef run_val_epoch(network, data_iterator):\n network.eval()\n accuracy = 0\n correct = 0\n total = 0\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n with torch.no_grad():\n for _, data in enumerate(data_iterator):\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n 
outcome, _, _ = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n correct += (predicted == y).sum().item()\n total += y.size(0)\n accuracy = float(correct / total) if correct > 0 else 0\n pbar.set_postfix({'Acc': accuracy})\n pbar.update()\n pbar.close()\n return accuracy\n\n\ndef run_training(path_training, path_val, path_model, top_k,\n required_explanation):\n config_dict = yaml.safe_load(open('config.yaml', 'r'))\n controller_config = ControllerConfig(**config_dict['controller'])\n memory_config = MemoryConfig(**config_dict['memory'])\n training_parameters = TrainingConfig(**config_dict['training'])\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n train_dataset = functions.get_cloze_dataset(path_training)\n val_dataset = functions.get_cloze_dataset(path_val)\n train_iterator = torchtext.data.Iterator(train_dataset, batch_size=\n training_parameters.batch_size, train=True, shuffle=True, device=DEVICE\n )\n val_iterator = torchtext.data.Iterator(val_dataset, batch_size=\n training_parameters.batch_size, train=False, sort=False, device=DEVICE)\n vocab = torch.load('dataset/vocab')['vocab']\n embedding_pretrained_weights = vocab.vectors\n pre_trained_embeddings = torch.as_tensor(embedding_pretrained_weights).to(\n DEVICE)\n padding_index = 1\n embedding_dim = len(embedding_pretrained_weights[0])\n network = model.ClozeModel(controller_config, memory_config,\n embedding_dim, len(pre_trained_embeddings), dropout=\n training_parameters.dropout).to(DEVICE)\n network.embeddings.weight.data.copy_(pre_trained_embeddings)\n network.embeddings.weight.requires_grad = True\n explanation_mod = core.dnc.explanation.ExplanationModule(padding_value=\n padding_index, top_k=top_k)\n loss_function = nn.CrossEntropyLoss()\n optimizer = optim.Adam(network.parameters(), lr=training_parameters.\n learning_rate, eps=1e-07)\n top1_acc = 0.0\n for epoch in range(1, 11):\n print('Running epoch {}'.format(epoch))\n _, _ = run_training_epoch(network, train_iterator, loss_function,\n optimizer, training_parameters.max_grad_norm)\n print('Validation epoch {}'.format(epoch))\n accuracy = run_val_epoch(network, val_iterator)\n if required_explanation:\n print('Explaining training dataset')\n run_explanations(network, explanation_mod, train_iterator)\n print('Explain validation dataset')\n run_explanations(network, explanation_mod, val_iterator)\n if accuracy > top1_acc:\n top1_acc = accuracy\n print('saving model...')\n checkpoint = {'controller_config': config_dict['controller'],\n 'memory_config': config_dict['memory'], 'state_dict':\n network.state_dict(), 'len_embeddings': len(\n pre_trained_embeddings)}\n torch.save(checkpoint, path_model)\n\n\ndef main(argv):\n path_model = FLAGS.path_model\n path_training = FLAGS.path_training\n path_val = FLAGS.path_val\n top_k = FLAGS.top_k\n use_surr = FLAGS.use_surrogate\n run_training(path_training, path_val, path_model, top_k, use_surr)\n print('Training process ended! 
The new model is stored on {}.'.format(\n path_model))\n\n\nif __name__ == '__main__':\n absl.app.run(main)\n", "step-4": "<mask token>\nFLAGS = absl.flags.FLAGS\nabsl.flags.DEFINE_string('path_model', None, 'Path of the trained model')\nabsl.flags.DEFINE_string('path_training', None,\n 'Path where is stored the csv dataset')\nabsl.flags.DEFINE_string('path_val', None,\n 'Path where is stored the csv dataset')\nabsl.flags.DEFINE_integer('top_k', 25,\n 'Number of read cells considered for each step')\nabsl.flags.DEFINE_boolean('use_surrogate', False,\n ' Whether to extract surrogate ground truth for explanation')\nabsl.flags.mark_flag_as_required('path_model')\nabsl.flags.mark_flag_as_required('path_training')\nabsl.flags.mark_flag_as_required('path_val')\n\n\ndef run_explanations(network, explanation_module, data_iterator):\n network.eval()\n best_accuracy = 0\n worst_accuracy = 0\n best_correct = 0\n worst_correct = 0\n covered = 0\n total = 0\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n for _, data in enumerate(data_iterator):\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n background = [p1, p2, p3, p4]\n answers = [a1, a2]\n total += y.size(0)\n with torch.no_grad():\n outcome, rh, wh = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n for index_elem in range(p1.shape[0]):\n elem_background = [p1[index_elem:index_elem + 1, :], p2[\n index_elem:index_elem + 1, :], p3[index_elem:index_elem +\n 1, :], p4[index_elem:index_elem + 1, :]]\n elem_answers = [a1[index_elem:index_elem + 1, :], a2[\n index_elem:index_elem + 1, :]]\n elem_predicted = predicted[index_elem]\n sgt = explanation_module.get_sgt(network, elem_background,\n elem_answers)\n if len(set(sgt)) > 1:\n covered += 1\n rank, _ = explanation_module.get_rank(elem_background,\n wh[0][0], rh[elem_predicted.item() + 1][0])\n best_prediction = sgt[rank[0] - 1]\n best_correct += (elem_predicted == best_prediction).sum(\n ).item()\n worst_prediction = sgt[rank[-1] - 1]\n worst_correct += (elem_predicted == worst_prediction).sum(\n ).item()\n best_accuracy = float(best_correct / covered\n ) if best_correct > 0 else 0\n worst_accuracy = float(worst_correct / covered\n ) if worst_correct > 0 else 0\n pbar.set_postfix({'Best': best_accuracy, 'Worst': worst_accuracy,\n 'cov': covered / total})\n pbar.update()\n pbar.close()\n return best_accuracy, worst_accuracy\n\n\ndef run_training_epoch(network, data_iterator, loss_function, optimizer,\n max_grad_norm):\n network.train()\n accuracy = 0\n correct = 0\n total = 0\n losses = []\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n for _, data in enumerate(data_iterator):\n optimizer.zero_grad()\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n outcome, _, _ = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n loss = loss_function(outcome, y)\n loss.backward()\n losses.append(loss.item())\n correct += (predicted == y).sum().item()\n total += y.size(0)\n accuracy = float(correct / total) if correct > 0 else 0\n nn.utils.clip_grad_norm_(network.parameters(), max_norm=max_grad_norm)\n optimizer.step()\n pbar.set_postfix({'Acc': accuracy})\n pbar.update()\n pbar.close()\n return accuracy, np.mean(losses)\n\n\ndef run_val_epoch(network, data_iterator):\n network.eval()\n accuracy = 0\n correct = 0\n total = 0\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n with torch.no_grad():\n for _, data in enumerate(data_iterator):\n (_, p1, p2, p3, p4, a1, a2), y = 
data\n y = y - 1\n story = torch.cat((p1, p2, p3, p4), 1)\n outcome, _, _ = network(story, [a1, a2])\n predicted = torch.argmax(outcome, 1)\n correct += (predicted == y).sum().item()\n total += y.size(0)\n accuracy = float(correct / total) if correct > 0 else 0\n pbar.set_postfix({'Acc': accuracy})\n pbar.update()\n pbar.close()\n return accuracy\n\n\ndef run_training(path_training, path_val, path_model, top_k,\n required_explanation):\n config_dict = yaml.safe_load(open('config.yaml', 'r'))\n controller_config = ControllerConfig(**config_dict['controller'])\n memory_config = MemoryConfig(**config_dict['memory'])\n training_parameters = TrainingConfig(**config_dict['training'])\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n train_dataset = functions.get_cloze_dataset(path_training)\n val_dataset = functions.get_cloze_dataset(path_val)\n train_iterator = torchtext.data.Iterator(train_dataset, batch_size=\n training_parameters.batch_size, train=True, shuffle=True, device=DEVICE\n )\n val_iterator = torchtext.data.Iterator(val_dataset, batch_size=\n training_parameters.batch_size, train=False, sort=False, device=DEVICE)\n vocab = torch.load('dataset/vocab')['vocab']\n embedding_pretrained_weights = vocab.vectors\n pre_trained_embeddings = torch.as_tensor(embedding_pretrained_weights).to(\n DEVICE)\n padding_index = 1\n embedding_dim = len(embedding_pretrained_weights[0])\n network = model.ClozeModel(controller_config, memory_config,\n embedding_dim, len(pre_trained_embeddings), dropout=\n training_parameters.dropout).to(DEVICE)\n network.embeddings.weight.data.copy_(pre_trained_embeddings)\n network.embeddings.weight.requires_grad = True\n explanation_mod = core.dnc.explanation.ExplanationModule(padding_value=\n padding_index, top_k=top_k)\n loss_function = nn.CrossEntropyLoss()\n optimizer = optim.Adam(network.parameters(), lr=training_parameters.\n learning_rate, eps=1e-07)\n top1_acc = 0.0\n for epoch in range(1, 11):\n print('Running epoch {}'.format(epoch))\n _, _ = run_training_epoch(network, train_iterator, loss_function,\n optimizer, training_parameters.max_grad_norm)\n print('Validation epoch {}'.format(epoch))\n accuracy = run_val_epoch(network, val_iterator)\n if required_explanation:\n print('Explaining training dataset')\n run_explanations(network, explanation_mod, train_iterator)\n print('Explain validation dataset')\n run_explanations(network, explanation_mod, val_iterator)\n if accuracy > top1_acc:\n top1_acc = accuracy\n print('saving model...')\n checkpoint = {'controller_config': config_dict['controller'],\n 'memory_config': config_dict['memory'], 'state_dict':\n network.state_dict(), 'len_embeddings': len(\n pre_trained_embeddings)}\n torch.save(checkpoint, path_model)\n\n\ndef main(argv):\n path_model = FLAGS.path_model\n path_training = FLAGS.path_training\n path_val = FLAGS.path_val\n top_k = FLAGS.top_k\n use_surr = FLAGS.use_surrogate\n run_training(path_training, path_val, path_model, top_k, use_surr)\n print('Training process ended! 
The new model is stored on {}.'.format(\n path_model))\n\n\nif __name__ == '__main__':\n absl.app.run(main)\n", "step-5": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchtext\nimport absl.flags\nimport absl.app\nimport pickle\nimport yaml\nimport numpy as np\nfrom tqdm import tqdm\nfrom core import model\nimport core.dnc.explanation\nfrom core import functions\nfrom core.config import ControllerConfig, MemoryConfig, TrainingConfig\n\n# user flags\nFLAGS = absl.flags.FLAGS\n\nabsl.flags.DEFINE_string(\"path_model\", None, \"Path of the trained model\")\nabsl.flags.DEFINE_string(\"path_training\", None, \"Path where is stored the csv dataset\")\nabsl.flags.DEFINE_string(\"path_val\", None, \"Path where is stored the csv dataset\")\nabsl.flags.DEFINE_integer(\"top_k\", 25, \"Number of read cells considered for each step\")\nabsl.flags.DEFINE_boolean(\"use_surrogate\", False, \" Whether to extract surrogate ground truth for explanation\")\n\nabsl.flags.mark_flag_as_required(\"path_model\")\nabsl.flags.mark_flag_as_required(\"path_training\")\nabsl.flags.mark_flag_as_required(\"path_val\")\n\n\ndef run_explanations(network, explanation_module, data_iterator):\n network.eval()\n best_accuracy = 0\n worst_accuracy = 0\n best_correct = 0\n worst_correct = 0\n covered = 0\n total = 0\n\n #print stuff \n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n\n for _, data in enumerate(data_iterator): \n \n (_, p1, p2, p3, p4, a1, a2), y = data\n\n y = y - 1 # gold index\n story = torch.cat((p1,p2,p3,p4),1)\n background = [p1,p2,p3,p4]\n answers = [a1,a2]\n total += y.size(0)\n #get output \n with torch.no_grad():\n outcome, rh, wh = network(story,[a1,a2])\n predicted = torch.argmax(outcome, 1)\n\n for index_elem in range(p1.shape[0]):\n elem_background = [p1[index_elem:index_elem+1,:], p2[index_elem:index_elem+1,:],p3[index_elem:index_elem+1,:],p4[index_elem:index_elem+1,:]]\n elem_answers = [a1[index_elem:index_elem+1,:], a2[index_elem:index_elem+1,:]]\n elem_predicted = predicted[index_elem]\n sgt = explanation_module.get_sgt(network, elem_background,elem_answers )\n \n # case where there are contraddictory surrogate ground truth\n if len(set(sgt)) > 1:\n covered += 1\n rank, _ = explanation_module.get_rank(elem_background,wh[0][0],rh[elem_predicted.item()+1][0] )\n best_prediction = sgt[rank[0]-1]\n best_correct += (elem_predicted == best_prediction).sum().item()\n worst_prediction = sgt[rank[-1]-1]\n worst_correct += (elem_predicted == worst_prediction).sum().item()\n best_accuracy = float(best_correct / covered) if best_correct > 0 else 0\n worst_accuracy = float(worst_correct / covered) if worst_correct > 0 else 0\n #print\n pbar.set_postfix({'Best':best_accuracy,'Worst':worst_accuracy, \n 'cov':covered/total})\n pbar.update()\n\n pbar.close()\n return best_accuracy, worst_accuracy\n\ndef run_training_epoch(network, data_iterator, loss_function, optimizer, max_grad_norm):\n network.train()\n\n # init cumulative variables\n accuracy = 0\n correct = 0\n total = 0\n losses = []\n\n # print utility\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n\n #data_iterator.init_epoch()\n\n for _, data in enumerate(data_iterator): \n \n optimizer.zero_grad()\n (_, p1, p2, p3, p4, a1, a2), y = data\n y = y - 1 # gold index\n story = torch.cat((p1,p2,p3,p4),1)\n\n # get output\n outcome, _, _ = network(story,[a1,a2])\n predicted = torch.argmax(outcome, 1)\n\n # get loss\n loss = loss_function(outcome,y)\n loss.backward()\n losses.append(loss.item())\n \n # update 
metrics\n correct += (predicted == y).sum().item()\n total += y.size(0)\n accuracy = float(correct / total) if correct > 0 else 0 \n \n # update weights\n nn.utils.clip_grad_norm_(network.parameters(), max_norm=max_grad_norm)\n optimizer.step()\n pbar.set_postfix({'Acc':accuracy})\n #print\n pbar.update()\n\n pbar.close()\n return accuracy, np.mean(losses)\n\ndef run_val_epoch(network, data_iterator):\n network.eval()\n\n accuracy = 0\n correct = 0\n total = 0\n pbar = tqdm()\n pbar.reset(total=len(data_iterator))\n\n with torch.no_grad():\n for _, data in enumerate(data_iterator):\n (_,p1, p2, p3, p4, a1, a2), y = data\n\n y = y - 1 # gold index\n story = torch.cat((p1,p2,p3,p4),1)\n\n\n outcome, _, _ = network(story,[a1,a2])\n # update metrics\n predicted = torch.argmax(outcome, 1)\n correct += (predicted == y).sum().item()\n total += y.size(0)\n accuracy = float(correct / total) if correct > 0 else 0 \n\n \n #print\n pbar.set_postfix({'Acc':accuracy})\n pbar.update()\n pbar.close()\n return accuracy\n\ndef run_training(path_training, path_val, path_model, top_k, required_explanation):\n #get configuration from dict and user\n config_dict = yaml.safe_load(open(\"config.yaml\", 'r'))\n controller_config = ControllerConfig(**config_dict['controller'])\n memory_config = MemoryConfig(**config_dict['memory'])\n training_parameters = TrainingConfig(**config_dict['training'])\n\n # get available device\n DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n train_dataset = functions.get_cloze_dataset(path_training)\n val_dataset = functions.get_cloze_dataset(path_val)\n\n train_iterator = torchtext.data.Iterator(train_dataset,batch_size=training_parameters.batch_size, train=True, shuffle=True, device=DEVICE)\n val_iterator = torchtext.data.Iterator(val_dataset,batch_size=training_parameters.batch_size, train=False, sort=False,device=DEVICE)\n\n # Get Embedding\n vocab = torch.load(\"dataset/vocab\")['vocab']\n embedding_pretrained_weights = vocab.vectors\n pre_trained_embeddings = torch.as_tensor(embedding_pretrained_weights).to(DEVICE)\n padding_index=1\n embedding_dim = len(embedding_pretrained_weights[0])\n\n\n #init model\n network = model.ClozeModel(controller_config, memory_config, embedding_dim,len(pre_trained_embeddings),dropout=training_parameters.dropout).to(DEVICE)\n network.embeddings.weight.data.copy_(pre_trained_embeddings)\n network.embeddings.weight.requires_grad = True\n \n explanation_mod = core.dnc.explanation.ExplanationModule(padding_value=padding_index,top_k=top_k)\n loss_function = nn.CrossEntropyLoss()\n optimizer = optim.Adam(network.parameters(), lr=training_parameters.learning_rate, eps=1e-7)\n\n # initialize variables\n top1_acc = 0.0\n for epoch in range(1,11):\n print(\"Running epoch {}\".format(epoch))\n _,_ = run_training_epoch(network,train_iterator,loss_function,optimizer,training_parameters.max_grad_norm)\n print(\"Validation epoch {}\".format(epoch))\n accuracy = run_val_epoch(network,val_iterator)\n if required_explanation:\n print(\"Explaining training dataset\")\n run_explanations(network,explanation_mod,train_iterator)\n print(\"Explain validation dataset\")\n run_explanations(network,explanation_mod,val_iterator)\n\n if accuracy > top1_acc:\n top1_acc = accuracy\n print(\"saving model...\")\n checkpoint = {'controller_config':config_dict['controller'], 'memory_config':config_dict['memory'],\n 'state_dict':network.state_dict(), 'len_embeddings':len(pre_trained_embeddings)}\n torch.save(checkpoint, path_model)\n\ndef main(argv):\n 
path_model = FLAGS.path_model\n path_training = FLAGS.path_training\n path_val = FLAGS.path_val\n top_k = FLAGS.top_k\n use_surr = FLAGS.use_surrogate\n run_training(path_training,path_val, path_model, top_k, use_surr)\n print(\"Training process ended! The new model is stored on {}.\".format(path_model))\n\nif __name__ == '__main__':\n absl.app.run(main)", "step-ids": [ 3, 5, 6, 7, 9 ] }
[ 3, 5, 6, 7, 9 ]
<|reserved_special_token_0|> def test_creating_objects(): teacher = Teacher('Daniil', 'Shadrin') student = Student('Roman', 'Petrov') homework = teacher.create_homework('Learn OOP', 1) homework_result = student.do_homework(homework, 'I have done this hw') assert isinstance(teacher, Teacher) assert isinstance(student, Student) assert isinstance(homework, Homework) assert isinstance(homework_result, HomeworkResult) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def test_creating_objects(): teacher = Teacher('Daniil', 'Shadrin') student = Student('Roman', 'Petrov') homework = teacher.create_homework('Learn OOP', 1) homework_result = student.do_homework(homework, 'I have done this hw') assert isinstance(teacher, Teacher) assert isinstance(student, Student) assert isinstance(homework, Homework) assert isinstance(homework_result, HomeworkResult) <|reserved_special_token_0|> def test_creating_and_resetting_homework_results_by_teacher(): teacher = Teacher('Daniil', 'Shadrin') student = Student('Roman', 'Petrov') homework_1 = teacher.create_homework('Learn OOP', 1) homework_1_result = student.do_homework(homework_1, 'I have done this hw') assert teacher.check_homework(homework_1_result) is True assert homework_1_result in teacher.homework_done[homework_1] homework_2 = teacher.create_homework('homework 2', 1) homework_2_result = student.do_homework(homework_2, 'zero') assert teacher.check_homework(homework_2_result) is False assert teacher.homework_done.get(homework_2) is None homework_3 = teacher.create_homework('homework 3', 1) homework_3_result = student.do_homework(homework_3, 'I have done this hw') assert teacher.check_homework(homework_3_result) is True assert homework_3_result in teacher.homework_done.get(homework_3) assert len(teacher.homework_done) == 2 Teacher.reset_results(homework_3) assert len(teacher.homework_done) == 1 Teacher.reset_results() assert len(teacher.homework_done) == 0 <|reserved_special_token_1|> <|reserved_special_token_0|> def test_creating_objects(): teacher = Teacher('Daniil', 'Shadrin') student = Student('Roman', 'Petrov') homework = teacher.create_homework('Learn OOP', 1) homework_result = student.do_homework(homework, 'I have done this hw') assert isinstance(teacher, Teacher) assert isinstance(student, Student) assert isinstance(homework, Homework) assert isinstance(homework_result, HomeworkResult) def test_do_homework_exception(): teacher = Teacher('Daniil', 'Shadrin') student = Student('Lev', 'Sokolov') homework = teacher.create_homework('Learn OOP', 0) with pytest.raises(DeadLineError, match='You are late'): student.do_homework(homework, 'I have done this hw') def test_creating_and_resetting_homework_results_by_teacher(): teacher = Teacher('Daniil', 'Shadrin') student = Student('Roman', 'Petrov') homework_1 = teacher.create_homework('Learn OOP', 1) homework_1_result = student.do_homework(homework_1, 'I have done this hw') assert teacher.check_homework(homework_1_result) is True assert homework_1_result in teacher.homework_done[homework_1] homework_2 = teacher.create_homework('homework 2', 1) homework_2_result = student.do_homework(homework_2, 'zero') assert teacher.check_homework(homework_2_result) is False assert teacher.homework_done.get(homework_2) is None homework_3 = teacher.create_homework('homework 3', 1) homework_3_result = student.do_homework(homework_3, 'I have done this hw') assert teacher.check_homework(homework_3_result) is True assert homework_3_result in teacher.homework_done.get(homework_3) assert 
len(teacher.homework_done) == 2 Teacher.reset_results(homework_3) assert len(teacher.homework_done) == 1 Teacher.reset_results() assert len(teacher.homework_done) == 0 <|reserved_special_token_1|> import pytest from homeworks.homework6.oop_2 import DeadLineError, Homework, HomeworkResult, Student, Teacher def test_creating_objects(): teacher = Teacher('Daniil', 'Shadrin') student = Student('Roman', 'Petrov') homework = teacher.create_homework('Learn OOP', 1) homework_result = student.do_homework(homework, 'I have done this hw') assert isinstance(teacher, Teacher) assert isinstance(student, Student) assert isinstance(homework, Homework) assert isinstance(homework_result, HomeworkResult) def test_do_homework_exception(): teacher = Teacher('Daniil', 'Shadrin') student = Student('Lev', 'Sokolov') homework = teacher.create_homework('Learn OOP', 0) with pytest.raises(DeadLineError, match='You are late'): student.do_homework(homework, 'I have done this hw') def test_creating_and_resetting_homework_results_by_teacher(): teacher = Teacher('Daniil', 'Shadrin') student = Student('Roman', 'Petrov') homework_1 = teacher.create_homework('Learn OOP', 1) homework_1_result = student.do_homework(homework_1, 'I have done this hw') assert teacher.check_homework(homework_1_result) is True assert homework_1_result in teacher.homework_done[homework_1] homework_2 = teacher.create_homework('homework 2', 1) homework_2_result = student.do_homework(homework_2, 'zero') assert teacher.check_homework(homework_2_result) is False assert teacher.homework_done.get(homework_2) is None homework_3 = teacher.create_homework('homework 3', 1) homework_3_result = student.do_homework(homework_3, 'I have done this hw') assert teacher.check_homework(homework_3_result) is True assert homework_3_result in teacher.homework_done.get(homework_3) assert len(teacher.homework_done) == 2 Teacher.reset_results(homework_3) assert len(teacher.homework_done) == 1 Teacher.reset_results() assert len(teacher.homework_done) == 0 <|reserved_special_token_1|> import pytest from homeworks.homework6.oop_2 import ( DeadLineError, Homework, HomeworkResult, Student, Teacher, ) def test_creating_objects(): teacher = Teacher("Daniil", "Shadrin") student = Student("Roman", "Petrov") homework = teacher.create_homework("Learn OOP", 1) homework_result = student.do_homework(homework, "I have done this hw") assert isinstance(teacher, Teacher) assert isinstance(student, Student) assert isinstance(homework, Homework) assert isinstance(homework_result, HomeworkResult) def test_do_homework_exception(): teacher = Teacher("Daniil", "Shadrin") student = Student("Lev", "Sokolov") homework = teacher.create_homework("Learn OOP", 0) with pytest.raises(DeadLineError, match=r"You are late"): student.do_homework(homework, "I have done this hw") def test_creating_and_resetting_homework_results_by_teacher(): teacher = Teacher("Daniil", "Shadrin") student = Student("Roman", "Petrov") homework_1 = teacher.create_homework("Learn OOP", 1) homework_1_result = student.do_homework(homework_1, "I have done this hw") assert teacher.check_homework(homework_1_result) is True assert homework_1_result in teacher.homework_done[homework_1] homework_2 = teacher.create_homework("homework 2", 1) homework_2_result = student.do_homework(homework_2, "zero") assert teacher.check_homework(homework_2_result) is False assert teacher.homework_done.get(homework_2) is None homework_3 = teacher.create_homework("homework 3", 1) homework_3_result = student.do_homework(homework_3, "I have done this hw") assert 
teacher.check_homework(homework_3_result) is True assert homework_3_result in teacher.homework_done.get(homework_3) assert len(teacher.homework_done) == 2 Teacher.reset_results(homework_3) assert len(teacher.homework_done) == 1 Teacher.reset_results() assert len(teacher.homework_done) == 0
flexible
{ "blob_id": "8f971ee3b98691a887ee0632afd613bbf4f19aa0", "index": 3505, "step-1": "<mask token>\n\n\ndef test_creating_objects():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework = teacher.create_homework('Learn OOP', 1)\n homework_result = student.do_homework(homework, 'I have done this hw')\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef test_creating_objects():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework = teacher.create_homework('Learn OOP', 1)\n homework_result = student.do_homework(homework, 'I have done this hw')\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\n<mask token>\n\n\ndef test_creating_and_resetting_homework_results_by_teacher():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework_1 = teacher.create_homework('Learn OOP', 1)\n homework_1_result = student.do_homework(homework_1, 'I have done this hw')\n assert teacher.check_homework(homework_1_result) is True\n assert homework_1_result in teacher.homework_done[homework_1]\n homework_2 = teacher.create_homework('homework 2', 1)\n homework_2_result = student.do_homework(homework_2, 'zero')\n assert teacher.check_homework(homework_2_result) is False\n assert teacher.homework_done.get(homework_2) is None\n homework_3 = teacher.create_homework('homework 3', 1)\n homework_3_result = student.do_homework(homework_3, 'I have done this hw')\n assert teacher.check_homework(homework_3_result) is True\n assert homework_3_result in teacher.homework_done.get(homework_3)\n assert len(teacher.homework_done) == 2\n Teacher.reset_results(homework_3)\n assert len(teacher.homework_done) == 1\n Teacher.reset_results()\n assert len(teacher.homework_done) == 0\n", "step-3": "<mask token>\n\n\ndef test_creating_objects():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework = teacher.create_homework('Learn OOP', 1)\n homework_result = student.do_homework(homework, 'I have done this hw')\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\ndef test_do_homework_exception():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Lev', 'Sokolov')\n homework = teacher.create_homework('Learn OOP', 0)\n with pytest.raises(DeadLineError, match='You are late'):\n student.do_homework(homework, 'I have done this hw')\n\n\ndef test_creating_and_resetting_homework_results_by_teacher():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework_1 = teacher.create_homework('Learn OOP', 1)\n homework_1_result = student.do_homework(homework_1, 'I have done this hw')\n assert teacher.check_homework(homework_1_result) is True\n assert homework_1_result in teacher.homework_done[homework_1]\n homework_2 = teacher.create_homework('homework 2', 1)\n homework_2_result = student.do_homework(homework_2, 'zero')\n assert teacher.check_homework(homework_2_result) is False\n assert teacher.homework_done.get(homework_2) is None\n homework_3 = teacher.create_homework('homework 3', 1)\n homework_3_result = student.do_homework(homework_3, 'I have done this hw')\n 
assert teacher.check_homework(homework_3_result) is True\n assert homework_3_result in teacher.homework_done.get(homework_3)\n assert len(teacher.homework_done) == 2\n Teacher.reset_results(homework_3)\n assert len(teacher.homework_done) == 1\n Teacher.reset_results()\n assert len(teacher.homework_done) == 0\n", "step-4": "import pytest\nfrom homeworks.homework6.oop_2 import DeadLineError, Homework, HomeworkResult, Student, Teacher\n\n\ndef test_creating_objects():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework = teacher.create_homework('Learn OOP', 1)\n homework_result = student.do_homework(homework, 'I have done this hw')\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\ndef test_do_homework_exception():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Lev', 'Sokolov')\n homework = teacher.create_homework('Learn OOP', 0)\n with pytest.raises(DeadLineError, match='You are late'):\n student.do_homework(homework, 'I have done this hw')\n\n\ndef test_creating_and_resetting_homework_results_by_teacher():\n teacher = Teacher('Daniil', 'Shadrin')\n student = Student('Roman', 'Petrov')\n homework_1 = teacher.create_homework('Learn OOP', 1)\n homework_1_result = student.do_homework(homework_1, 'I have done this hw')\n assert teacher.check_homework(homework_1_result) is True\n assert homework_1_result in teacher.homework_done[homework_1]\n homework_2 = teacher.create_homework('homework 2', 1)\n homework_2_result = student.do_homework(homework_2, 'zero')\n assert teacher.check_homework(homework_2_result) is False\n assert teacher.homework_done.get(homework_2) is None\n homework_3 = teacher.create_homework('homework 3', 1)\n homework_3_result = student.do_homework(homework_3, 'I have done this hw')\n assert teacher.check_homework(homework_3_result) is True\n assert homework_3_result in teacher.homework_done.get(homework_3)\n assert len(teacher.homework_done) == 2\n Teacher.reset_results(homework_3)\n assert len(teacher.homework_done) == 1\n Teacher.reset_results()\n assert len(teacher.homework_done) == 0\n", "step-5": "import pytest\n\nfrom homeworks.homework6.oop_2 import (\n DeadLineError,\n Homework,\n HomeworkResult,\n Student,\n Teacher,\n)\n\n\ndef test_creating_objects():\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n student = Student(\"Roman\", \"Petrov\")\n homework = teacher.create_homework(\"Learn OOP\", 1)\n homework_result = student.do_homework(homework, \"I have done this hw\")\n assert isinstance(teacher, Teacher)\n assert isinstance(student, Student)\n assert isinstance(homework, Homework)\n assert isinstance(homework_result, HomeworkResult)\n\n\ndef test_do_homework_exception():\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n student = Student(\"Lev\", \"Sokolov\")\n homework = teacher.create_homework(\"Learn OOP\", 0)\n with pytest.raises(DeadLineError, match=r\"You are late\"):\n student.do_homework(homework, \"I have done this hw\")\n\n\ndef test_creating_and_resetting_homework_results_by_teacher():\n teacher = Teacher(\"Daniil\", \"Shadrin\")\n student = Student(\"Roman\", \"Petrov\")\n homework_1 = teacher.create_homework(\"Learn OOP\", 1)\n homework_1_result = student.do_homework(homework_1, \"I have done this hw\")\n assert teacher.check_homework(homework_1_result) is True\n assert homework_1_result in teacher.homework_done[homework_1]\n\n homework_2 = teacher.create_homework(\"homework 2\", 1)\n 
homework_2_result = student.do_homework(homework_2, \"zero\")\n assert teacher.check_homework(homework_2_result) is False\n assert teacher.homework_done.get(homework_2) is None\n\n homework_3 = teacher.create_homework(\"homework 3\", 1)\n homework_3_result = student.do_homework(homework_3, \"I have done this hw\")\n assert teacher.check_homework(homework_3_result) is True\n assert homework_3_result in teacher.homework_done.get(homework_3)\n\n assert len(teacher.homework_done) == 2\n Teacher.reset_results(homework_3)\n assert len(teacher.homework_done) == 1\n Teacher.reset_results()\n assert len(teacher.homework_done) == 0\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class bcolors: HEADER = '\x1b[95m' OKBLUE = '\x1b[94m' OKGREEN = '\x1b[92m' WARNING = '\x1b[93m' FAIL = '\x1b[91m' ENDC = '\x1b[0m' BOLD = '\x1b[1m' UNDERLINE = '\x1b[4m' def get_image(f_sdss): img = f_sdss[0].data return img <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class bcolors: HEADER = '\x1b[95m' OKBLUE = '\x1b[94m' OKGREEN = '\x1b[92m' WARNING = '\x1b[93m' FAIL = '\x1b[91m' ENDC = '\x1b[0m' BOLD = '\x1b[1m' UNDERLINE = '\x1b[4m' def get_image(f_sdss): img = f_sdss[0].data return img <|reserved_special_token_0|> sex.config['PARAMETERS_LIST'].append('FLUX_ISO') sex.config['PARAMETERS_LIST'].append('MAG_ISOCOR') sex.config['PARAMETERS_LIST'].append('MAG_AUTO') sex.config['PARAMETERS_LIST'].append('PETRO_RADIUS') sex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE') sex.config['PARAMETERS_LIST'].append('ALPHA_J2000') sex.config['PARAMETERS_LIST'].append('DELTA_J2000') sex.config['PARAMETERS_LIST'].append('FWHM_WORLD') sex.config['PARAMETERS_LIST'].append('CLASS_STAR') <|reserved_special_token_0|> sex.run(fname) <|reserved_special_token_0|> for i_object in range(13, 14): window_size = 250 filter_seg = 'rSDSS' ra = df_cat['ra'] dec = df_cat['dec'] image_r = fits.open('data/frame-r-002507-4-0226.fits') wcsys = wcs.WCS(header=image_r[0].header) y, x = wcsys.wcs_world2pix(ra, dec, 1) interval = int(round(x[i_object] - window_size / 2)), int(round(x[ i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2) ), int(round(y[i_object] + window_size / 2)) df = pd.DataFrame() df_sky = pd.DataFrame() seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]] for i_gal in range(len(df_fit)): f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal], df_fit['name'][i_gal])) img = get_image(f_sdss) img_cut = img[interval[0]:interval[1], interval[2]:interval[3]] plt.figure(1) plt.clf() plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral') plt.colorbar() band = df_fit['filter'][i_gal] nrows, ncols = img_cut.shape xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows]) table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten()) ) temp = pd.DataFrame(table, columns=['x', 'y', band]) df = pd.concat([df, temp], axis=1) sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal], df_fit['name'][i_gal])) sky = get_image(sky_r) wcsys = wcs.WCS(header=sky_r[0].header) yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1) delta_x = 85 delta_y = 85 interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2)) img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]: interval_sky[3]] sky_nrows, sky_ncols = img_sky.shape xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows]) table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky. flatten())) temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band]) df_sky = pd.concat([df_sky, temp_sky], axis=1) df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]] df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]] """ Imagem da galaxia, na banda r. 
""" plt.figure(1) plt.clf() r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal]) img_r = get_image(r_sdss) img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]] cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5) imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral') titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object] plt.title(titulo) plt.colorbar() figura = 'figures/galaxy_#%s' % df_cat['num'][i_object] plt.savefig(figura) """ Imagem segmentada da galaxia, na banda r. """ plt.figure(1) plt.clf() cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5) imgplot = plt.imshow(seg_sex, cmap='spectral') titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object] plt.title(titulo) plt.colorbar() figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object] plt.savefig(figura) """ ================================================================================ Salvando os fluxos de cada galaxia em um arquivo txt ================================================================================ """ saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object] formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f'] headers2 = 'x\ty\tu\tg\tr\ti\tz' np.savetxt(saida_fluxes, df, delimiter='\t', header=headers2, fmt=formats) print('') print('>> Os dados estao em: "%s".' % saida_fluxes) """ ================================================================================ Subtraindo o ceu, na banda r ================================================================================ """ df_aux = df.ix[:, 2:] df_aux1 = df.ix[:, :2] df_sky_aux = df_sky.ix[:, 2:] df_aux3 = df_aux - df_sky_aux.mean() df_rss = df_aux1.join(df_aux3) """ A segmentacao consiste de usar um limiar para separar o objeto do fundo. 
No nosso caso, usamos limiar = alpha*std_ceu """ """ ================================================================================ SEGMENTACAO ================================================================================ """ limiar = 2.5 * df_sky.r.std() df_seg = df_rss.ix[df_rss['r'] > limiar] print('Pixeis acima do limiar: %d' % len(df_seg)) np.savetxt('fof2.txt', df_seg, delimiter='\t') <|reserved_special_token_0|> print('') print(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC ) <|reserved_special_token_1|> <|reserved_special_token_0|> __author__ = 'pnovais' ini = time.time() class bcolors: HEADER = '\x1b[95m' OKBLUE = '\x1b[94m' OKGREEN = '\x1b[92m' WARNING = '\x1b[93m' FAIL = '\x1b[91m' ENDC = '\x1b[0m' BOLD = '\x1b[1m' UNDERLINE = '\x1b[4m' def get_image(f_sdss): img = f_sdss[0].data return img df_fit = pd.read_csv('data/arquivo_fits.csv') <|reserved_special_token_0|> fname = 'data/frame-r-002507-4-0226.fits' sex = SExtractor() sex.config['PARAMETERS_LIST'].append('FLUX_ISO') sex.config['PARAMETERS_LIST'].append('MAG_ISOCOR') sex.config['PARAMETERS_LIST'].append('MAG_AUTO') sex.config['PARAMETERS_LIST'].append('PETRO_RADIUS') sex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE') sex.config['PARAMETERS_LIST'].append('ALPHA_J2000') sex.config['PARAMETERS_LIST'].append('DELTA_J2000') sex.config['PARAMETERS_LIST'].append('FWHM_WORLD') sex.config['PARAMETERS_LIST'].append('CLASS_STAR') sex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION' sex.run(fname) segmap = fits.open('check.fits')[0].data df_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16) df_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags', 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius', 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star'] df_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)] df_cat = df_cat.reset_index() df_cat = df_cat.ix[:, 1:15] <|reserved_special_token_0|> df = pd.DataFrame() df_sky = pd.DataFrame() for i_object in range(13, 14): window_size = 250 filter_seg = 'rSDSS' ra = df_cat['ra'] dec = df_cat['dec'] image_r = fits.open('data/frame-r-002507-4-0226.fits') wcsys = wcs.WCS(header=image_r[0].header) y, x = wcsys.wcs_world2pix(ra, dec, 1) interval = int(round(x[i_object] - window_size / 2)), int(round(x[ i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2) ), int(round(y[i_object] + window_size / 2)) df = pd.DataFrame() df_sky = pd.DataFrame() seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]] for i_gal in range(len(df_fit)): f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal], df_fit['name'][i_gal])) img = get_image(f_sdss) img_cut = img[interval[0]:interval[1], interval[2]:interval[3]] plt.figure(1) plt.clf() plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral') plt.colorbar() band = df_fit['filter'][i_gal] nrows, ncols = img_cut.shape xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows]) table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten()) ) temp = pd.DataFrame(table, columns=['x', 'y', band]) df = pd.concat([df, temp], axis=1) sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal], df_fit['name'][i_gal])) sky = get_image(sky_r) wcsys = wcs.WCS(header=sky_r[0].header) yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1) delta_x = 85 delta_y = 85 interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2)) img_sky = sky[interval_sky[0]:interval_sky[1], 
interval_sky[2]: interval_sky[3]] sky_nrows, sky_ncols = img_sky.shape xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows]) table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky. flatten())) temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band]) df_sky = pd.concat([df_sky, temp_sky], axis=1) df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]] df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]] """ Imagem da galaxia, na banda r. """ plt.figure(1) plt.clf() r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal]) img_r = get_image(r_sdss) img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]] cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5) imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral') titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object] plt.title(titulo) plt.colorbar() figura = 'figures/galaxy_#%s' % df_cat['num'][i_object] plt.savefig(figura) """ Imagem segmentada da galaxia, na banda r. """ plt.figure(1) plt.clf() cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5) imgplot = plt.imshow(seg_sex, cmap='spectral') titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object] plt.title(titulo) plt.colorbar() figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object] plt.savefig(figura) """ ================================================================================ Salvando os fluxos de cada galaxia em um arquivo txt ================================================================================ """ saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object] formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f'] headers2 = 'x\ty\tu\tg\tr\ti\tz' np.savetxt(saida_fluxes, df, delimiter='\t', header=headers2, fmt=formats) print('') print('>> Os dados estao em: "%s".' % saida_fluxes) """ ================================================================================ Subtraindo o ceu, na banda r ================================================================================ """ df_aux = df.ix[:, 2:] df_aux1 = df.ix[:, :2] df_sky_aux = df_sky.ix[:, 2:] df_aux3 = df_aux - df_sky_aux.mean() df_rss = df_aux1.join(df_aux3) """ A segmentacao consiste de usar um limiar para separar o objeto do fundo. 
No nosso caso, usamos limiar = alpha*std_ceu """ """ ================================================================================ SEGMENTACAO ================================================================================ """ limiar = 2.5 * df_sky.r.std() df_seg = df_rss.ix[df_rss['r'] > limiar] print('Pixeis acima do limiar: %d' % len(df_seg)) np.savetxt('fof2.txt', df_seg, delimiter='\t') fim = time.time() time_proc = fim - ini print('') print(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC ) <|reserved_special_token_1|> import pandas as pd import numpy as np import datetime import time from sys import exit from matplotlib import colors, pyplot as plt from functools import reduce import matplotlib.cm as cm import seaborn as sns from astropy.io import ascii, fits from astropy.wcs import wcs from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter from scipy.interpolate import interp2d import matplotlib.mlab as mlab import scipy, pylab import rpy2 import cubehelix import math from pysextractor import SExtractor __author__ = 'pnovais' ini = time.time() class bcolors: HEADER = '\x1b[95m' OKBLUE = '\x1b[94m' OKGREEN = '\x1b[92m' WARNING = '\x1b[93m' FAIL = '\x1b[91m' ENDC = '\x1b[0m' BOLD = '\x1b[1m' UNDERLINE = '\x1b[4m' def get_image(f_sdss): img = f_sdss[0].data return img df_fit = pd.read_csv('data/arquivo_fits.csv') <|reserved_special_token_0|> fname = 'data/frame-r-002507-4-0226.fits' sex = SExtractor() sex.config['PARAMETERS_LIST'].append('FLUX_ISO') sex.config['PARAMETERS_LIST'].append('MAG_ISOCOR') sex.config['PARAMETERS_LIST'].append('MAG_AUTO') sex.config['PARAMETERS_LIST'].append('PETRO_RADIUS') sex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE') sex.config['PARAMETERS_LIST'].append('ALPHA_J2000') sex.config['PARAMETERS_LIST'].append('DELTA_J2000') sex.config['PARAMETERS_LIST'].append('FWHM_WORLD') sex.config['PARAMETERS_LIST'].append('CLASS_STAR') sex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION' sex.run(fname) segmap = fits.open('check.fits')[0].data df_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16) df_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags', 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius', 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star'] df_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)] df_cat = df_cat.reset_index() df_cat = df_cat.ix[:, 1:15] <|reserved_special_token_0|> df = pd.DataFrame() df_sky = pd.DataFrame() for i_object in range(13, 14): window_size = 250 filter_seg = 'rSDSS' ra = df_cat['ra'] dec = df_cat['dec'] image_r = fits.open('data/frame-r-002507-4-0226.fits') wcsys = wcs.WCS(header=image_r[0].header) y, x = wcsys.wcs_world2pix(ra, dec, 1) interval = int(round(x[i_object] - window_size / 2)), int(round(x[ i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2) ), int(round(y[i_object] + window_size / 2)) df = pd.DataFrame() df_sky = pd.DataFrame() seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]] for i_gal in range(len(df_fit)): f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal], df_fit['name'][i_gal])) img = get_image(f_sdss) img_cut = img[interval[0]:interval[1], interval[2]:interval[3]] plt.figure(1) plt.clf() plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral') plt.colorbar() band = df_fit['filter'][i_gal] nrows, ncols = img_cut.shape xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows]) table = np.column_stack((xx.flatten(), yy.flatten(), 
img_cut.flatten()) ) temp = pd.DataFrame(table, columns=['x', 'y', band]) df = pd.concat([df, temp], axis=1) sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal], df_fit['name'][i_gal])) sky = get_image(sky_r) wcsys = wcs.WCS(header=sky_r[0].header) yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1) delta_x = 85 delta_y = 85 interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2)) img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]: interval_sky[3]] sky_nrows, sky_ncols = img_sky.shape xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows]) table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky. flatten())) temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band]) df_sky = pd.concat([df_sky, temp_sky], axis=1) df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]] df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]] """ Imagem da galaxia, na banda r. """ plt.figure(1) plt.clf() r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal]) img_r = get_image(r_sdss) img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]] cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5) imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral') titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object] plt.title(titulo) plt.colorbar() figura = 'figures/galaxy_#%s' % df_cat['num'][i_object] plt.savefig(figura) """ Imagem segmentada da galaxia, na banda r. """ plt.figure(1) plt.clf() cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5) imgplot = plt.imshow(seg_sex, cmap='spectral') titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object] plt.title(titulo) plt.colorbar() figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object] plt.savefig(figura) """ ================================================================================ Salvando os fluxos de cada galaxia em um arquivo txt ================================================================================ """ saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object] formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f'] headers2 = 'x\ty\tu\tg\tr\ti\tz' np.savetxt(saida_fluxes, df, delimiter='\t', header=headers2, fmt=formats) print('') print('>> Os dados estao em: "%s".' % saida_fluxes) """ ================================================================================ Subtraindo o ceu, na banda r ================================================================================ """ df_aux = df.ix[:, 2:] df_aux1 = df.ix[:, :2] df_sky_aux = df_sky.ix[:, 2:] df_aux3 = df_aux - df_sky_aux.mean() df_rss = df_aux1.join(df_aux3) """ A segmentacao consiste de usar um limiar para separar o objeto do fundo. 
No nosso caso, usamos limiar = alpha*std_ceu """ """ ================================================================================ SEGMENTACAO ================================================================================ """ limiar = 2.5 * df_sky.r.std() df_seg = df_rss.ix[df_rss['r'] > limiar] print('Pixeis acima do limiar: %d' % len(df_seg)) np.savetxt('fof2.txt', df_seg, delimiter='\t') fim = time.time() time_proc = fim - ini print('') print(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC ) <|reserved_special_token_1|> #!/usr/bin/python # -*- coding: utf-8 -*- import pandas as pd import numpy as np import datetime import time from sys import exit from matplotlib import colors, pyplot as plt from functools import reduce import matplotlib.cm as cm import seaborn as sns from astropy.io import ascii, fits from astropy.wcs import wcs from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter from scipy.interpolate import interp2d import matplotlib.mlab as mlab import scipy, pylab import rpy2 import cubehelix import math from pysextractor import SExtractor __author__ = 'pnovais' ini=time.time() class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' #definindo a classe que ira ler as imagens fits def get_image(f_sdss): img = f_sdss[0].data # sky = f_sdss[2].data return img #abertura do arquivo com o nome das imagens, nas n bandas df_fit = pd.read_csv('data/arquivo_fits.csv') ''' ================================================================================ Rodando o SExtractor na imagem na banda r, criando uma segmentacao e um catalogo com os objetos obtidos ATUALIZAR NOME DA BANDA DE SEGMENTACAO ================================================================================ ''' fname = 'data/frame-r-002507-4-0226.fits' sex = SExtractor() sex.config['PARAMETERS_LIST'].append('FLUX_ISO') sex.config['PARAMETERS_LIST'].append('MAG_ISOCOR') sex.config['PARAMETERS_LIST'].append('MAG_AUTO') sex.config['PARAMETERS_LIST'].append('PETRO_RADIUS') sex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE') sex.config['PARAMETERS_LIST'].append('ALPHA_J2000') sex.config['PARAMETERS_LIST'].append('DELTA_J2000') sex.config['PARAMETERS_LIST'].append('FWHM_WORLD') sex.config['PARAMETERS_LIST'].append('CLASS_STAR') sex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION' sex.run(fname) segmap = fits.open('check.fits')[0].data df_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16) df_cat.columns = ['num','flux_best','fluxerr_best', 'x','y','flags', 'fwhm_image', 'flux_iso','mag_isocor','mag_auto', 'petro_radius','ISO_AREA','ra','dec', 'fwhm_world','class_star'] #selecao dos objetos que devem ser galaxias df_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)] df_cat = df_cat.reset_index() df_cat = df_cat.ix[:,1:15] ''' ================================================================================ Lendo as imagens, em todas as bandas, e gerando um dataframe para cada galaxia utilizando astropy Calculando o ceu em todas as bandas ATUALIZAR NOME DA BANDA DE SEGMENTACAO ================================================================================ ''' df = pd.DataFrame() df_sky = pd.DataFrame() for i_object in range(13,14): window_size = 250 filter_seg = 'rSDSS' ra = df_cat['ra'] dec = df_cat['dec'] image_r = fits.open('data/frame-r-002507-4-0226.fits') wcsys = wcs.WCS(header=image_r[0].header) y, x = 
wcsys.wcs_world2pix(ra, dec, 1) interval = (int(round(x[i_object] - window_size / 2)), int(round(x[i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)), int(round(y[i_object] + window_size / 2))) df = pd.DataFrame() df_sky = pd.DataFrame() seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]] for i_gal in range(len(df_fit)): f_sdss = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal], df_fit['name'][i_gal])) img = get_image(f_sdss) img_cut = img[interval[0]:interval[1], interval[2]:interval[3]] plt.figure(1) plt.clf() plt.imshow(100*np.log10(img_cut/255), cmap='spectral') plt.colorbar() band=df_fit['filter'][i_gal] nrows, ncols = img_cut.shape xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] ) table = np.column_stack(( xx.flatten(), yy.flatten(), img_cut.flatten() )) temp = pd.DataFrame(table, columns=['x','y',band]) df = pd.concat([df,temp], axis=1) sky_r = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal], df_fit['name'][i_gal])) sky = get_image(sky_r) wcsys = wcs.WCS(header=sky_r[0].header) yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1) delta_x = 85 delta_y = 85 interval_sky = (int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))) img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:interval_sky[3]] sky_nrows, sky_ncols = img_sky.shape xxc, yyc = np.meshgrid( *np.ogrid[:sky_ncols, :sky_nrows] ) table_sky = np.column_stack(( xxc.flatten(), yyc.flatten(), img_sky.flatten() )) temp_sky = pd.DataFrame(table_sky, columns=['x','y',band]) df_sky = pd.concat([df_sky,temp_sky], axis=1) df = df.ix[:, [0,1,2,5,8,11,14]] df_sky = df_sky.ix[:, [0,1,2,5,8,11,14]] ''' Imagem da galaxia, na banda r. ''' plt.figure(1) plt.clf() r_sdss = fits.open('data/frame-r-%s' %(df_fit['name'][i_gal])) img_r = get_image(r_sdss) img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]] cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) imgplot = plt.imshow(100*np.log10(img_cut_r/255), cmap='spectral') titulo='Galaxy #%s - banda r' %(df_cat['num'][i_object]) plt.title(titulo) plt.colorbar() figura = 'figures/galaxy_#%s' %df_cat['num'][i_object] plt.savefig(figura) ''' Imagem segmentada da galaxia, na banda r. ''' plt.figure(1) plt.clf() cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5) imgplot = plt.imshow(seg_sex, cmap='spectral') titulo='Segmentation Galaxy #%s - banda r' %(df_cat['num'][i_object]) plt.title(titulo) plt.colorbar() figura = 'figures/seg_galaxy_#%s' %df_cat['num'][i_object] plt.savefig(figura) ''' ================================================================================ Salvando os fluxos de cada galaxia em um arquivo txt ================================================================================ ''' saida_fluxes = 'data/all_band_fluxes_%s.txt' %df_cat['num'][i_object] formats=['%d','%d','%5.4f','%5.4f','%5.4f','%5.4f','%5.4f'] headers2='x\ty\tu\tg\tr\ti\tz' np.savetxt(saida_fluxes,df, delimiter='\t',header=headers2, fmt = formats) print('') print('>> Os dados estao em: "%s".' %saida_fluxes) ''' ================================================================================ Subtraindo o ceu, na banda r ================================================================================ ''' df_aux=df.ix[:,2:] df_aux1=df.ix[:,:2] df_sky_aux = df_sky.ix[:,2:] df_aux3 = (df_aux - df_sky_aux.mean()) df_rss=df_aux1.join(df_aux3) """ A segmentacao consiste de usar um limiar para separar o objeto do fundo. 
No nosso caso, usamos limiar = alpha*std_ceu """ ''' ================================================================================ SEGMENTACAO ================================================================================ ''' #SELECAO DOS PIXEIS ACIMA DO LIMIAR limiar = 2.5*df_sky.r.std() df_seg = df_rss.ix[df_rss['r'] > limiar] print('Pixeis acima do limiar: %d' %len(df_seg)) np.savetxt('fof2.txt',df_seg,delimiter='\t') fim = time.time() time_proc = fim - ini print('') print(bcolors.HEADER + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)
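Editorial note: the docstring in the script above states, in Portuguese, that segmentation consists of applying a threshold to separate the object from the background, with limiar = alpha * std_ceu, i.e. a threshold equal to alpha times the standard deviation of the sky; the script uses alpha = 2.5 on the sky-subtracted r band. Below is a minimal, self-contained sketch of that thresholding step. The column names x, y, r and the factor 2.5 mirror the script, but the synthetic arrays and the random seed are invented for illustration and are not part of the record.

# Minimal sketch of the sky-threshold segmentation used above (illustration only).
# Synthetic data: 'r' plays the role of the sky-subtracted r-band flux in df_rss.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
sky_r = rng.normal(0.0, 1.0, size=10_000)      # stand-in for the sky cutout fluxes
flux_r = rng.normal(0.0, 1.0, size=10_000)
flux_r[:200] += 30.0                           # a handful of bright "object" pixels

df_rss = pd.DataFrame({'x': np.arange(10_000) % 100,
                       'y': np.arange(10_000) // 100,
                       'r': flux_r})

limiar = 2.5 * sky_r.std()                     # alpha * sky standard deviation
df_seg = df_rss[df_rss['r'] > limiar]          # pixels kept as belonging to the object
print('Pixeis acima do limiar: %d' % len(df_seg))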
flexible
{ "blob_id": "736fee6f9a46b8568b2dd217b81d54d689306630", "index": 970, "step-1": "<mask token>\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\n<mask token>\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\n<mask token>\nsex.run(fname)\n<mask token>\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = 
plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' % saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\n<mask token>\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n", "step-3": "<mask token>\n__author__ = 'pnovais'\nini = time.time()\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n<mask token>\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags',\n 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius',\n 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star']\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 
4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:, 1:15]\n<mask token>\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n 
np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' % saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n", "step-4": "import pandas as pd\nimport numpy as np\nimport datetime\nimport time\nfrom sys import exit\nfrom matplotlib import colors, pyplot as plt\nfrom functools import reduce\nimport matplotlib.cm as cm\nimport seaborn as sns\nfrom astropy.io import ascii, fits\nfrom astropy.wcs import wcs\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy.interpolate import interp2d\nimport matplotlib.mlab as mlab\nimport scipy, pylab\nimport rpy2\nimport cubehelix\nimport math\nfrom pysextractor import SExtractor\n__author__ = 'pnovais'\nini = time.time()\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n<mask token>\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags',\n 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius',\n 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star']\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:, 1:15]\n<mask token>\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n 
i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' 
% saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n", "step-5": "\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport time\nfrom sys import exit\nfrom matplotlib import colors, pyplot as plt\nfrom functools import reduce\nimport matplotlib.cm as cm\nimport seaborn as sns\nfrom astropy.io import ascii, fits\nfrom astropy.wcs import wcs\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy.interpolate import interp2d\nimport matplotlib.mlab as mlab\nimport scipy, pylab\nimport rpy2\nimport cubehelix\nimport math\nfrom pysextractor import SExtractor\n\n__author__ = 'pnovais'\n\nini=time.time()\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n#definindo a classe que ira ler as imagens fits\ndef get_image(f_sdss):\n img = f_sdss[0].data\n# sky = f_sdss[2].data\n return img\n\n#abertura do arquivo com o nome das imagens, nas n bandas\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n\n'''\n================================================================================\nRodando o SExtractor na imagem na banda r, criando uma segmentacao e um catalogo\ncom os objetos obtidos\nATUALIZAR NOME DA BANDA DE SEGMENTACAO\n================================================================================\n'''\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\n\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num','flux_best','fluxerr_best', 'x','y','flags',\n 'fwhm_image', 'flux_iso','mag_isocor','mag_auto',\n 'petro_radius','ISO_AREA','ra','dec',\n 'fwhm_world','class_star']\n\n#selecao dos objetos que devem ser galaxias\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = 
df_cat.ix[:,1:15]\n\n'''\n================================================================================\nLendo as imagens, em todas as bandas, e gerando um dataframe para cada galaxia\nutilizando astropy\nCalculando o ceu em todas as bandas\n\nATUALIZAR NOME DA BANDA DE SEGMENTACAO\n================================================================================\n'''\n\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\n\n\nfor i_object in range(13,14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = (int(round(x[i_object] - window_size / 2)), int(round(x[i_object] + window_size / 2)),\n int(round(y[i_object] - window_size / 2)), int(round(y[i_object] + window_size / 2)))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100*np.log10(img_cut/255), cmap='spectral')\n plt.colorbar()\n band=df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )\n table = np.column_stack(( xx.flatten(), yy.flatten(), img_cut.flatten() ))\n temp = pd.DataFrame(table, columns=['x','y',band])\n df = pd.concat([df,temp], axis=1)\n\n sky_r = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = (int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)),\n int(round(yc + delta_y / 2)))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid( *np.ogrid[:sky_ncols, :sky_nrows] )\n table_sky = np.column_stack(( xxc.flatten(), yyc.flatten(), img_sky.flatten() ))\n temp_sky = pd.DataFrame(table_sky, columns=['x','y',band])\n df_sky = pd.concat([df_sky,temp_sky], axis=1)\n\n df = df.ix[:, [0,1,2,5,8,11,14]]\n df_sky = df_sky.ix[:, [0,1,2,5,8,11,14]]\n\n '''\n Imagem da galaxia, na banda r.\n '''\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' %(df_fit['name'][i_gal]))\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)\n imgplot = plt.imshow(100*np.log10(img_cut_r/255), cmap='spectral')\n titulo='Galaxy #%s - banda r' %(df_cat['num'][i_object])\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' %df_cat['num'][i_object]\n plt.savefig(figura)\n '''\n Imagem segmentada da galaxia, na banda r.\n '''\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo='Segmentation Galaxy #%s - banda r' %(df_cat['num'][i_object])\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' %df_cat['num'][i_object]\n plt.savefig(figura)\n\n '''\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n 
================================================================================\n '''\n saida_fluxes = 'data/all_band_fluxes_%s.txt' %df_cat['num'][i_object]\n formats=['%d','%d','%5.4f','%5.4f','%5.4f','%5.4f','%5.4f']\n headers2='x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes,df, delimiter='\\t',header=headers2, fmt = formats)\n print('')\n print('>> Os dados estao em: \"%s\".' %saida_fluxes)\n\n '''\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n '''\n df_aux=df.ix[:,2:]\n df_aux1=df.ix[:,:2]\n df_sky_aux = df_sky.ix[:,2:]\n df_aux3 = (df_aux - df_sky_aux.mean())\n df_rss=df_aux1.join(df_aux3)\n\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n '''\n ================================================================================\n SEGMENTACAO\n ================================================================================\n '''\n #SELECAO DOS PIXEIS ACIMA DO LIMIAR\n limiar = 2.5*df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' %len(df_seg))\n np.savetxt('fof2.txt',df_seg,delimiter='\\t')\n\n\n\n\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
<|reserved_special_token_0|> class VertexArrayObject: def __init__(self, primitive): self._primitive = primitive self._buffers: List[pxng.BufferObject] = [] self._indices = pxng.BufferObject(data_type=self.index_data_type, array_type=gl.GL_ELEMENT_ARRAY_BUFFER) self._vao = gl.glGenVertexArrays(1) <|reserved_special_token_0|> def add_quad(self, p1, p2, p3, p4): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._buffers[0].set_value(p4) self._indices.set_value(glm.u16vec3(i, i + 1, i + 3)) self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3)) def add_triangle(self, p1, p2, p3): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._indices.set_value(glm.u16vec3(i, i + 1, i + 2)) def add_line(self, p1, p2): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._indices.set_value(glm.u16vec2(i, i + 1)) <|reserved_special_token_0|> def set_colors(self, *args: glm.vec4, target=1): for c in args: self._buffers[target].set_value(c) def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1): for c in args: self._buffers[target].set_value(c) <|reserved_special_token_0|> <|reserved_special_token_0|> def draw(self): index_count = len(self._indices) * self.primitive_component_count gl.glDrawElements(self._primitive, index_count, gl. GL_UNSIGNED_SHORT, None) <|reserved_special_token_0|> @property def primitive_component_count(self): if self._primitive == gl.GL_TRIANGLES: return 3 elif self._primitive == gl.GL_LINES: return 2 elif self._primitive == gl.GL_POINTS: return 1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class VertexArrayObject: def __init__(self, primitive): self._primitive = primitive self._buffers: List[pxng.BufferObject] = [] self._indices = pxng.BufferObject(data_type=self.index_data_type, array_type=gl.GL_ELEMENT_ARRAY_BUFFER) self._vao = gl.glGenVertexArrays(1) <|reserved_special_token_0|> def add_quad(self, p1, p2, p3, p4): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._buffers[0].set_value(p4) self._indices.set_value(glm.u16vec3(i, i + 1, i + 3)) self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3)) def add_triangle(self, p1, p2, p3): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._indices.set_value(glm.u16vec3(i, i + 1, i + 2)) def add_line(self, p1, p2): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._indices.set_value(glm.u16vec2(i, i + 1)) <|reserved_special_token_0|> def set_colors(self, *args: glm.vec4, target=1): for c in args: self._buffers[target].set_value(c) def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1): for c in args: self._buffers[target].set_value(c) def create(self): gl.glBindVertexArray(self._vao) for index, vbo in enumerate(self._buffers): vbo.bind(index) self._indices.bind(None) <|reserved_special_token_0|> def draw(self): index_count = len(self._indices) * self.primitive_component_count gl.glDrawElements(self._primitive, index_count, gl. 
GL_UNSIGNED_SHORT, None) @property def index_data_type(self): if self._primitive == gl.GL_TRIANGLES: return glm.u16vec3 elif self._primitive == gl.GL_LINES: return glm.u16vec2 elif self._primitive == gl.GL_POINTS: return glm.u16vec1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') @property def primitive_component_count(self): if self._primitive == gl.GL_TRIANGLES: return 3 elif self._primitive == gl.GL_LINES: return 2 elif self._primitive == gl.GL_POINTS: return 1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class VertexArrayObject: def __init__(self, primitive): self._primitive = primitive self._buffers: List[pxng.BufferObject] = [] self._indices = pxng.BufferObject(data_type=self.index_data_type, array_type=gl.GL_ELEMENT_ARRAY_BUFFER) self._vao = gl.glGenVertexArrays(1) <|reserved_special_token_0|> def add_quad(self, p1, p2, p3, p4): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._buffers[0].set_value(p4) self._indices.set_value(glm.u16vec3(i, i + 1, i + 3)) self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3)) def add_triangle(self, p1, p2, p3): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._indices.set_value(glm.u16vec3(i, i + 1, i + 2)) def add_line(self, p1, p2): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._indices.set_value(glm.u16vec2(i, i + 1)) <|reserved_special_token_0|> def set_colors(self, *args: glm.vec4, target=1): for c in args: self._buffers[target].set_value(c) def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1): for c in args: self._buffers[target].set_value(c) def create(self): gl.glBindVertexArray(self._vao) for index, vbo in enumerate(self._buffers): vbo.bind(index) self._indices.bind(None) <|reserved_special_token_0|> def draw(self): index_count = len(self._indices) * self.primitive_component_count gl.glDrawElements(self._primitive, index_count, gl. 
GL_UNSIGNED_SHORT, None) @property def index_data_type(self): if self._primitive == gl.GL_TRIANGLES: return glm.u16vec3 elif self._primitive == gl.GL_LINES: return glm.u16vec2 elif self._primitive == gl.GL_POINTS: return glm.u16vec1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') @property def primitive_component_count(self): if self._primitive == gl.GL_TRIANGLES: return 3 elif self._primitive == gl.GL_LINES: return 2 elif self._primitive == gl.GL_POINTS: return 1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') def bind(self): gl.glBindVertexArray(self._vao) if self._indices.bind(None): if any(vbo.changed for vbo in self._buffers): self.create() return True gl.glBindVertexArray(0) return False <|reserved_special_token_1|> <|reserved_special_token_0|> class VertexArrayObject: def __init__(self, primitive): self._primitive = primitive self._buffers: List[pxng.BufferObject] = [] self._indices = pxng.BufferObject(data_type=self.index_data_type, array_type=gl.GL_ELEMENT_ARRAY_BUFFER) self._vao = gl.glGenVertexArrays(1) <|reserved_special_token_0|> def add_quad(self, p1, p2, p3, p4): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._buffers[0].set_value(p4) self._indices.set_value(glm.u16vec3(i, i + 1, i + 3)) self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3)) def add_triangle(self, p1, p2, p3): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._indices.set_value(glm.u16vec3(i, i + 1, i + 2)) def add_line(self, p1, p2): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._indices.set_value(glm.u16vec2(i, i + 1)) <|reserved_special_token_0|> def set_colors(self, *args: glm.vec4, target=1): for c in args: self._buffers[target].set_value(c) def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1): for c in args: self._buffers[target].set_value(c) def create(self): gl.glBindVertexArray(self._vao) for index, vbo in enumerate(self._buffers): vbo.bind(index) self._indices.bind(None) def reset(self): self._indices.reset() for vbo in self._buffers: vbo.reset() def draw(self): index_count = len(self._indices) * self.primitive_component_count gl.glDrawElements(self._primitive, index_count, gl. 
GL_UNSIGNED_SHORT, None) @property def index_data_type(self): if self._primitive == gl.GL_TRIANGLES: return glm.u16vec3 elif self._primitive == gl.GL_LINES: return glm.u16vec2 elif self._primitive == gl.GL_POINTS: return glm.u16vec1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') @property def primitive_component_count(self): if self._primitive == gl.GL_TRIANGLES: return 3 elif self._primitive == gl.GL_LINES: return 2 elif self._primitive == gl.GL_POINTS: return 1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') def bind(self): gl.glBindVertexArray(self._vao) if self._indices.bind(None): if any(vbo.changed for vbo in self._buffers): self.create() return True gl.glBindVertexArray(0) return False <|reserved_special_token_1|> from typing import List import glm import pxng import OpenGL.GL as gl class VertexArrayObject: def __init__(self, primitive): self._primitive = primitive self._buffers: List[pxng.BufferObject] = [] self._indices = pxng.BufferObject(data_type=self.index_data_type, array_type=gl.GL_ELEMENT_ARRAY_BUFFER) self._vao = gl.glGenVertexArrays(1) def attach_buffer(self, vbo: pxng.BufferObject): self._buffers.append(vbo) return len(self._buffers) - 1 def add_quad(self, p1, p2, p3, p4): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._buffers[0].set_value(p4) self._indices.set_value(glm.u16vec3(i, i + 1, i + 3)) self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3)) def add_triangle(self, p1, p2, p3): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._buffers[0].set_value(p3) self._indices.set_value(glm.u16vec3(i, i + 1, i + 2)) def add_line(self, p1, p2): i = self._buffers[0].index self._buffers[0].set_value(p1) self._buffers[0].set_value(p2) self._indices.set_value(glm.u16vec2(i, i + 1)) def add_point(self, p1): i = self._buffers[0].index self._buffers[0].set_value(p1) self._indices.set_value(glm.u16vec1(i)) def set_colors(self, *args: glm.vec4, target=1): for c in args: self._buffers[target].set_value(c) def set_texture(self, *args: glm.vec2 or glm.uvec2, target=1): for c in args: self._buffers[target].set_value(c) def create(self): gl.glBindVertexArray(self._vao) for index, vbo in enumerate(self._buffers): vbo.bind(index) self._indices.bind(None) def reset(self): self._indices.reset() for vbo in self._buffers: vbo.reset() def draw(self): index_count = len(self._indices) * self.primitive_component_count gl.glDrawElements(self._primitive, index_count, gl.GL_UNSIGNED_SHORT, None) @property def index_data_type(self): if self._primitive == gl.GL_TRIANGLES: return glm.u16vec3 elif self._primitive == gl.GL_LINES: return glm.u16vec2 elif self._primitive == gl.GL_POINTS: return glm.u16vec1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') @property def primitive_component_count(self): if self._primitive == gl.GL_TRIANGLES: return 3 elif self._primitive == gl.GL_LINES: return 2 elif self._primitive == gl.GL_POINTS: return 1 else: raise UserWarning(f'Unknown primitive type {self._primitive}') def bind(self): gl.glBindVertexArray(self._vao) if self._indices.bind(None): if any(vbo.changed for vbo in self._buffers): self.create() return True gl.glBindVertexArray(0) return False
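Editorial note: a minimal usage sketch for the VertexArrayObject class defined above. It assumes an active OpenGL context and assumes that pxng.BufferObject can be constructed for vertex attributes with only a data_type argument (i.e. that array_type defaults to a vertex attribute buffer); neither assumption is confirmed by the record itself, and the data types chosen for the position and color buffers are illustrative.

# Hypothetical usage of VertexArrayObject (illustration only; not part of the record).
import glm
import pxng
import OpenGL.GL as gl

vao = VertexArrayObject(gl.GL_TRIANGLES)
vao.attach_buffer(pxng.BufferObject(data_type=glm.vec3))  # slot 0: positions (assumed ctor args)
vao.attach_buffer(pxng.BufferObject(data_type=glm.vec4))  # slot 1: per-vertex colors (assumed)

vao.add_triangle(glm.vec3(0, 0, 0), glm.vec3(1, 0, 0), glm.vec3(0, 1, 0))
vao.set_colors(glm.vec4(1, 0, 0, 1), glm.vec4(0, 1, 0, 1), glm.vec4(0, 0, 1, 1))

if vao.bind():   # binds the VAO and re-creates the attached buffers when their data changed
    vao.draw()   # issues glDrawElements with GL_UNSIGNED_SHORT indices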
flexible
{ "blob_id": "7530c2c85f83d1714840ba97c1ec702f063658c5", "index": 379, "step-1": "<mask token>\n\n\nclass VertexArrayObject:\n\n def __init__(self, primitive):\n self._primitive = primitive\n self._buffers: List[pxng.BufferObject] = []\n self._indices = pxng.BufferObject(data_type=self.index_data_type,\n array_type=gl.GL_ELEMENT_ARRAY_BUFFER)\n self._vao = gl.glGenVertexArrays(1)\n <mask token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <mask token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n <mask token>\n <mask token>\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.\n GL_UNSIGNED_SHORT, None)\n <mask token>\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n <mask token>\n", "step-2": "<mask token>\n\n\nclass VertexArrayObject:\n\n def __init__(self, primitive):\n self._primitive = primitive\n self._buffers: List[pxng.BufferObject] = []\n self._indices = pxng.BufferObject(data_type=self.index_data_type,\n array_type=gl.GL_ELEMENT_ARRAY_BUFFER)\n self._vao = gl.glGenVertexArrays(1)\n <mask token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <mask token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n for index, vbo in enumerate(self._buffers):\n vbo.bind(index)\n self._indices.bind(None)\n <mask token>\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.\n GL_UNSIGNED_SHORT, None)\n\n @property\n def index_data_type(self):\n if 
self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n <mask token>\n", "step-3": "<mask token>\n\n\nclass VertexArrayObject:\n\n def __init__(self, primitive):\n self._primitive = primitive\n self._buffers: List[pxng.BufferObject] = []\n self._indices = pxng.BufferObject(data_type=self.index_data_type,\n array_type=gl.GL_ELEMENT_ARRAY_BUFFER)\n self._vao = gl.glGenVertexArrays(1)\n <mask token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <mask token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n for index, vbo in enumerate(self._buffers):\n vbo.bind(index)\n self._indices.bind(None)\n <mask token>\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.\n GL_UNSIGNED_SHORT, None)\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "step-4": "<mask token>\n\n\nclass VertexArrayObject:\n\n def __init__(self, primitive):\n self._primitive = primitive\n self._buffers: List[pxng.BufferObject] = []\n self._indices = pxng.BufferObject(data_type=self.index_data_type,\n array_type=gl.GL_ELEMENT_ARRAY_BUFFER)\n self._vao = gl.glGenVertexArrays(1)\n <mask token>\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n 
self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n <mask token>\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: (glm.vec2 or glm.uvec2), target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n for index, vbo in enumerate(self._buffers):\n vbo.bind(index)\n self._indices.bind(None)\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.\n GL_UNSIGNED_SHORT, None)\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "step-5": "from typing import List\n\nimport glm\nimport pxng\n\nimport OpenGL.GL as gl\n\n\nclass VertexArrayObject:\n def __init__(self, primitive):\n self._primitive = primitive\n self._buffers: List[pxng.BufferObject] = []\n self._indices = pxng.BufferObject(data_type=self.index_data_type,\n array_type=gl.GL_ELEMENT_ARRAY_BUFFER)\n self._vao = gl.glGenVertexArrays(1)\n\n def attach_buffer(self, vbo: pxng.BufferObject):\n self._buffers.append(vbo)\n return len(self._buffers) - 1\n\n def add_quad(self, p1, p2, p3, p4):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._buffers[0].set_value(p4)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 3))\n self._indices.set_value(glm.u16vec3(i + 1, i + 2, i + 3))\n\n def add_triangle(self, p1, p2, p3):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._buffers[0].set_value(p3)\n self._indices.set_value(glm.u16vec3(i, i + 1, i + 2))\n\n def add_line(self, p1, p2):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._buffers[0].set_value(p2)\n self._indices.set_value(glm.u16vec2(i, i + 1))\n\n def add_point(self, p1):\n i = self._buffers[0].index\n self._buffers[0].set_value(p1)\n self._indices.set_value(glm.u16vec1(i))\n\n def set_colors(self, *args: glm.vec4, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def set_texture(self, *args: glm.vec2 or glm.uvec2, target=1):\n for c in args:\n self._buffers[target].set_value(c)\n\n def create(self):\n gl.glBindVertexArray(self._vao)\n\n for index, vbo in 
enumerate(self._buffers):\n vbo.bind(index)\n\n self._indices.bind(None)\n\n def reset(self):\n self._indices.reset()\n for vbo in self._buffers:\n vbo.reset()\n\n def draw(self):\n index_count = len(self._indices) * self.primitive_component_count\n gl.glDrawElements(self._primitive, index_count, gl.GL_UNSIGNED_SHORT, None)\n\n @property\n def index_data_type(self):\n if self._primitive == gl.GL_TRIANGLES:\n return glm.u16vec3\n elif self._primitive == gl.GL_LINES:\n return glm.u16vec2\n elif self._primitive == gl.GL_POINTS:\n return glm.u16vec1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n @property\n def primitive_component_count(self):\n if self._primitive == gl.GL_TRIANGLES:\n return 3\n elif self._primitive == gl.GL_LINES:\n return 2\n elif self._primitive == gl.GL_POINTS:\n return 1\n else:\n raise UserWarning(f'Unknown primitive type {self._primitive}')\n\n def bind(self):\n gl.glBindVertexArray(self._vao)\n if self._indices.bind(None):\n if any(vbo.changed for vbo in self._buffers):\n self.create()\n return True\n gl.glBindVertexArray(0)\n return False\n", "step-ids": [ 9, 11, 12, 13, 17 ] }
[ 9, 11, 12, 13, 17 ]
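A minimal usage sketch for the VertexArrayObject record above. This is illustrative only: it assumes an OpenGL context is already current, that the class is importable from the surrounding module, and that pxng.BufferObject(data_type=...) builds the per-attribute vertex buffers the class expects (only the index-buffer constructor arguments are confirmed by the source). Shader and window setup are omitted.

import glm
import pxng
import OpenGL.GL as gl

# Triangle-list VAO with two attribute buffers: positions (index 0) and colors (index 1).
vao = VertexArrayObject(gl.GL_TRIANGLES)
positions = pxng.BufferObject(data_type=glm.vec3)  # hypothetical vertex buffer
colors = pxng.BufferObject(data_type=glm.vec4)     # hypothetical color buffer
vao.attach_buffer(positions)   # becomes attribute index 0
vao.attach_buffer(colors)      # becomes attribute index 1

# Queue one triangle and its per-vertex colors, then upload and draw.
vao.add_triangle(glm.vec3(0, 0, 0), glm.vec3(1, 0, 0), glm.vec3(0, 1, 0))
vao.set_colors(glm.vec4(1, 0, 0, 1), glm.vec4(0, 1, 0, 1), glm.vec4(0, 0, 1, 1))
vao.create()        # binds the VAO and binds/uploads every attached buffer
if vao.bind():      # re-runs create() when any attached buffer reports changes
    vao.draw()      # glDrawElements(GL_TRIANGLES, ..., GL_UNSIGNED_SHORT, None)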
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2018, q2-chemistree development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import unittest from q2_chemistree.plugin_setup import plugin as chemistree_plugin class PluginSetupTests(unittest.TestCase): def test_plugin_setup(self): self.assertEqual(chemistree_plugin.name, 'chemistree')
normal
{ "blob_id": "4296dc5b79fd1d2c872eb1115beab52a0f067423", "index": 4816, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass PluginSetupTests(unittest.TestCase):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass PluginSetupTests(unittest.TestCase):\n\n def test_plugin_setup(self):\n self.assertEqual(chemistree_plugin.name, 'chemistree')\n", "step-4": "import unittest\nfrom q2_chemistree.plugin_setup import plugin as chemistree_plugin\n\n\nclass PluginSetupTests(unittest.TestCase):\n\n def test_plugin_setup(self):\n self.assertEqual(chemistree_plugin.name, 'chemistree')\n", "step-5": "# ----------------------------------------------------------------------------\n# Copyright (c) 2016-2018, q2-chemistree development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport unittest\n\nfrom q2_chemistree.plugin_setup import plugin as chemistree_plugin\n\n\nclass PluginSetupTests(unittest.TestCase):\n\n def test_plugin_setup(self):\n self.assertEqual(chemistree_plugin.name, 'chemistree')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class UserClusters(JsonView): logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): username = self.request.user.username try: if session_is_okay(self.request.session, 'user_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_by_user(username) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_by_user(username) except Exception as e: self.logger.exception(e) raise class SingleCluster(JsonView): logger = logging.getLogger('mliyweb.views.SingleCluster') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): cluster_id = self.kwargs['pk'] try: if session_is_okay(self.request.session, 'user_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_single_cluster(cluster_id) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_single_cluster(cluster_id) except Exception as e: self.logger.exception(e) raise class ChangeClusterState(JsonView): log = logging.getLogger('mliyweb.views.ChangeClusterState') cluster_service = ClusterService() @log_enter_exit(log, log_level=10) def get_data(self, context): client = boto3.client('cloudformation', region_name=AWS_REGION) cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid']) client.delete_stack(StackName=cluster.stack_id) if cluster.current_bill: cluster.current_bill.ongoing = False cluster.current_bill.end_time = datetime.now(timezone('UTC')) cluster.current_bill.save() if cluster.state == 'TERMINATED' or cluster.state == 'FAILED': deleteDnsEntry(cluster.cluster_id, cluster.master_ip) else: deleteDnsEntry(cluster.cluster_id, cluster.master_ip) cluster.state = 'TERMINATED' cluster.save() return {'action': 'terminate', 'status': 'ok'} <|reserved_special_token_1|> <|reserved_special_token_0|> class UserGroupClusters(JsonView): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> @log_enter_exit(logger) def get_data(self, context): user = self.request.user try: if session_is_okay(self.request.session, 'group_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_by_user_group(user) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_by_user_group(user) except Exception as e: self.logger.exception(e) return [] class UserClusters(JsonView): logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): username = self.request.user.username try: if session_is_okay(self.request.session, 'user_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_by_user(username) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_by_user(username) except Exception as e: self.logger.exception(e) raise class SingleCluster(JsonView): logger = logging.getLogger('mliyweb.views.SingleCluster') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): cluster_id = self.kwargs['pk'] try: if session_is_okay(self.request.session, 'user_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_single_cluster(cluster_id) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_single_cluster(cluster_id) 
except Exception as e: self.logger.exception(e) raise class ChangeClusterState(JsonView): log = logging.getLogger('mliyweb.views.ChangeClusterState') cluster_service = ClusterService() @log_enter_exit(log, log_level=10) def get_data(self, context): client = boto3.client('cloudformation', region_name=AWS_REGION) cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid']) client.delete_stack(StackName=cluster.stack_id) if cluster.current_bill: cluster.current_bill.ongoing = False cluster.current_bill.end_time = datetime.now(timezone('UTC')) cluster.current_bill.save() if cluster.state == 'TERMINATED' or cluster.state == 'FAILED': deleteDnsEntry(cluster.cluster_id, cluster.master_ip) else: deleteDnsEntry(cluster.cluster_id, cluster.master_ip) cluster.state = 'TERMINATED' cluster.save() return {'action': 'terminate', 'status': 'ok'} <|reserved_special_token_1|> <|reserved_special_token_0|> class UserGroupClusters(JsonView): <|reserved_special_token_0|> logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): user = self.request.user try: if session_is_okay(self.request.session, 'group_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_by_user_group(user) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_by_user_group(user) except Exception as e: self.logger.exception(e) return [] class UserClusters(JsonView): logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): username = self.request.user.username try: if session_is_okay(self.request.session, 'user_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_by_user(username) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_by_user(username) except Exception as e: self.logger.exception(e) raise class SingleCluster(JsonView): logger = logging.getLogger('mliyweb.views.SingleCluster') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): cluster_id = self.kwargs['pk'] try: if session_is_okay(self.request.session, 'user_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_single_cluster(cluster_id) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_single_cluster(cluster_id) except Exception as e: self.logger.exception(e) raise class ChangeClusterState(JsonView): log = logging.getLogger('mliyweb.views.ChangeClusterState') cluster_service = ClusterService() @log_enter_exit(log, log_level=10) def get_data(self, context): client = boto3.client('cloudformation', region_name=AWS_REGION) cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid']) client.delete_stack(StackName=cluster.stack_id) if cluster.current_bill: cluster.current_bill.ongoing = False cluster.current_bill.end_time = datetime.now(timezone('UTC')) cluster.current_bill.save() if cluster.state == 'TERMINATED' or cluster.state == 'FAILED': deleteDnsEntry(cluster.cluster_id, cluster.master_ip) else: deleteDnsEntry(cluster.cluster_id, cluster.master_ip) cluster.state = 'TERMINATED' cluster.save() return {'action': 'terminate', 'status': 'ok'} <|reserved_special_token_1|> <|reserved_special_token_0|> class UserGroupClusters(JsonView): """ Returns a json struct with the current clusters. 
If the last updated time in the db is greater than the timeout, it returns the current data and launches a background thread to refresh and prune the cluster list. If called with ?forcerefresh as a url argument it'll refresh regardless of the last updated time. """ logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): user = self.request.user try: if session_is_okay(self.request.session, 'group_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_by_user_group(user) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_by_user_group(user) except Exception as e: self.logger.exception(e) return [] class UserClusters(JsonView): logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): username = self.request.user.username try: if session_is_okay(self.request.session, 'user_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_by_user(username) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_by_user(username) except Exception as e: self.logger.exception(e) raise class SingleCluster(JsonView): logger = logging.getLogger('mliyweb.views.SingleCluster') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): cluster_id = self.kwargs['pk'] try: if session_is_okay(self.request.session, 'user_clusters'): self.logger.info('Updating clusters in database') return self.cluster_service.update_single_cluster(cluster_id) else: self.logger.info('Getting clusters from database') return self.cluster_service.get_single_cluster(cluster_id) except Exception as e: self.logger.exception(e) raise class ChangeClusterState(JsonView): log = logging.getLogger('mliyweb.views.ChangeClusterState') cluster_service = ClusterService() @log_enter_exit(log, log_level=10) def get_data(self, context): client = boto3.client('cloudformation', region_name=AWS_REGION) cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid']) client.delete_stack(StackName=cluster.stack_id) if cluster.current_bill: cluster.current_bill.ongoing = False cluster.current_bill.end_time = datetime.now(timezone('UTC')) cluster.current_bill.save() if cluster.state == 'TERMINATED' or cluster.state == 'FAILED': deleteDnsEntry(cluster.cluster_id, cluster.master_ip) else: deleteDnsEntry(cluster.cluster_id, cluster.master_ip) cluster.state = 'TERMINATED' cluster.save() return {'action': 'terminate', 'status': 'ok'} <|reserved_special_token_1|> import logging from datetime import datetime import boto3 from pytz import timezone from mliyweb.api.v1.api_session_limiter import session_is_okay from mliyweb.api.v1.json_view import JsonView from mliyweb.dns import deleteDnsEntry from mliyweb.models import Cluster from mliyweb.resources.clusters import ClusterService from mliyweb.settings import AWS_REGION from mliyweb.utils import log_enter_exit class UserGroupClusters(JsonView): ''' Returns a json struct with the current clusters. If the last updated time in the db is greater than the timeout, it returns the current data and launches a background thread to refresh and prune the cluster list. If called with ?forcerefresh as a url argument it'll refresh regardless of the last updated time. 
''' logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() # global instance refresh time stamp @log_enter_exit(logger) def get_data(self, context): user = self.request.user try: if session_is_okay(self.request.session, "group_clusters"): self.logger.info("Updating clusters in database") return self.cluster_service.update_by_user_group(user) else: self.logger.info("Getting clusters from database") return self.cluster_service.get_by_user_group(user) except Exception as e: self.logger.exception(e) return [] class UserClusters(JsonView): # TODO There needs to be a Cluster Launch thread cleanup/rework logger = logging.getLogger('mliyweb.views.UserClusters') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): username = self.request.user.username try: if session_is_okay(self.request.session, "user_clusters"): self.logger.info("Updating clusters in database") return self.cluster_service.update_by_user(username) else: self.logger.info("Getting clusters from database") return self.cluster_service.get_by_user(username) except Exception as e: self.logger.exception(e) raise class SingleCluster(JsonView): logger = logging.getLogger('mliyweb.views.SingleCluster') cluster_service = ClusterService() @log_enter_exit(logger) def get_data(self, context): cluster_id = self.kwargs['pk'] try: if session_is_okay(self.request.session, "user_clusters"): self.logger.info("Updating clusters in database") return self.cluster_service.update_single_cluster(cluster_id) else: self.logger.info("Getting clusters from database") return self.cluster_service.get_single_cluster(cluster_id) except Exception as e: self.logger.exception(e) raise class ChangeClusterState(JsonView): log = logging.getLogger('mliyweb.views.ChangeClusterState') cluster_service = ClusterService() @log_enter_exit(log, log_level=10) def get_data(self,context): client = boto3.client('cloudformation', region_name=AWS_REGION) cluster = Cluster.objects.get(cluster_id = self.kwargs['clusterid']) client.delete_stack(StackName=cluster.stack_id) if cluster.current_bill: cluster.current_bill.ongoing = False cluster.current_bill.end_time = datetime.now(timezone('UTC')) cluster.current_bill.save() if cluster.state == 'TERMINATED' or cluster.state == 'FAILED': deleteDnsEntry(cluster.cluster_id,cluster.master_ip) else: deleteDnsEntry(cluster.cluster_id,cluster.master_ip) cluster.state = "TERMINATED" cluster.save() return { 'action' : 'terminate', 'status' : 'ok'}
flexible
{ "blob_id": "f882b73645c6a280a17f40b27c01ecad7e4d85ae", "index": 5860, "step-1": "<mask token>\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n", "step-2": "<mask token>\n\n\nclass UserGroupClusters(JsonView):\n <mask token>\n <mask token>\n <mask token>\n\n @log_enter_exit(logger)\n def get_data(self, context):\n user = self.request.user\n try:\n if session_is_okay(self.request.session, 'group_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user_group(user)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user_group(user)\n except Exception as e:\n self.logger.exception(e)\n return []\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n 
self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n", "step-3": "<mask token>\n\n\nclass UserGroupClusters(JsonView):\n <mask token>\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n user = self.request.user\n try:\n if session_is_okay(self.request.session, 'group_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user_group(user)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user_group(user)\n except Exception as e:\n self.logger.exception(e)\n return []\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n 
deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n", "step-4": "<mask token>\n\n\nclass UserGroupClusters(JsonView):\n \"\"\"\n\tReturns a json struct with the current clusters. If the last updated\n\ttime in the db is greater than the timeout, it returns the current data\n\tand launches a background thread to refresh and prune the cluster list.\n\n\tIf called with ?forcerefresh as a url argument it'll refresh regardless\n\tof the last updated time.\n\t\"\"\"\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n user = self.request.user\n try:\n if session_is_okay(self.request.session, 'group_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user_group(user)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user_group(user)\n except Exception as e:\n self.logger.exception(e)\n return []\n\n\nclass UserClusters(JsonView):\n logger = logging.getLogger('mliyweb.views.UserClusters')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n username = self.request.user.username\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_by_user(username)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_by_user(username)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass SingleCluster(JsonView):\n logger = logging.getLogger('mliyweb.views.SingleCluster')\n cluster_service = ClusterService()\n\n @log_enter_exit(logger)\n def get_data(self, context):\n cluster_id = self.kwargs['pk']\n try:\n if session_is_okay(self.request.session, 'user_clusters'):\n self.logger.info('Updating clusters in database')\n return self.cluster_service.update_single_cluster(cluster_id)\n else:\n self.logger.info('Getting clusters from database')\n return self.cluster_service.get_single_cluster(cluster_id)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n\nclass ChangeClusterState(JsonView):\n log = logging.getLogger('mliyweb.views.ChangeClusterState')\n cluster_service = ClusterService()\n\n @log_enter_exit(log, log_level=10)\n def get_data(self, context):\n client = boto3.client('cloudformation', region_name=AWS_REGION)\n cluster = Cluster.objects.get(cluster_id=self.kwargs['clusterid'])\n client.delete_stack(StackName=cluster.stack_id)\n if cluster.current_bill:\n cluster.current_bill.ongoing = False\n cluster.current_bill.end_time = datetime.now(timezone('UTC'))\n cluster.current_bill.save()\n if cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n else:\n deleteDnsEntry(cluster.cluster_id, cluster.master_ip)\n cluster.state = 'TERMINATED'\n cluster.save()\n return {'action': 'terminate', 'status': 'ok'}\n", "step-5": "import logging\nfrom datetime import datetime\n\nimport boto3\nfrom pytz import timezone\n\nfrom mliyweb.api.v1.api_session_limiter import session_is_okay\nfrom mliyweb.api.v1.json_view import JsonView\nfrom mliyweb.dns import deleteDnsEntry\nfrom mliyweb.models import Cluster\nfrom mliyweb.resources.clusters import ClusterService\nfrom mliyweb.settings import 
AWS_REGION\nfrom mliyweb.utils import log_enter_exit\n\n\nclass UserGroupClusters(JsonView):\n\t'''\n\tReturns a json struct with the current clusters. If the last updated\n\ttime in the db is greater than the timeout, it returns the current data\n\tand launches a background thread to refresh and prune the cluster list.\n\n\tIf called with ?forcerefresh as a url argument it'll refresh regardless\n\tof the last updated time.\n\t'''\n\tlogger = logging.getLogger('mliyweb.views.UserClusters')\n\tcluster_service = ClusterService()\n\n\t# global instance refresh time stamp\n\n\t@log_enter_exit(logger)\n\tdef get_data(self, context):\n\n\t\tuser = self.request.user\n\t\ttry:\n\t\t\tif session_is_okay(self.request.session, \"group_clusters\"):\n\t\t\t\tself.logger.info(\"Updating clusters in database\")\n\t\t\t\treturn self.cluster_service.update_by_user_group(user)\n\t\t\telse:\n\t\t\t\tself.logger.info(\"Getting clusters from database\")\n\t\t\t\treturn self.cluster_service.get_by_user_group(user)\n\n\t\texcept Exception as e:\n\t\t\tself.logger.exception(e)\n\n\t\treturn []\n\n\nclass UserClusters(JsonView):\n\t# TODO There needs to be a Cluster Launch thread cleanup/rework\n\tlogger = logging.getLogger('mliyweb.views.UserClusters')\n\tcluster_service = ClusterService()\n\n\t@log_enter_exit(logger)\n\tdef get_data(self, context):\n\t\tusername = self.request.user.username\n\t\ttry:\n\t\t\tif session_is_okay(self.request.session, \"user_clusters\"):\n\t\t\t\tself.logger.info(\"Updating clusters in database\")\n\t\t\t\treturn self.cluster_service.update_by_user(username)\n\t\t\telse:\n\t\t\t\tself.logger.info(\"Getting clusters from database\")\n\t\t\t\treturn self.cluster_service.get_by_user(username)\n\n\t\texcept Exception as e:\n\t\t\tself.logger.exception(e)\n\t\t\traise\n\nclass SingleCluster(JsonView):\n\tlogger = logging.getLogger('mliyweb.views.SingleCluster')\n\tcluster_service = ClusterService()\n\n\t@log_enter_exit(logger)\n\tdef get_data(self, context):\n\t\tcluster_id = self.kwargs['pk']\n\n\t\ttry:\n\t\t\tif session_is_okay(self.request.session, \"user_clusters\"):\n\t\t\t\tself.logger.info(\"Updating clusters in database\")\n\t\t\t\treturn self.cluster_service.update_single_cluster(cluster_id)\n\t\t\telse:\n\t\t\t\tself.logger.info(\"Getting clusters from database\")\n\t\t\t\treturn self.cluster_service.get_single_cluster(cluster_id)\n\n\t\texcept Exception as e:\n\t\t\tself.logger.exception(e)\n\t\t\traise\n\n\nclass ChangeClusterState(JsonView):\n\tlog = logging.getLogger('mliyweb.views.ChangeClusterState')\n\tcluster_service = ClusterService()\n\n\t@log_enter_exit(log, log_level=10)\n\tdef get_data(self,context):\n\n\t\tclient = boto3.client('cloudformation', region_name=AWS_REGION)\n\t\tcluster = Cluster.objects.get(cluster_id = self.kwargs['clusterid'])\n\n\t\tclient.delete_stack(StackName=cluster.stack_id)\n\t\tif cluster.current_bill:\n\t\t\tcluster.current_bill.ongoing = False\n\t\t\tcluster.current_bill.end_time = datetime.now(timezone('UTC'))\n\t\t\tcluster.current_bill.save()\n\n\t\tif cluster.state == 'TERMINATED' or cluster.state == 'FAILED':\n\t\t\tdeleteDnsEntry(cluster.cluster_id,cluster.master_ip)\n\t\telse:\n\t\t\tdeleteDnsEntry(cluster.cluster_id,cluster.master_ip)\n\n\t\tcluster.state = \"TERMINATED\"\n\t\tcluster.save()\n\n\t\treturn { 'action' : 'terminate', 'status' : 'ok'}", "step-ids": [ 9, 11, 12, 13, 15 ] }
[ 9, 11, 12, 13, 15 ]
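A hypothetical Django URL wiring for the cluster endpoints in the record above, assuming JsonView follows Django's class-based-view convention and exposes .as_view(); the import path, route strings, and route names are illustrative and not taken from the source.

from django.urls import path
# Hypothetical module path; the source only shows the view classes themselves.
from mliyweb.api.v1.clusters import (
    UserClusters, UserGroupClusters, SingleCluster, ChangeClusterState)

urlpatterns = [
    path('clusters/', UserClusters.as_view(), name='user-clusters'),
    path('clusters/group/', UserGroupClusters.as_view(), name='group-clusters'),
    # SingleCluster reads self.kwargs['pk']; ChangeClusterState reads self.kwargs['clusterid'].
    path('clusters/<str:pk>/', SingleCluster.as_view(), name='single-cluster'),
    path('clusters/<str:clusterid>/terminate/', ChangeClusterState.as_view(), name='terminate-cluster'),
]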
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print('...starting export') <|reserved_special_token_0|> logging.basicConfig(filename=timestr + '-export.log') <|reserved_special_token_0|> matCursor.execute(select_all_mat) <|reserved_special_token_0|> for m in materialTypes: materialTypeLookup[m['id']] = m['name'] <|reserved_special_token_0|> locCursor.execute(select_all_loc) <|reserved_special_token_0|> for l in locations: locLookup[l['id']] = l['name'] <|reserved_special_token_0|> callNoTypeCursor.execute(select_all_call_no_types) <|reserved_special_token_0|> for c in callNoTypes: callNoTypeLookup[c['id']] = c['name'] <|reserved_special_token_0|> print('executing query') cursor.execute(select_ids_sql) while True: print('in the while true - fetching...') rows = cursor.fetchmany(cursor.itersize) print('fetch is done') marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras. DictCursor) if rows: save_file = timestr + '.' + str(count) + '.json' writer = open(save_file, 'wt') print('created the file: ' + save_file) count += 1 for row in rows: try: rowId = row['id'] rowInstanceId = row['instance_id'] if rowInstanceId == None: logging.error('BAD RECORD: INSTANCE ID WAS NULL' + str(row) ) continue select_record_sql = ( """ select id, content as marc from {}_mod_source_record_storage.marc_records_lb where id = '{}' limit 1""" .format(TENANT, rowId)) marcRecordCursor.execute(select_record_sql) marcRow = marcRecordCursor.fetchone() marcJsonAsString = json.dumps(marcRow['marc']) marcString = marcJsonAsString.encode('utf-8').strip() for record in JSONReader(marcJsonAsString): if record['6xx'] is not None: logging.error('BAD RECORD: 6xx' + str(row)) continue if record['4xx'] is not None: logging.error('BAD RECORD: 4xx' + str(row)) continue select_holding_sql = ( """ select id, creation_date, callnumbertypeid, jsonb->>'permanentLocationId' as permanentlocationid, jsonb->'holdingsStatements' as holdingstatements, jsonb->>'callNumber' as callNumber from {}_mod_inventory_storage.holdings_record where instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)""" .format(TENANT, rowInstanceId)) marcRecordCursor.execute(select_holding_sql) holdingRows = marcRecordCursor.fetchall() for holding in holdingRows: holdingsStatements = holding['holdingstatements'] rowHoldingsId = holding['id'] newField = Field(tag='998', indicators=[' ', ' '], subfields=['a', holding.get('callnumber', ''), 'l', locLookup.get(holding.get( 'permanentlocationid', ''), '')]) for statement in holdingsStatements: if statement is not None: newField.add_subfield('s', statement.get( 'statement', '').replace( 'Extent of ownership:', '')) record.add_field(newField) select_item_sql = ( """ select id, materialtypeid, jsonb->>'effectiveLocationId' as effectivelocationid, jsonb->>'barcode' as barcode, jsonb->'effectiveCallNumberComponents'->>'prefix' as prefix, jsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype, jsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber from {}_mod_inventory_storage.item where holdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)""" .format(TENANT, rowHoldingsId)) marcRecordCursor.execute(select_item_sql) itemRows = marcRecordCursor.fetchall() for item in itemRows: callNoToUse = item.get('callnumber', 'na') prefix = item.get('prefix', None) if prefix is not None: callNoToUse = prefix + ' ' + callNoToUse record.add_field(Field(tag='952', indicators=[ ' ', 
' '], subfields=['m', item.get( 'barcode', ''), 'j', callNoTypeLookup.get( item.get('callnotype', ''), ''), 'd', locLookup.get(item.get( 'effectivelocationid'), ''), 'i', materialTypeLookup.get(item.get( 'materialtypeid'), ''), 'e', callNoToUse])) if len(record.leader) < 24: logging.error('BAD LEADER' + record.leader + ' ' + str(row)) record.leader = '{:<24}'.format(record.leader) writer.write(record.as_json()) writer.write('\n') except Exception as e: print('ERROR PROCESSING ROW:' + str(row)) print(e) if rowInstanceId == None: rowInstanceId = 'None' logging.error('UNABLE TO WRITE TO FILE: ' + rowInstanceId) logging.error(e) continue writer.close() else: print('in the else --> finishing') break if folio_db: cursor.close() marcRecordCursor.close() folio_db.close() print('complete') <|reserved_special_token_1|> <|reserved_special_token_0|> print('...starting export') timestr = time.strftime('%Y%m%d-%H%M%S') logging.basicConfig(filename=timestr + '-export.log') DATABASE_HOST = 'redacted' DATABASE_USERNAME = 'redacted' DATABASE_PASSWORD = 'redacted' DATABASE_PORT = 5432 DATABASE_NAME = 'redacted' TENANT = 'redacted' count = 0 folio_db = psycopg2.connect(user=DATABASE_USERNAME, password= DATABASE_PASSWORD, host=DATABASE_HOST, port=DATABASE_PORT, database= DATABASE_NAME) materialTypeLookup = {} matCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) select_all_mat = ( """ select id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type""" .format(TENANT)) matCursor.execute(select_all_mat) materialTypes = matCursor.fetchall() for m in materialTypes: materialTypeLookup[m['id']] = m['name'] locLookup = {} locCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) select_all_loc = ( """ select id, jsonb->>'name' as name from {}_mod_inventory_storage.location""" .format(TENANT)) locCursor.execute(select_all_loc) locations = locCursor.fetchall() for l in locations: locLookup[l['id']] = l['name'] callNoTypeLookup = {} callNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) select_all_call_no_types = ( """ select id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type""" .format(TENANT)) callNoTypeCursor.execute(select_all_call_no_types) callNoTypes = callNoTypeCursor.fetchall() for c in callNoTypes: callNoTypeLookup[c['id']] = c['name'] cursor = folio_db.cursor(name='folio', cursor_factory=psycopg2.extras. DictCursor) cursor.itersize = 300000 select_ids_sql = ( """ select id, instance_id from {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)""" .format(TENANT, "'ACTUAL'")) print('executing query') cursor.execute(select_ids_sql) while True: print('in the while true - fetching...') rows = cursor.fetchmany(cursor.itersize) print('fetch is done') marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras. DictCursor) if rows: save_file = timestr + '.' 
+ str(count) + '.json' writer = open(save_file, 'wt') print('created the file: ' + save_file) count += 1 for row in rows: try: rowId = row['id'] rowInstanceId = row['instance_id'] if rowInstanceId == None: logging.error('BAD RECORD: INSTANCE ID WAS NULL' + str(row) ) continue select_record_sql = ( """ select id, content as marc from {}_mod_source_record_storage.marc_records_lb where id = '{}' limit 1""" .format(TENANT, rowId)) marcRecordCursor.execute(select_record_sql) marcRow = marcRecordCursor.fetchone() marcJsonAsString = json.dumps(marcRow['marc']) marcString = marcJsonAsString.encode('utf-8').strip() for record in JSONReader(marcJsonAsString): if record['6xx'] is not None: logging.error('BAD RECORD: 6xx' + str(row)) continue if record['4xx'] is not None: logging.error('BAD RECORD: 4xx' + str(row)) continue select_holding_sql = ( """ select id, creation_date, callnumbertypeid, jsonb->>'permanentLocationId' as permanentlocationid, jsonb->'holdingsStatements' as holdingstatements, jsonb->>'callNumber' as callNumber from {}_mod_inventory_storage.holdings_record where instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)""" .format(TENANT, rowInstanceId)) marcRecordCursor.execute(select_holding_sql) holdingRows = marcRecordCursor.fetchall() for holding in holdingRows: holdingsStatements = holding['holdingstatements'] rowHoldingsId = holding['id'] newField = Field(tag='998', indicators=[' ', ' '], subfields=['a', holding.get('callnumber', ''), 'l', locLookup.get(holding.get( 'permanentlocationid', ''), '')]) for statement in holdingsStatements: if statement is not None: newField.add_subfield('s', statement.get( 'statement', '').replace( 'Extent of ownership:', '')) record.add_field(newField) select_item_sql = ( """ select id, materialtypeid, jsonb->>'effectiveLocationId' as effectivelocationid, jsonb->>'barcode' as barcode, jsonb->'effectiveCallNumberComponents'->>'prefix' as prefix, jsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype, jsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber from {}_mod_inventory_storage.item where holdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)""" .format(TENANT, rowHoldingsId)) marcRecordCursor.execute(select_item_sql) itemRows = marcRecordCursor.fetchall() for item in itemRows: callNoToUse = item.get('callnumber', 'na') prefix = item.get('prefix', None) if prefix is not None: callNoToUse = prefix + ' ' + callNoToUse record.add_field(Field(tag='952', indicators=[ ' ', ' '], subfields=['m', item.get( 'barcode', ''), 'j', callNoTypeLookup.get( item.get('callnotype', ''), ''), 'd', locLookup.get(item.get( 'effectivelocationid'), ''), 'i', materialTypeLookup.get(item.get( 'materialtypeid'), ''), 'e', callNoToUse])) if len(record.leader) < 24: logging.error('BAD LEADER' + record.leader + ' ' + str(row)) record.leader = '{:<24}'.format(record.leader) writer.write(record.as_json()) writer.write('\n') except Exception as e: print('ERROR PROCESSING ROW:' + str(row)) print(e) if rowInstanceId == None: rowInstanceId = 'None' logging.error('UNABLE TO WRITE TO FILE: ' + rowInstanceId) logging.error(e) continue writer.close() else: print('in the else --> finishing') break if folio_db: cursor.close() marcRecordCursor.close() folio_db.close() print('complete') <|reserved_special_token_1|> import pymarc from pymarc import JSONReader, Field, JSONWriter, XMLWriter import psycopg2 import psycopg2.extras import time import logging import json 
print('...starting export') timestr = time.strftime('%Y%m%d-%H%M%S') logging.basicConfig(filename=timestr + '-export.log') DATABASE_HOST = 'redacted' DATABASE_USERNAME = 'redacted' DATABASE_PASSWORD = 'redacted' DATABASE_PORT = 5432 DATABASE_NAME = 'redacted' TENANT = 'redacted' count = 0 folio_db = psycopg2.connect(user=DATABASE_USERNAME, password= DATABASE_PASSWORD, host=DATABASE_HOST, port=DATABASE_PORT, database= DATABASE_NAME) materialTypeLookup = {} matCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) select_all_mat = ( """ select id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type""" .format(TENANT)) matCursor.execute(select_all_mat) materialTypes = matCursor.fetchall() for m in materialTypes: materialTypeLookup[m['id']] = m['name'] locLookup = {} locCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) select_all_loc = ( """ select id, jsonb->>'name' as name from {}_mod_inventory_storage.location""" .format(TENANT)) locCursor.execute(select_all_loc) locations = locCursor.fetchall() for l in locations: locLookup[l['id']] = l['name'] callNoTypeLookup = {} callNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) select_all_call_no_types = ( """ select id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type""" .format(TENANT)) callNoTypeCursor.execute(select_all_call_no_types) callNoTypes = callNoTypeCursor.fetchall() for c in callNoTypes: callNoTypeLookup[c['id']] = c['name'] cursor = folio_db.cursor(name='folio', cursor_factory=psycopg2.extras. DictCursor) cursor.itersize = 300000 select_ids_sql = ( """ select id, instance_id from {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)""" .format(TENANT, "'ACTUAL'")) print('executing query') cursor.execute(select_ids_sql) while True: print('in the while true - fetching...') rows = cursor.fetchmany(cursor.itersize) print('fetch is done') marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras. DictCursor) if rows: save_file = timestr + '.' 
+ str(count) + '.json' writer = open(save_file, 'wt') print('created the file: ' + save_file) count += 1 for row in rows: try: rowId = row['id'] rowInstanceId = row['instance_id'] if rowInstanceId == None: logging.error('BAD RECORD: INSTANCE ID WAS NULL' + str(row) ) continue select_record_sql = ( """ select id, content as marc from {}_mod_source_record_storage.marc_records_lb where id = '{}' limit 1""" .format(TENANT, rowId)) marcRecordCursor.execute(select_record_sql) marcRow = marcRecordCursor.fetchone() marcJsonAsString = json.dumps(marcRow['marc']) marcString = marcJsonAsString.encode('utf-8').strip() for record in JSONReader(marcJsonAsString): if record['6xx'] is not None: logging.error('BAD RECORD: 6xx' + str(row)) continue if record['4xx'] is not None: logging.error('BAD RECORD: 4xx' + str(row)) continue select_holding_sql = ( """ select id, creation_date, callnumbertypeid, jsonb->>'permanentLocationId' as permanentlocationid, jsonb->'holdingsStatements' as holdingstatements, jsonb->>'callNumber' as callNumber from {}_mod_inventory_storage.holdings_record where instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)""" .format(TENANT, rowInstanceId)) marcRecordCursor.execute(select_holding_sql) holdingRows = marcRecordCursor.fetchall() for holding in holdingRows: holdingsStatements = holding['holdingstatements'] rowHoldingsId = holding['id'] newField = Field(tag='998', indicators=[' ', ' '], subfields=['a', holding.get('callnumber', ''), 'l', locLookup.get(holding.get( 'permanentlocationid', ''), '')]) for statement in holdingsStatements: if statement is not None: newField.add_subfield('s', statement.get( 'statement', '').replace( 'Extent of ownership:', '')) record.add_field(newField) select_item_sql = ( """ select id, materialtypeid, jsonb->>'effectiveLocationId' as effectivelocationid, jsonb->>'barcode' as barcode, jsonb->'effectiveCallNumberComponents'->>'prefix' as prefix, jsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype, jsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber from {}_mod_inventory_storage.item where holdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)""" .format(TENANT, rowHoldingsId)) marcRecordCursor.execute(select_item_sql) itemRows = marcRecordCursor.fetchall() for item in itemRows: callNoToUse = item.get('callnumber', 'na') prefix = item.get('prefix', None) if prefix is not None: callNoToUse = prefix + ' ' + callNoToUse record.add_field(Field(tag='952', indicators=[ ' ', ' '], subfields=['m', item.get( 'barcode', ''), 'j', callNoTypeLookup.get( item.get('callnotype', ''), ''), 'd', locLookup.get(item.get( 'effectivelocationid'), ''), 'i', materialTypeLookup.get(item.get( 'materialtypeid'), ''), 'e', callNoToUse])) if len(record.leader) < 24: logging.error('BAD LEADER' + record.leader + ' ' + str(row)) record.leader = '{:<24}'.format(record.leader) writer.write(record.as_json()) writer.write('\n') except Exception as e: print('ERROR PROCESSING ROW:' + str(row)) print(e) if rowInstanceId == None: rowInstanceId = 'None' logging.error('UNABLE TO WRITE TO FILE: ' + rowInstanceId) logging.error(e) continue writer.close() else: print('in the else --> finishing') break if folio_db: cursor.close() marcRecordCursor.close() folio_db.close() print('complete') <|reserved_special_token_1|> import pymarc from pymarc import JSONReader, Field, JSONWriter, XMLWriter import psycopg2 import psycopg2.extras import time import logging import json #WRITTEN 
W/PYTHON 3.7.3 print("...starting export"); # constructing file and log name timestr = time.strftime("%Y%m%d-%H%M%S") logging.basicConfig(filename=timestr + "-export.log") #LOCAL DB DATABASE_HOST = "redacted" DATABASE_USERNAME = "redacted" DATABASE_PASSWORD = "redacted" DATABASE_PORT = 5432 DATABASE_NAME = "redacted" TENANT = "redacted" count = 0 folio_db = psycopg2.connect( user=DATABASE_USERNAME, password=DATABASE_PASSWORD, host=DATABASE_HOST, port=DATABASE_PORT, database=DATABASE_NAME ) #init a list of material types materialTypeLookup = {} matCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) select_all_mat = ''' select id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type'''.format(TENANT) matCursor.execute(select_all_mat) materialTypes = matCursor.fetchall() for m in materialTypes: materialTypeLookup[m['id']] = m['name'] #init a list of locations locLookup = {} locCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) select_all_loc = ''' select id, jsonb->>'name' as name from {}_mod_inventory_storage.location'''.format(TENANT) locCursor.execute(select_all_loc) locations = locCursor.fetchall() for l in locations: locLookup[l['id']] = l['name'] #init a list of call number types callNoTypeLookup = {} callNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) select_all_call_no_types = ''' select id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type'''.format(TENANT) callNoTypeCursor.execute(select_all_call_no_types) callNoTypes = callNoTypeCursor.fetchall() for c in callNoTypes: callNoTypeLookup[c['id']] = c['name'] cursor = folio_db.cursor(name='folio',cursor_factory=psycopg2.extras.DictCursor) #THIS COULD BE MODIFIED TO RETREIVE X NUMBER OF RECORDS PER FILE cursor.itersize=300000 #from {}_mod_marc_storage.marc_record'''.format(TENANT) select_ids_sql = ''' select id, instance_id from {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)'''.format(TENANT,"'ACTUAL'") print("executing query") cursor.execute(select_ids_sql) while True: print("in the while true - fetching...") rows = cursor.fetchmany(cursor.itersize) print("fetch is done") marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor) if rows: save_file = timestr + "." 
+ str(count) + ".json" writer = open(save_file,'wt') print("created the file: " + save_file) count += 1 for row in rows: try: rowId = row['id']; rowInstanceId = row['instance_id']; if rowInstanceId == None: logging.error("BAD RECORD: INSTANCE ID WAS NULL" + str(row)) continue select_record_sql = ''' select id, content as marc from {}_mod_source_record_storage.marc_records_lb where id = '{}' limit 1'''.format(TENANT, rowId) #print(select_record_sql) marcRecordCursor.execute(select_record_sql) marcRow = marcRecordCursor.fetchone() marcJsonAsString = json.dumps(marcRow['marc']) marcString = marcJsonAsString.encode('utf-8').strip() #print(marcJsonAsString); for record in JSONReader(marcJsonAsString): #write MARC JSON to output file #ADD A 998 FOR EACH HOLDING RECORD if record['6xx'] is not None: logging.error("BAD RECORD: 6xx" + str(row)) continue if record['4xx'] is not None: logging.error("BAD RECORD: 4xx" + str(row)) continue select_holding_sql = ''' select id, creation_date, callnumbertypeid, jsonb->>'permanentLocationId' as permanentlocationid, jsonb->'holdingsStatements' as holdingstatements, jsonb->>'callNumber' as callNumber from {}_mod_inventory_storage.holdings_record where instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowInstanceId) #print(select_holding_sql) marcRecordCursor.execute(select_holding_sql) holdingRows = marcRecordCursor.fetchall() for holding in holdingRows: #print(holding['callnumber']) holdingsStatements = holding['holdingstatements'] rowHoldingsId = holding['id'] newField = Field(tag = '998', indicators = [' ',' '], subfields = ['a',holding.get('callnumber',''), 'l',locLookup.get(holding.get('permanentlocationid',''),'')]) for statement in holdingsStatements: if statement is not None: newField.add_subfield('s',statement.get('statement','').replace('Extent of ownership:','')); record.add_field(newField) #ADD AN 952 FOR EACH ITEM select_item_sql = ''' select id, materialtypeid, jsonb->>'effectiveLocationId' as effectivelocationid, jsonb->>'barcode' as barcode, jsonb->'effectiveCallNumberComponents'->>'prefix' as prefix, jsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype, jsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber from {}_mod_inventory_storage.item where holdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowHoldingsId) #print(select_item_sql) marcRecordCursor.execute(select_item_sql) itemRows = marcRecordCursor.fetchall() for item in itemRows: callNoToUse = item.get('callnumber','na') #print(callNoToUse) prefix = item.get('prefix',None) if (prefix is not None): callNoToUse = prefix + " " + callNoToUse record.add_field( Field(tag = '952', indicators = [' ',' '], subfields = ['m',item.get('barcode',''), 'j',callNoTypeLookup.get(item.get('callnotype',''),''), 'd',locLookup.get(item.get('effectivelocationid'),''), 'i',materialTypeLookup.get(item.get('materialtypeid'),''), 'e',callNoToUse])) if (len(record.leader) < 24): logging.error("BAD LEADER" + record.leader + " " + str(row)) record.leader = "{:<24}".format(record.leader) writer.write(record.as_json()) writer.write('\n') except Exception as e: print("ERROR PROCESSING ROW:" + str(row)) print(e) if rowInstanceId == None: rowInstanceId = "None" #FOR LOGGING logging.error("UNABLE TO WRITE TO FILE: " + rowInstanceId) logging.error(e) continue writer.close() else: print("in the else --> finishing") break if (folio_db): cursor.close() 
marcRecordCursor.close() folio_db.close() print("complete")
flexible
{ "blob_id": "d81e8478d60c9ee778e1aeb0dd7b05f675e4ecad", "index": 2306, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('...starting export')\n<mask token>\nlogging.basicConfig(filename=timestr + '-export.log')\n<mask token>\nmatCursor.execute(select_all_mat)\n<mask token>\nfor m in materialTypes:\n materialTypeLookup[m['id']] = m['name']\n<mask token>\nlocCursor.execute(select_all_loc)\n<mask token>\nfor l in locations:\n locLookup[l['id']] = l['name']\n<mask token>\ncallNoTypeCursor.execute(select_all_call_no_types)\n<mask token>\nfor c in callNoTypes:\n callNoTypeLookup[c['id']] = c['name']\n<mask token>\nprint('executing query')\ncursor.execute(select_ids_sql)\nwhile True:\n print('in the while true - fetching...')\n rows = cursor.fetchmany(cursor.itersize)\n print('fetch is done')\n marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.\n DictCursor)\n if rows:\n save_file = timestr + '.' + str(count) + '.json'\n writer = open(save_file, 'wt')\n print('created the file: ' + save_file)\n count += 1\n for row in rows:\n try:\n rowId = row['id']\n rowInstanceId = row['instance_id']\n if rowInstanceId == None:\n logging.error('BAD RECORD: INSTANCE ID WAS NULL' + str(row)\n )\n continue\n select_record_sql = (\n \"\"\"\n\t\t\t\tselect id, \n\t\t\t\tcontent as marc\n\t\t\t\tfrom {}_mod_source_record_storage.marc_records_lb where \n\t\t\t\tid = '{}' limit 1\"\"\"\n .format(TENANT, rowId))\n marcRecordCursor.execute(select_record_sql)\n marcRow = marcRecordCursor.fetchone()\n marcJsonAsString = json.dumps(marcRow['marc'])\n marcString = marcJsonAsString.encode('utf-8').strip()\n for record in JSONReader(marcJsonAsString):\n if record['6xx'] is not None:\n logging.error('BAD RECORD: 6xx' + str(row))\n continue\n if record['4xx'] is not None:\n logging.error('BAD RECORD: 4xx' + str(row))\n continue\n select_holding_sql = (\n \"\"\"\n\t\t\t\t\tselect id, creation_date, callnumbertypeid, \n\t\t\t\t\tjsonb->>'permanentLocationId' as permanentlocationid, \n\t\t\t\t\tjsonb->'holdingsStatements' as holdingstatements,\n\t\t\t\t\tjsonb->>'callNumber' as callNumber from \n\t\t\t\t\t{}_mod_inventory_storage.holdings_record \n\t\t\t\t\twhere instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowInstanceId))\n marcRecordCursor.execute(select_holding_sql)\n holdingRows = marcRecordCursor.fetchall()\n for holding in holdingRows:\n holdingsStatements = holding['holdingstatements']\n rowHoldingsId = holding['id']\n newField = Field(tag='998', indicators=[' ', ' '],\n subfields=['a', holding.get('callnumber', ''),\n 'l', locLookup.get(holding.get(\n 'permanentlocationid', ''), '')])\n for statement in holdingsStatements:\n if statement is not None:\n newField.add_subfield('s', statement.get(\n 'statement', '').replace(\n 'Extent of ownership:', ''))\n record.add_field(newField)\n select_item_sql = (\n \"\"\"\n\t\t\t\t\t\tselect id, materialtypeid, \n\t\t\t\t\t\tjsonb->>'effectiveLocationId' as effectivelocationid, \n\t\t\t\t\t\tjsonb->>'barcode' as barcode, \n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber\n\t\t\t\t\t\tfrom {}_mod_inventory_storage.item where \n\t\t\t\t\t\tholdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowHoldingsId))\n 
marcRecordCursor.execute(select_item_sql)\n itemRows = marcRecordCursor.fetchall()\n for item in itemRows:\n callNoToUse = item.get('callnumber', 'na')\n prefix = item.get('prefix', None)\n if prefix is not None:\n callNoToUse = prefix + ' ' + callNoToUse\n record.add_field(Field(tag='952', indicators=[\n ' ', ' '], subfields=['m', item.get(\n 'barcode', ''), 'j', callNoTypeLookup.get(\n item.get('callnotype', ''), ''), 'd',\n locLookup.get(item.get(\n 'effectivelocationid'), ''), 'i',\n materialTypeLookup.get(item.get(\n 'materialtypeid'), ''), 'e', callNoToUse]))\n if len(record.leader) < 24:\n logging.error('BAD LEADER' + record.leader +\n ' ' + str(row))\n record.leader = '{:<24}'.format(record.leader)\n writer.write(record.as_json())\n writer.write('\\n')\n except Exception as e:\n print('ERROR PROCESSING ROW:' + str(row))\n print(e)\n if rowInstanceId == None:\n rowInstanceId = 'None'\n logging.error('UNABLE TO WRITE TO FILE: ' + rowInstanceId)\n logging.error(e)\n continue\n writer.close()\n else:\n print('in the else --> finishing')\n break\nif folio_db:\n cursor.close()\n marcRecordCursor.close()\n folio_db.close()\n print('complete')\n", "step-3": "<mask token>\nprint('...starting export')\ntimestr = time.strftime('%Y%m%d-%H%M%S')\nlogging.basicConfig(filename=timestr + '-export.log')\nDATABASE_HOST = 'redacted'\nDATABASE_USERNAME = 'redacted'\nDATABASE_PASSWORD = 'redacted'\nDATABASE_PORT = 5432\nDATABASE_NAME = 'redacted'\nTENANT = 'redacted'\ncount = 0\nfolio_db = psycopg2.connect(user=DATABASE_USERNAME, password=\n DATABASE_PASSWORD, host=DATABASE_HOST, port=DATABASE_PORT, database=\n DATABASE_NAME)\nmaterialTypeLookup = {}\nmatCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_mat = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type\"\"\"\n .format(TENANT))\nmatCursor.execute(select_all_mat)\nmaterialTypes = matCursor.fetchall()\nfor m in materialTypes:\n materialTypeLookup[m['id']] = m['name']\nlocLookup = {}\nlocCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_loc = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.location\"\"\"\n .format(TENANT))\nlocCursor.execute(select_all_loc)\nlocations = locCursor.fetchall()\nfor l in locations:\n locLookup[l['id']] = l['name']\ncallNoTypeLookup = {}\ncallNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_call_no_types = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type\"\"\"\n .format(TENANT))\ncallNoTypeCursor.execute(select_all_call_no_types)\ncallNoTypes = callNoTypeCursor.fetchall()\nfor c in callNoTypes:\n callNoTypeLookup[c['id']] = c['name']\ncursor = folio_db.cursor(name='folio', cursor_factory=psycopg2.extras.\n DictCursor)\ncursor.itersize = 300000\nselect_ids_sql = (\n \"\"\"\nselect\nid, \ninstance_id \nfrom {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)\"\"\"\n .format(TENANT, \"'ACTUAL'\"))\nprint('executing query')\ncursor.execute(select_ids_sql)\nwhile True:\n print('in the while true - fetching...')\n rows = cursor.fetchmany(cursor.itersize)\n print('fetch is done')\n marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.\n DictCursor)\n if rows:\n save_file = timestr + '.' 
+ str(count) + '.json'\n writer = open(save_file, 'wt')\n print('created the file: ' + save_file)\n count += 1\n for row in rows:\n try:\n rowId = row['id']\n rowInstanceId = row['instance_id']\n if rowInstanceId == None:\n logging.error('BAD RECORD: INSTANCE ID WAS NULL' + str(row)\n )\n continue\n select_record_sql = (\n \"\"\"\n\t\t\t\tselect id, \n\t\t\t\tcontent as marc\n\t\t\t\tfrom {}_mod_source_record_storage.marc_records_lb where \n\t\t\t\tid = '{}' limit 1\"\"\"\n .format(TENANT, rowId))\n marcRecordCursor.execute(select_record_sql)\n marcRow = marcRecordCursor.fetchone()\n marcJsonAsString = json.dumps(marcRow['marc'])\n marcString = marcJsonAsString.encode('utf-8').strip()\n for record in JSONReader(marcJsonAsString):\n if record['6xx'] is not None:\n logging.error('BAD RECORD: 6xx' + str(row))\n continue\n if record['4xx'] is not None:\n logging.error('BAD RECORD: 4xx' + str(row))\n continue\n select_holding_sql = (\n \"\"\"\n\t\t\t\t\tselect id, creation_date, callnumbertypeid, \n\t\t\t\t\tjsonb->>'permanentLocationId' as permanentlocationid, \n\t\t\t\t\tjsonb->'holdingsStatements' as holdingstatements,\n\t\t\t\t\tjsonb->>'callNumber' as callNumber from \n\t\t\t\t\t{}_mod_inventory_storage.holdings_record \n\t\t\t\t\twhere instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowInstanceId))\n marcRecordCursor.execute(select_holding_sql)\n holdingRows = marcRecordCursor.fetchall()\n for holding in holdingRows:\n holdingsStatements = holding['holdingstatements']\n rowHoldingsId = holding['id']\n newField = Field(tag='998', indicators=[' ', ' '],\n subfields=['a', holding.get('callnumber', ''),\n 'l', locLookup.get(holding.get(\n 'permanentlocationid', ''), '')])\n for statement in holdingsStatements:\n if statement is not None:\n newField.add_subfield('s', statement.get(\n 'statement', '').replace(\n 'Extent of ownership:', ''))\n record.add_field(newField)\n select_item_sql = (\n \"\"\"\n\t\t\t\t\t\tselect id, materialtypeid, \n\t\t\t\t\t\tjsonb->>'effectiveLocationId' as effectivelocationid, \n\t\t\t\t\t\tjsonb->>'barcode' as barcode, \n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber\n\t\t\t\t\t\tfrom {}_mod_inventory_storage.item where \n\t\t\t\t\t\tholdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowHoldingsId))\n marcRecordCursor.execute(select_item_sql)\n itemRows = marcRecordCursor.fetchall()\n for item in itemRows:\n callNoToUse = item.get('callnumber', 'na')\n prefix = item.get('prefix', None)\n if prefix is not None:\n callNoToUse = prefix + ' ' + callNoToUse\n record.add_field(Field(tag='952', indicators=[\n ' ', ' '], subfields=['m', item.get(\n 'barcode', ''), 'j', callNoTypeLookup.get(\n item.get('callnotype', ''), ''), 'd',\n locLookup.get(item.get(\n 'effectivelocationid'), ''), 'i',\n materialTypeLookup.get(item.get(\n 'materialtypeid'), ''), 'e', callNoToUse]))\n if len(record.leader) < 24:\n logging.error('BAD LEADER' + record.leader +\n ' ' + str(row))\n record.leader = '{:<24}'.format(record.leader)\n writer.write(record.as_json())\n writer.write('\\n')\n except Exception as e:\n print('ERROR PROCESSING ROW:' + str(row))\n print(e)\n if rowInstanceId == None:\n rowInstanceId = 'None'\n logging.error('UNABLE TO WRITE TO FILE: ' + 
rowInstanceId)\n logging.error(e)\n continue\n writer.close()\n else:\n print('in the else --> finishing')\n break\nif folio_db:\n cursor.close()\n marcRecordCursor.close()\n folio_db.close()\n print('complete')\n", "step-4": "import pymarc\nfrom pymarc import JSONReader, Field, JSONWriter, XMLWriter\nimport psycopg2\nimport psycopg2.extras\nimport time\nimport logging\nimport json\nprint('...starting export')\ntimestr = time.strftime('%Y%m%d-%H%M%S')\nlogging.basicConfig(filename=timestr + '-export.log')\nDATABASE_HOST = 'redacted'\nDATABASE_USERNAME = 'redacted'\nDATABASE_PASSWORD = 'redacted'\nDATABASE_PORT = 5432\nDATABASE_NAME = 'redacted'\nTENANT = 'redacted'\ncount = 0\nfolio_db = psycopg2.connect(user=DATABASE_USERNAME, password=\n DATABASE_PASSWORD, host=DATABASE_HOST, port=DATABASE_PORT, database=\n DATABASE_NAME)\nmaterialTypeLookup = {}\nmatCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_mat = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type\"\"\"\n .format(TENANT))\nmatCursor.execute(select_all_mat)\nmaterialTypes = matCursor.fetchall()\nfor m in materialTypes:\n materialTypeLookup[m['id']] = m['name']\nlocLookup = {}\nlocCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_loc = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.location\"\"\"\n .format(TENANT))\nlocCursor.execute(select_all_loc)\nlocations = locCursor.fetchall()\nfor l in locations:\n locLookup[l['id']] = l['name']\ncallNoTypeLookup = {}\ncallNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_call_no_types = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type\"\"\"\n .format(TENANT))\ncallNoTypeCursor.execute(select_all_call_no_types)\ncallNoTypes = callNoTypeCursor.fetchall()\nfor c in callNoTypes:\n callNoTypeLookup[c['id']] = c['name']\ncursor = folio_db.cursor(name='folio', cursor_factory=psycopg2.extras.\n DictCursor)\ncursor.itersize = 300000\nselect_ids_sql = (\n \"\"\"\nselect\nid, \ninstance_id \nfrom {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)\"\"\"\n .format(TENANT, \"'ACTUAL'\"))\nprint('executing query')\ncursor.execute(select_ids_sql)\nwhile True:\n print('in the while true - fetching...')\n rows = cursor.fetchmany(cursor.itersize)\n print('fetch is done')\n marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.\n DictCursor)\n if rows:\n save_file = timestr + '.' 
+ str(count) + '.json'\n writer = open(save_file, 'wt')\n print('created the file: ' + save_file)\n count += 1\n for row in rows:\n try:\n rowId = row['id']\n rowInstanceId = row['instance_id']\n if rowInstanceId == None:\n logging.error('BAD RECORD: INSTANCE ID WAS NULL' + str(row)\n )\n continue\n select_record_sql = (\n \"\"\"\n\t\t\t\tselect id, \n\t\t\t\tcontent as marc\n\t\t\t\tfrom {}_mod_source_record_storage.marc_records_lb where \n\t\t\t\tid = '{}' limit 1\"\"\"\n .format(TENANT, rowId))\n marcRecordCursor.execute(select_record_sql)\n marcRow = marcRecordCursor.fetchone()\n marcJsonAsString = json.dumps(marcRow['marc'])\n marcString = marcJsonAsString.encode('utf-8').strip()\n for record in JSONReader(marcJsonAsString):\n if record['6xx'] is not None:\n logging.error('BAD RECORD: 6xx' + str(row))\n continue\n if record['4xx'] is not None:\n logging.error('BAD RECORD: 4xx' + str(row))\n continue\n select_holding_sql = (\n \"\"\"\n\t\t\t\t\tselect id, creation_date, callnumbertypeid, \n\t\t\t\t\tjsonb->>'permanentLocationId' as permanentlocationid, \n\t\t\t\t\tjsonb->'holdingsStatements' as holdingstatements,\n\t\t\t\t\tjsonb->>'callNumber' as callNumber from \n\t\t\t\t\t{}_mod_inventory_storage.holdings_record \n\t\t\t\t\twhere instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowInstanceId))\n marcRecordCursor.execute(select_holding_sql)\n holdingRows = marcRecordCursor.fetchall()\n for holding in holdingRows:\n holdingsStatements = holding['holdingstatements']\n rowHoldingsId = holding['id']\n newField = Field(tag='998', indicators=[' ', ' '],\n subfields=['a', holding.get('callnumber', ''),\n 'l', locLookup.get(holding.get(\n 'permanentlocationid', ''), '')])\n for statement in holdingsStatements:\n if statement is not None:\n newField.add_subfield('s', statement.get(\n 'statement', '').replace(\n 'Extent of ownership:', ''))\n record.add_field(newField)\n select_item_sql = (\n \"\"\"\n\t\t\t\t\t\tselect id, materialtypeid, \n\t\t\t\t\t\tjsonb->>'effectiveLocationId' as effectivelocationid, \n\t\t\t\t\t\tjsonb->>'barcode' as barcode, \n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber\n\t\t\t\t\t\tfrom {}_mod_inventory_storage.item where \n\t\t\t\t\t\tholdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowHoldingsId))\n marcRecordCursor.execute(select_item_sql)\n itemRows = marcRecordCursor.fetchall()\n for item in itemRows:\n callNoToUse = item.get('callnumber', 'na')\n prefix = item.get('prefix', None)\n if prefix is not None:\n callNoToUse = prefix + ' ' + callNoToUse\n record.add_field(Field(tag='952', indicators=[\n ' ', ' '], subfields=['m', item.get(\n 'barcode', ''), 'j', callNoTypeLookup.get(\n item.get('callnotype', ''), ''), 'd',\n locLookup.get(item.get(\n 'effectivelocationid'), ''), 'i',\n materialTypeLookup.get(item.get(\n 'materialtypeid'), ''), 'e', callNoToUse]))\n if len(record.leader) < 24:\n logging.error('BAD LEADER' + record.leader +\n ' ' + str(row))\n record.leader = '{:<24}'.format(record.leader)\n writer.write(record.as_json())\n writer.write('\\n')\n except Exception as e:\n print('ERROR PROCESSING ROW:' + str(row))\n print(e)\n if rowInstanceId == None:\n rowInstanceId = 'None'\n logging.error('UNABLE TO WRITE TO FILE: ' + 
rowInstanceId)\n logging.error(e)\n continue\n writer.close()\n else:\n print('in the else --> finishing')\n break\nif folio_db:\n cursor.close()\n marcRecordCursor.close()\n folio_db.close()\n print('complete')\n", "step-5": "import pymarc\nfrom pymarc import JSONReader, Field, JSONWriter, XMLWriter\nimport psycopg2\nimport psycopg2.extras\nimport time\nimport logging\nimport json\n\n#WRITTEN W/PYTHON 3.7.3\n\n\nprint(\"...starting export\");\n\n# constructing file and log name\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\nlogging.basicConfig(filename=timestr + \"-export.log\")\n\n#LOCAL DB\nDATABASE_HOST = \"redacted\"\nDATABASE_USERNAME = \"redacted\"\nDATABASE_PASSWORD = \"redacted\"\nDATABASE_PORT = 5432\nDATABASE_NAME = \"redacted\"\nTENANT = \"redacted\"\n\ncount = 0\nfolio_db = psycopg2.connect(\n\tuser=DATABASE_USERNAME,\n\tpassword=DATABASE_PASSWORD,\n\thost=DATABASE_HOST,\n\tport=DATABASE_PORT,\n\tdatabase=DATABASE_NAME\n)\n\n#init a list of material types\nmaterialTypeLookup = {}\nmatCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_mat = '''\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type'''.format(TENANT)\nmatCursor.execute(select_all_mat)\nmaterialTypes = matCursor.fetchall()\nfor m in materialTypes:\n materialTypeLookup[m['id']] = m['name']\n\n#init a list of locations \nlocLookup = {}\nlocCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_loc = '''\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.location'''.format(TENANT)\nlocCursor.execute(select_all_loc)\nlocations = locCursor.fetchall()\nfor l in locations:\n locLookup[l['id']] = l['name']\n\n#init a list of call number types\ncallNoTypeLookup = {}\ncallNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_call_no_types = '''\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type'''.format(TENANT)\ncallNoTypeCursor.execute(select_all_call_no_types)\ncallNoTypes = callNoTypeCursor.fetchall()\nfor c in callNoTypes:\n callNoTypeLookup[c['id']] = c['name']\n\ncursor = folio_db.cursor(name='folio',cursor_factory=psycopg2.extras.DictCursor)\n#THIS COULD BE MODIFIED TO RETREIVE X NUMBER OF RECORDS PER FILE\ncursor.itersize=300000\n#from {}_mod_marc_storage.marc_record'''.format(TENANT)\nselect_ids_sql = '''\nselect\nid, \ninstance_id \nfrom {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)'''.format(TENANT,\"'ACTUAL'\")\nprint(\"executing query\")\ncursor.execute(select_ids_sql)\nwhile True:\n\tprint(\"in the while true - fetching...\")\n\trows = cursor.fetchmany(cursor.itersize)\n\tprint(\"fetch is done\")\n\tmarcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\tif rows:\n\t\tsave_file = timestr + \".\" + str(count) + \".json\"\n\t\twriter = open(save_file,'wt')\n\t\tprint(\"created the file: \" + save_file)\n\t\tcount += 1\n\t\tfor row in rows:\n\t\t\ttry: \n\t\t\t\trowId = row['id'];\n\t\t\t\trowInstanceId = row['instance_id'];\n\t\t\t\tif rowInstanceId == None:\n\t\t\t\t\t\tlogging.error(\"BAD RECORD: INSTANCE ID WAS NULL\" + str(row))\n\t\t\t\t\t\tcontinue\n\t\t\t\tselect_record_sql = '''\n\t\t\t\tselect id, \n\t\t\t\tcontent as marc\n\t\t\t\tfrom {}_mod_source_record_storage.marc_records_lb where \n\t\t\t\tid = '{}' limit 1'''.format(TENANT, rowId)\n\t\t\t\t#print(select_record_sql)\n\t\t\t\tmarcRecordCursor.execute(select_record_sql)\n\t\t\t\tmarcRow = 
marcRecordCursor.fetchone()\n\t\t\t\tmarcJsonAsString = json.dumps(marcRow['marc'])\n\t\t\t\tmarcString = marcJsonAsString.encode('utf-8').strip()\n\t\t\t\t#print(marcJsonAsString);\n\t\t\t\tfor record in JSONReader(marcJsonAsString):\n\t\t\t\t\t#write MARC JSON to output file\n\t\t\t\t\t#ADD A 998 FOR EACH HOLDING RECORD\n\t\t\t\t\tif record['6xx'] is not None:\n\t\t\t\t\t\tlogging.error(\"BAD RECORD: 6xx\" + str(row))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif record['4xx'] is not None:\n\t\t\t\t\t\tlogging.error(\"BAD RECORD: 4xx\" + str(row))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tselect_holding_sql = '''\n\t\t\t\t\tselect id, creation_date, callnumbertypeid, \n\t\t\t\t\tjsonb->>'permanentLocationId' as permanentlocationid, \n\t\t\t\t\tjsonb->'holdingsStatements' as holdingstatements,\n\t\t\t\t\tjsonb->>'callNumber' as callNumber from \n\t\t\t\t\t{}_mod_inventory_storage.holdings_record \n\t\t\t\t\twhere instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowInstanceId)\n\t\t\t\t\t#print(select_holding_sql)\n\t\t\t\t\tmarcRecordCursor.execute(select_holding_sql)\n\t\t\t\t\tholdingRows = marcRecordCursor.fetchall()\n\t\t\t\t\tfor holding in holdingRows:\n\t\t\t\t\t\t#print(holding['callnumber'])\n\t\t\t\t\t\tholdingsStatements = holding['holdingstatements']\n\t\t\t\t\t\trowHoldingsId = holding['id']\n\t\t\t\t\t\tnewField = Field(tag = '998',\n\t\t\t\t\t\t\t\t indicators = [' ',' '],\n\t\t\t\t\t\t\t\t subfields = ['a',holding.get('callnumber',''),\n\t\t\t\t\t\t\t\t\t\t\t'l',locLookup.get(holding.get('permanentlocationid',''),'')])\n\t\t\t\t\t\tfor statement in holdingsStatements:\n\t\t\t\t\t\t\tif statement is not None:\n\t\t\t\t\t\t\t\tnewField.add_subfield('s',statement.get('statement','').replace('Extent of ownership:',''));\n\t\t\t\t\t\trecord.add_field(newField)\n\t\t\t\t\t\t#ADD AN 952 FOR EACH ITEM\n\t\t\t\t\t\tselect_item_sql = '''\n\t\t\t\t\t\tselect id, materialtypeid, \n\t\t\t\t\t\tjsonb->>'effectiveLocationId' as effectivelocationid, \n\t\t\t\t\t\tjsonb->>'barcode' as barcode, \n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber\n\t\t\t\t\t\tfrom {}_mod_inventory_storage.item where \n\t\t\t\t\t\tholdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowHoldingsId)\n\t\t\t\t\t\t#print(select_item_sql)\n\t\t\t\t\t\tmarcRecordCursor.execute(select_item_sql)\n\t\t\t\t\t\titemRows = marcRecordCursor.fetchall()\n\t\t\t\t\t\tfor item in itemRows:\n\t\t\t\t\t\t\tcallNoToUse = item.get('callnumber','na')\n\t\t\t\t\t\t\t#print(callNoToUse)\n\t\t\t\t\t\t\tprefix = item.get('prefix',None)\n\t\t\t\t\t\t\tif (prefix is not None):\n\t\t\t\t\t\t\t\tcallNoToUse = prefix + \" \" + callNoToUse\n\t\t\t\t\t\t\trecord.add_field(\n\t\t\t\t\t\t\t\tField(tag = '952',\n\t\t\t\t\t\t\t\t\tindicators = [' ',' '],\n\t\t\t\t\t\t\t\t\tsubfields = ['m',item.get('barcode',''),\n\t\t\t\t\t\t\t\t\t'j',callNoTypeLookup.get(item.get('callnotype',''),''),\n\t\t\t\t\t\t\t\t\t'd',locLookup.get(item.get('effectivelocationid'),''),\n\t\t\t\t\t\t\t\t\t'i',materialTypeLookup.get(item.get('materialtypeid'),''),\n\t\t\t\t\t\t\t\t\t'e',callNoToUse]))\n\t\t\t\t\t\t\tif (len(record.leader) < 24):\n\t\t\t\t\t\t\t\tlogging.error(\"BAD LEADER\" + record.leader + \" \" + str(row))\n\t\t\t\t\t\t\t\trecord.leader = 
\"{:<24}\".format(record.leader)\n\t\t\t\t\twriter.write(record.as_json())\n\t\t\t\t\twriter.write('\\n')\n\t\t\texcept Exception as e:\n\t\t\t\t\tprint(\"ERROR PROCESSING ROW:\" + str(row))\n\t\t\t\t\tprint(e)\n\t\t\t\t\tif rowInstanceId == None:\n\t\t\t\t\t\trowInstanceId = \"None\" #FOR LOGGING\n\t\t\t\t\tlogging.error(\"UNABLE TO WRITE TO FILE: \" + rowInstanceId)\n\t\t\t\t\tlogging.error(e)\n\t\t\t\t\tcontinue\n\t\twriter.close()\n\telse:\n\t\tprint(\"in the else --> finishing\")\n\t\tbreak\n\nif (folio_db):\n\tcursor.close()\n\tmarcRecordCursor.close()\n\tfolio_db.close()\n\tprint(\"complete\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User, Group


class UserTests(APITestCase):

    def test_user_list(self):
        # must be rejected without validation
        response = self.client.get('/api/users/', {}, format='json')
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

        # must be success
        user = User.objects.create(username='user', email='[email protected]', password='user123', is_staff=True)
        self.client.force_authenticate(user=user)
        response = self.client.get('/api/users/', {}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        actual = response.data['results'][0]
        self.assertEqual(actual['username'], user.username)
        self.assertEqual(actual['email'], user.email)
normal
{ "blob_id": "ca7b0553e55e1c5e6cd23139a158101e72456a50", "index": 8844, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass UserTests(APITestCase):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass UserTests(APITestCase):\n\n def test_user_list(self):\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n user = User.objects.create(username='user', email=\n '[email protected]', password='user123', is_staff=True)\n self.client.force_authenticate(user=user)\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 1)\n actual = response.data['results'][0]\n self.assertEqual(actual['username'], user.username)\n self.assertEqual(actual['email'], user.email)\n", "step-4": "from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom django.contrib.auth.models import User, Group\n\n\nclass UserTests(APITestCase):\n\n def test_user_list(self):\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n user = User.objects.create(username='user', email=\n '[email protected]', password='user123', is_staff=True)\n self.client.force_authenticate(user=user)\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 1)\n actual = response.data['results'][0]\n self.assertEqual(actual['username'], user.username)\n self.assertEqual(actual['email'], user.email)\n", "step-5": "from django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom django.contrib.auth.models import User, Group\n\nclass UserTests(APITestCase): \n \n def test_user_list(self):\n # must be rejected without validation\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # must be success\n user = User.objects.create(username='user', email='[email protected]', password='user123', is_staff=True)\n self.client.force_authenticate(user=user)\n response = self.client.get('/api/users/', {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 1)\n actual = response.data['results'][0]\n self.assertEqual(actual['username'], user.username)\n self.assertEqual(actual['email'], user.email)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from xai.brain.wordbase.verbs._essay import _ESSAY

#calss header
class _ESSAYED(_ESSAY, ):
    def __init__(self,):
        _ESSAY.__init__(self)
        self.name = "ESSAYED"
        self.specie = 'verbs'
        self.basic = "essay"
        self.jsondata = {}
normal
{ "blob_id": "dc2cbbaca3c35f76ac09c93a2e8ad13eb0bdfce6", "index": 4086, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass _ESSAYED(_ESSAY):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass _ESSAYED(_ESSAY):\n\n def __init__(self):\n _ESSAY.__init__(self)\n self.name = 'ESSAYED'\n self.specie = 'verbs'\n self.basic = 'essay'\n self.jsondata = {}\n", "step-4": "from xai.brain.wordbase.verbs._essay import _ESSAY\n\n\nclass _ESSAYED(_ESSAY):\n\n def __init__(self):\n _ESSAY.__init__(self)\n self.name = 'ESSAYED'\n self.specie = 'verbs'\n self.basic = 'essay'\n self.jsondata = {}\n", "step-5": "\n\nfrom xai.brain.wordbase.verbs._essay import _ESSAY\n\n#calss header\nclass _ESSAYED(_ESSAY, ):\n\tdef __init__(self,): \n\t\t_ESSAY.__init__(self)\n\t\tself.name = \"ESSAYED\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"essay\"\n\t\tself.jsondata = {}\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import os
import sys

try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup, find_packages


setup(
    name='stripe-requests',
    version='1.9.1-dev',
    description='Stripe python bindings using requests',
    author='Allan Lei',
    author_email='[email protected]',
    url='https://github.com/allanlei/stripe-requests',
    license=open('LICENSE').read(),
    packages=find_packages(),
    package_data={'stripe': ['data/ca-certificates.crt']},
    install_requires=[
        'requests >= 1.2.0, < 1.3.0',
    ],
    test_suite='stripe.tests',
    classifiers=(
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: PyPy',
    ),
)
normal
{ "blob_id": "a6ee2be7bed59b419fa66fd6cfe4b5fff3fac260", "index": 2596, "step-1": "<mask token>\n", "step-2": "<mask token>\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup, find_packages\nsetup(name='stripe-requests', version='1.9.1-dev', description=\n 'Stripe python bindings using requests', author='Allan Lei',\n author_email='[email protected]', url=\n 'https://github.com/allanlei/stripe-requests', license=open('LICENSE').\n read(), packages=find_packages(), package_data={'stripe': [\n 'data/ca-certificates.crt']}, install_requires=[\n 'requests >= 1.2.0, < 1.3.0'], test_suite='stripe.tests', classifiers=(\n 'Intended Audience :: Developers', 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: Implementation :: PyPy'))\n", "step-3": "import os\nimport sys\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup, find_packages\nsetup(name='stripe-requests', version='1.9.1-dev', description=\n 'Stripe python bindings using requests', author='Allan Lei',\n author_email='[email protected]', url=\n 'https://github.com/allanlei/stripe-requests', license=open('LICENSE').\n read(), packages=find_packages(), package_data={'stripe': [\n 'data/ca-certificates.crt']}, install_requires=[\n 'requests >= 1.2.0, < 1.3.0'], test_suite='stripe.tests', classifiers=(\n 'Intended Audience :: Developers', 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: Implementation :: PyPy'))\n", "step-4": "import os\nimport sys\n\ntry:\n from setuptools import setup, find_packages\nexcept ImportError:\n from distutils.core import setup, find_packages\n\n\nsetup(\n name='stripe-requests',\n version='1.9.1-dev',\n description='Stripe python bindings using requests',\n author='Allan Lei',\n author_email='[email protected]',\n url='https://github.com/allanlei/stripe-requests',\n license=open('LICENSE').read(),\n packages=find_packages(),\n package_data={'stripe': ['data/ca-certificates.crt']},\n install_requires=[\n 'requests >= 1.2.0, < 1.3.0',\n ],\n test_suite='stripe.tests',\n classifiers=(\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.1',\n 'Programming Language :: Python :: 3.2',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ),\n)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 13:36:13 2019

@author: gennachiaro
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import pyrolite.plot
from pyrolite.plot.spider import spider

#read in data
df = pd.read_csv('/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv', index_col=0)

#set values
MG = df.loc[['ORA-2A-001','ORA-2A-005','ORA-2A-018','ORA-2A-031','ORA-2A-032','ORA-2A-035','ORA-2A-040']]
VCCR = df.loc [['ORA-5B-402','ORA-5B-404A','ORA-5B-404B','ORA-5B-405','ORA-5B-406','ORA-5B-407','ORA-5B-408-SITE2','ORA-5B-408-SITE7','ORA-5B-408-SITE8','ORA-5B-409','ORA-5B-411','ORA-5B-412A-CG','ORA-5B-412B-CG','ORA-5B-413','ORA-5B-414-CG','ORA-5B-415','ORA-5B-416','ORA-5B-417']]
FG = df.loc [['ORA-5B-410','ORA-5B-412A-FG','ORA-5B-412B-FG','ORA-5B-414-FG']]
FGCP = df.loc[['ORA-2A-002_Type1','ORA-2A-002_Type2','ORA-2A-002','ORA-2A-003','ORA-2A-016_Type1','ORA-2A-016-Type2','ORA-2A-016-Type3','ORA-2A-016-Type4','ORA-2A-023','ORA-2A-024','MINGLED1-ORA-2A-024','MINGLED2-ORA-2A-024','MINGLED3-ORA-2A-024']]

#plot diagrams
MG.pyroplot.spider(color="green",alpha = 0.5, mode = "fill")

VCCR.pyroplot.spider(color="red",alpha = 0.5, mode = "fill")

FG.pyroplot.spider(color="purple",alpha = 0.5, mode = "fill")

FGCP.pyroplot.spider(color="blue",alpha = 0.5, mode = "fill")

#set background
sns.set_style("darkgrid")

#plot graph
plt.show()
normal
{ "blob_id": "f6fee18898636ad6b0dc6d96d28dead4e09b8035", "index": 1650, "step-1": "<mask token>\n", "step-2": "<mask token>\nsns.set()\n<mask token>\nMG.pyroplot.spider(color='green', alpha=0.5, mode='fill')\nVCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')\nFG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')\nFGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')\nsns.set_style('darkgrid')\nplt.show()\n", "step-3": "<mask token>\nsns.set()\n<mask token>\ndf = pd.read_csv(\n '/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'\n , index_col=0)\nMG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',\n 'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]\nVCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',\n 'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',\n 'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',\n 'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',\n 'ORA-5B-416', 'ORA-5B-417']]\nFG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']\n ]\nFGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',\n 'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',\n 'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',\n 'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]\nMG.pyroplot.spider(color='green', alpha=0.5, mode='fill')\nVCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')\nFG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')\nFGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')\nsns.set_style('darkgrid')\nplt.show()\n", "step-4": "<mask token>\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\nimport pyrolite.plot\nfrom pyrolite.plot.spider import spider\ndf = pd.read_csv(\n '/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth Elements/REE_Mean_Normalized.csv'\n , index_col=0)\nMG = df.loc[['ORA-2A-001', 'ORA-2A-005', 'ORA-2A-018', 'ORA-2A-031',\n 'ORA-2A-032', 'ORA-2A-035', 'ORA-2A-040']]\nVCCR = df.loc[['ORA-5B-402', 'ORA-5B-404A', 'ORA-5B-404B', 'ORA-5B-405',\n 'ORA-5B-406', 'ORA-5B-407', 'ORA-5B-408-SITE2', 'ORA-5B-408-SITE7',\n 'ORA-5B-408-SITE8', 'ORA-5B-409', 'ORA-5B-411', 'ORA-5B-412A-CG',\n 'ORA-5B-412B-CG', 'ORA-5B-413', 'ORA-5B-414-CG', 'ORA-5B-415',\n 'ORA-5B-416', 'ORA-5B-417']]\nFG = df.loc[['ORA-5B-410', 'ORA-5B-412A-FG', 'ORA-5B-412B-FG', 'ORA-5B-414-FG']\n ]\nFGCP = df.loc[['ORA-2A-002_Type1', 'ORA-2A-002_Type2', 'ORA-2A-002',\n 'ORA-2A-003', 'ORA-2A-016_Type1', 'ORA-2A-016-Type2',\n 'ORA-2A-016-Type3', 'ORA-2A-016-Type4', 'ORA-2A-023', 'ORA-2A-024',\n 'MINGLED1-ORA-2A-024', 'MINGLED2-ORA-2A-024', 'MINGLED3-ORA-2A-024']]\nMG.pyroplot.spider(color='green', alpha=0.5, mode='fill')\nVCCR.pyroplot.spider(color='red', alpha=0.5, mode='fill')\nFG.pyroplot.spider(color='purple', alpha=0.5, mode='fill')\nFGCP.pyroplot.spider(color='blue', alpha=0.5, mode='fill')\nsns.set_style('darkgrid')\nplt.show()\n", "step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 18 13:36:13 2019\n\n@author: gennachiaro\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nimport pyrolite.plot\nfrom pyrolite.plot.spider import spider\n\n#read in data\ndf = pd.read_csv('/users/gennachiaro/Documents/Vanderbilt/Research/Ora Caldera/Trace Elements/Rare Earth 
Elements/REE_Mean_Normalized.csv', index_col=0)\n\n#set values\nMG = df.loc[['ORA-2A-001','ORA-2A-005','ORA-2A-018','ORA-2A-031','ORA-2A-032','ORA-2A-035','ORA-2A-040']]\nVCCR = df.loc [['ORA-5B-402','ORA-5B-404A','ORA-5B-404B','ORA-5B-405','ORA-5B-406','ORA-5B-407','ORA-5B-408-SITE2','ORA-5B-408-SITE7','ORA-5B-408-SITE8','ORA-5B-409','ORA-5B-411','ORA-5B-412A-CG','ORA-5B-412B-CG','ORA-5B-413','ORA-5B-414-CG','ORA-5B-415','ORA-5B-416','ORA-5B-417']]\nFG = df.loc [['ORA-5B-410','ORA-5B-412A-FG','ORA-5B-412B-FG','ORA-5B-414-FG']]\nFGCP = df.loc[['ORA-2A-002_Type1','ORA-2A-002_Type2','ORA-2A-002','ORA-2A-003','ORA-2A-016_Type1','ORA-2A-016-Type2','ORA-2A-016-Type3','ORA-2A-016-Type4','ORA-2A-023','ORA-2A-024','MINGLED1-ORA-2A-024','MINGLED2-ORA-2A-024','MINGLED3-ORA-2A-024']]\n\n#plot diagrams\nMG.pyroplot.spider(color=\"green\",alpha = 0.5, mode = \"fill\")\n\nVCCR.pyroplot.spider(color=\"red\",alpha = 0.5, mode = \"fill\")\n\nFG.pyroplot.spider(color=\"purple\",alpha = 0.5, mode = \"fill\")\n\nFGCP.pyroplot.spider(color=\"blue\",alpha = 0.5, mode = \"fill\")\n\n\n#set background\nsns.set_style(\"darkgrid\")\n\n\n#plot graph\nplt.show()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
default_app_config = 'reman.apps.RemanConfig'
flexible
{ "blob_id": "0b0b928aef9a4e9953b02639bf5e7769cc4389d7", "index": 2488, "step-1": "<mask token>\n", "step-2": "default_app_config = 'reman.apps.RemanConfig'\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
# -*- coding: utf-8 -*-

class Solution:
    """
    @param head: The first node of the linked list.
    @return: The node where the cycle begins.
    if there is no cycle, return null
    """
    def detectCycle(self, head):
        # write your code here
        # First determine whether there is a cycle, then work out the cycle's size, then traverse to locate where it begins.
        cycle_len = -1
        one_node, two_node = head, head
        while two_node:
            for i in xrange(2):
                if two_node:
                    two_node = two_node.next
                    if two_node == one_node:
                        cycle_len = 1
                        two_node = one_node.next
                        while two_node != one_node:  # work out the length of the cycle
                            cycle_len += 1
                            two_node = two_node.next
                        break
                else:
                    break
            one_node = one_node.next
            if (not two_node) or (cycle_len != -1):
                break
        if cycle_len == -1:
            return None
        one_node, two_node = head, head  # two_node first advances by a distance equal to the cycle length
        i = 0
        while i < cycle_len:
            two_node = two_node.next
            i += 1
        while one_node != two_node:
            one_node = one_node.next
            two_node = two_node.next
        return one_node
normal
{ "blob_id": "3319614d154b16190f3cd8f4f65c3b0e0da277e9", "index": 9751, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n <mask token>\n", "step-3": "class Solution:\n <mask token>\n\n def detectCycle(self, head):\n cycle_len = -1\n one_node, two_node = head, head\n while two_node:\n for i in xrange(2):\n if two_node:\n two_node = two_node.next\n if two_node == one_node:\n cycle_len = 1\n two_node = one_node.next\n while two_node != one_node:\n cycle_len += 1\n two_node = two_node.next\n break\n else:\n break\n one_node = one_node.next\n if not two_node or cycle_len != -1:\n break\n if cycle_len == -1:\n return None\n one_node, two_node = head, head\n i = 0\n while i < cycle_len:\n two_node = two_node.next\n i += 1\n while one_node != two_node:\n one_node = one_node.next\n two_node = two_node.next\n return one_node\n", "step-4": "class Solution:\n \"\"\"\n @param head: The first node of the linked list.\n @return: The node where the cycle begins. \n if there is no cycle, return null\n \"\"\"\n\n def detectCycle(self, head):\n cycle_len = -1\n one_node, two_node = head, head\n while two_node:\n for i in xrange(2):\n if two_node:\n two_node = two_node.next\n if two_node == one_node:\n cycle_len = 1\n two_node = one_node.next\n while two_node != one_node:\n cycle_len += 1\n two_node = two_node.next\n break\n else:\n break\n one_node = one_node.next\n if not two_node or cycle_len != -1:\n break\n if cycle_len == -1:\n return None\n one_node, two_node = head, head\n i = 0\n while i < cycle_len:\n two_node = two_node.next\n i += 1\n while one_node != two_node:\n one_node = one_node.next\n two_node = two_node.next\n return one_node\n", "step-5": "# -*- coding: utf-8 -*-\n\nclass Solution:\n \"\"\"\n @param head: The first node of the linked list.\n @return: The node where the cycle begins. \n if there is no cycle, return null\n \"\"\"\n def detectCycle(self, head):\n # write your code here\n # 先确定是否有环,然后确定环的大小,再遍历确定位置。\n cycle_len = -1\n one_node, two_node = head, head\n while two_node:\n for i in xrange(2):\n if two_node:\n two_node = two_node.next\n if two_node == one_node:\n cycle_len = 1\n two_node = one_node.next\n while two_node != one_node: # 算出环的长度\n cycle_len += 1\n two_node = two_node.next\n break\n else:\n break\n one_node = one_node.next\n if (not two_node) or (cycle_len != -1):\n break\n if cycle_len == -1:\n return None\n one_node, two_node = head, head # two_node先前进的距离等于环的长度\n i = 0\n while i < cycle_len:\n two_node = two_node.next\n i += 1\n while one_node != two_node:\n one_node = one_node.next\n two_node = two_node.next\n return one_node", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('country', Country_Data, name='country_data'), path(
    'tours', Scrape_Data, name='scrape_data'), path('draws', Draw_Data,
    name='Draw_data')]
<|reserved_special_token_1|>
from django.urls import path
from .views import *
urlpatterns = [path('country', Country_Data, name='country_data'), path(
    'tours', Scrape_Data, name='scrape_data'), path('draws', Draw_Data,
    name='Draw_data')]
flexible
{ "blob_id": "b39c783cbaff2915c8864ce0b081b5bf052baee5", "index": 6731, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('country', Country_Data, name='country_data'), path(\n 'tours', Scrape_Data, name='scrape_data'), path('draws', Draw_Data,\n name='Draw_data')]\n", "step-3": "from django.urls import path\nfrom .views import *\nurlpatterns = [path('country', Country_Data, name='country_data'), path(\n 'tours', Scrape_Data, name='scrape_data'), path('draws', Draw_Data,\n name='Draw_data')]\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import cv2
img = cv2.imread('imgs/1.png')
pixel = img[100, 100]
img[100, 100] = [57, 63, 99]  # set the pixel value
b = img[100, 100, 0]  # 57, get the blue-channel pixel value at (100, 100)
g = img[100, 100, 1]  # 63
r = img[100, 100, 2]  # 68
r = img[100, 100, 2] = 99  # set the red channel
# get and set a single channel value
piexl = img.item(100, 100, 2)
img.itemset((100, 100, 2), 99)
normal
{ "blob_id": "d13f06afeac938fc2cf4d3506b3f68c6de9de210", "index": 6596, "step-1": "<mask token>\n", "step-2": "<mask token>\nimg.itemset((100, 100, 2), 99)\n", "step-3": "<mask token>\nimg = cv2.imread('imgs/1.png')\npixel = img[100, 100]\nimg[100, 100] = [57, 63, 99]\nb = img[100, 100, 0]\ng = img[100, 100, 1]\nr = img[100, 100, 2]\nr = img[100, 100, 2] = 99\npiexl = img.item(100, 100, 2)\nimg.itemset((100, 100, 2), 99)\n", "step-4": "import cv2\nimg = cv2.imread('imgs/1.png')\npixel = img[100, 100]\nimg[100, 100] = [57, 63, 99]\nb = img[100, 100, 0]\ng = img[100, 100, 1]\nr = img[100, 100, 2]\nr = img[100, 100, 2] = 99\npiexl = img.item(100, 100, 2)\nimg.itemset((100, 100, 2), 99)\n", "step-5": "import cv2\nimg = cv2.imread('imgs/1.png')\npixel = img[100, 100]\nimg[100, 100] = [57, 63, 99] # 设置像素值\nb = img[100, 100, 0] # 57, 获取(100, 100)处, blue通道像素值\ng = img[100, 100, 1] # 63\nr = img[100, 100, 2] # 68\nr = img[100, 100, 2] = 99 # 设置red通道\n# 获取和设置\npiexl = img.item(100, 100, 2)\nimg.itemset((100, 100, 2), 99)\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
"""
Structures that extract values from a function and store them in iterable objects (which can be traversed).
They are more efficient than traditional functions.
Very useful with lists of infinite values.
In certain scenarios it will be very useful for a generator to return the values one by one.
A generator uses the reserved word yield.
"""

# we will have a function generate even numbers for us
def generarPares(limite):
    num=1
    milista=[]
    while num<limite:
        milista.append(num*2)
        num+=1
    return milista

print(generarPares(10))

def generarPares2(limite):
    num = 1
    while num < limite:
        yield num*2
        num += 1

devuelvePares=generarPares2(10)

for i in devuelvePares:
    print(i)

# Now with the yield from instruction
# It simplifies the generator code when we have to use nested loops.

# *the asterisk in python means we do not know how many arguments will be included, and they are delivered as a tuple

def devuelveCiudades(*ciudades):
    for e in ciudades:
        yield e

ciudadesDevueltas=devuelveCiudades("Madrid","Barcelona","Bilbao","Valencia")
# next prints one at a time
print(next(ciudadesDevueltas))
print(next(ciudadesDevueltas))

# if we wanted to access the letters
def devuelveCiudades2(*ciudades):
    for e in ciudades:
        for subelemento in e:
            yield subelemento

ciudadesDevueltas2=devuelveCiudades2("Madrid","Barcelona","Bilbao","Valencia")
print(next(ciudadesDevueltas2))
print(next(ciudadesDevueltas2))

def devuelveCiudades3(*ciudades):
    for e in ciudades:
        yield from e  # returns the same as function 2

ciudadesDevueltas3=devuelveCiudades3("Madrid","Barcelona","Bilbao","Valencia")
print(next(ciudadesDevueltas3))
print(next(ciudadesDevueltas3))
normal
{ "blob_id": "29abcfc010453e3a67346ea2df238e07b85502a8", "index": 3107, "step-1": "<mask token>\n\n\ndef generarPares(limite):\n num = 1\n milista = []\n while num < limite:\n milista.append(num * 2)\n num += 1\n return milista\n\n\n<mask token>\n\n\ndef devuelveCiudades2(*ciudades):\n for e in ciudades:\n for subelemento in e:\n yield subelemento\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef generarPares(limite):\n num = 1\n milista = []\n while num < limite:\n milista.append(num * 2)\n num += 1\n return milista\n\n\n<mask token>\n\n\ndef generarPares2(limite):\n num = 1\n while num < limite:\n yield num * 2\n num += 1\n\n\n<mask token>\n\n\ndef devuelveCiudades(*ciudades):\n for e in ciudades:\n yield e\n\n\n<mask token>\n\n\ndef devuelveCiudades2(*ciudades):\n for e in ciudades:\n for subelemento in e:\n yield subelemento\n\n\n<mask token>\n\n\ndef devuelveCiudades3(*ciudades):\n for e in ciudades:\n yield from e\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef generarPares(limite):\n num = 1\n milista = []\n while num < limite:\n milista.append(num * 2)\n num += 1\n return milista\n\n\nprint(generarPares(10))\n\n\ndef generarPares2(limite):\n num = 1\n while num < limite:\n yield num * 2\n num += 1\n\n\n<mask token>\nfor i in devuelvePares:\n print(i)\n\n\ndef devuelveCiudades(*ciudades):\n for e in ciudades:\n yield e\n\n\n<mask token>\nprint(next(ciudadesDevueltas))\nprint(next(ciudadesDevueltas))\n\n\ndef devuelveCiudades2(*ciudades):\n for e in ciudades:\n for subelemento in e:\n yield subelemento\n\n\n<mask token>\nprint(next(ciudadesDevueltas2))\nprint(next(ciudadesDevueltas2))\n\n\ndef devuelveCiudades3(*ciudades):\n for e in ciudades:\n yield from e\n\n\n<mask token>\nprint(next(ciudadesDevueltas3))\nprint(next(ciudadesDevueltas3))\n", "step-4": "<mask token>\n\n\ndef generarPares(limite):\n num = 1\n milista = []\n while num < limite:\n milista.append(num * 2)\n num += 1\n return milista\n\n\nprint(generarPares(10))\n\n\ndef generarPares2(limite):\n num = 1\n while num < limite:\n yield num * 2\n num += 1\n\n\ndevuelvePares = generarPares2(10)\nfor i in devuelvePares:\n print(i)\n\n\ndef devuelveCiudades(*ciudades):\n for e in ciudades:\n yield e\n\n\nciudadesDevueltas = devuelveCiudades('Madrid', 'Barcelona', 'Bilbao',\n 'Valencia')\nprint(next(ciudadesDevueltas))\nprint(next(ciudadesDevueltas))\n\n\ndef devuelveCiudades2(*ciudades):\n for e in ciudades:\n for subelemento in e:\n yield subelemento\n\n\nciudadesDevueltas2 = devuelveCiudades2('Madrid', 'Barcelona', 'Bilbao',\n 'Valencia')\nprint(next(ciudadesDevueltas2))\nprint(next(ciudadesDevueltas2))\n\n\ndef devuelveCiudades3(*ciudades):\n for e in ciudades:\n yield from e\n\n\nciudadesDevueltas3 = devuelveCiudades3('Madrid', 'Barcelona', 'Bilbao',\n 'Valencia')\nprint(next(ciudadesDevueltas3))\nprint(next(ciudadesDevueltas3))\n", "step-5": "\"\"\"\r\nEstructuras que extraen valores de una función y se almacenan en objetos iterables (que se pueden recorrer\r\nSon mas eficientes que las funciones tradicionales\r\nmuy útiles con listas de valores infinitos\r\nBajos determinados escenarios, será muy útil que un generador devuelva los valores de uno en uno\r\nUn generador usar la palabra reservada yield\r\n\r\n\r\n\"\"\"\r\n\r\n#haremos que nos genere una función de números pares\r\n\r\ndef generarPares(limite):\r\n num=1\r\n milista=[]\r\n while num<limite:\r\n milista.append(num*2)\r\n num+=1\r\n return milista\r\n\r\nprint(generarPares(10))\r\n\r\n\r\ndef generarPares2(limite):\r\n num = 1\r\n\r\n while num < 
limite:\r\n yield num*2\r\n num += 1\r\n\r\ndevuelvePares=generarPares2(10)\r\n\r\nfor i in devuelvePares:\r\n print(i)\r\n\r\n\r\n#Ahora con la instruccion yield from\r\n#Simplifica el código del generador en caso tengamos que usar bucles anidados.\r\n\r\n#*el asterisoc en python significa que no se sabe cuantos argumentos se incluiran y que estos se entregaran en forma de tupla\r\n\r\ndef devuelveCiudades(*ciudades):\r\n for e in ciudades:\r\n yield e\r\n\r\n\r\nciudadesDevueltas=devuelveCiudades(\"Madrid\",\"Barcelona\",\"Bilbao\",\"Valencia\")\r\n #next imprime uno a uno\r\nprint(next(ciudadesDevueltas))\r\nprint(next(ciudadesDevueltas))\r\n\r\n\r\n#si quisieramos acceder a las letras\r\n\r\ndef devuelveCiudades2(*ciudades):\r\n for e in ciudades:\r\n for subelemento in e:\r\n yield subelemento\r\n\r\nciudadesDevueltas2=devuelveCiudades2(\"Madrid\",\"Barcelona\",\"Bilbao\",\"Valencia\")\r\n\r\nprint(next(ciudadesDevueltas2))\r\nprint(next(ciudadesDevueltas2))\r\n\r\n\r\n\r\n\r\ndef devuelveCiudades3(*ciudades):\r\n for e in ciudades:\r\n yield from e #devuelve lo mismo que la funcion 2\r\n\r\nciudadesDevueltas3=devuelveCiudades3(\"Madrid\",\"Barcelona\",\"Bilbao\",\"Valencia\")\r\n\r\nprint(next(ciudadesDevueltas3))\r\nprint(next(ciudadesDevueltas3))", "step-ids": [ 2, 5, 6, 7, 8 ] }
[ 2, 5, 6, 7, 8 ]
"""
- Define a new class Student which is derived from Human and has:
    grade field.
    do_hobby - print 'dancing' or some another hobby
"""
import andy.Lesson_7.exercise_1


class Student(andy.Lesson_7.exercise_1.Human):

    def __init__(self, firstname, lastname, grade):
        super().__init__(firstname, lastname)
        self.grade = grade

    def do_hobby(self):
        return self.full_name + " ebet Petra Kovarskogo"


a = Student("Artem", "Nizhnik", "Shkolnik")
print(a.do_hobby())
print(a.grade)
normal
{ "blob_id": "497f56891670f635feff983058e86055e54be493", "index": 2618, "step-1": "<mask token>\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + ' ebet Petra Kovarskogo'\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + ' ebet Petra Kovarskogo'\n\n\n<mask token>\nprint(a.do_hobby())\nprint(a.grade)\n", "step-3": "<mask token>\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + ' ebet Petra Kovarskogo'\n\n\na = Student('Artem', 'Nizhnik', 'Shkolnik')\nprint(a.do_hobby())\nprint(a.grade)\n", "step-4": "<mask token>\nimport andy.Lesson_7.exercise_1\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + ' ebet Petra Kovarskogo'\n\n\na = Student('Artem', 'Nizhnik', 'Shkolnik')\nprint(a.do_hobby())\nprint(a.grade)\n", "step-5": "\"\"\"\n- Define a new class Student which is derived from Human and has:\n grade field.\n do_hobby - print 'dancing' or some another hobby\n\"\"\"\nimport andy.Lesson_7.exercise_1\n\n\nclass Student(andy.Lesson_7.exercise_1.Human):\n\n def __init__(self, firstname, lastname, grade):\n super().__init__(firstname, lastname)\n self.grade = grade\n\n def do_hobby(self):\n return self.full_name + \" ebet Petra Kovarskogo\"\n\n\na = Student(\"Artem\", \"Nizhnik\", \"Shkolnik\")\nprint(a.do_hobby())\nprint(a.grade)\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy

class CnnArticleItem(scrapy.Item):
    title = scrapy.Field()
    developments = scrapy.Field()
    body = scrapy.Field()
    date = scrapy.Field()

class GoogleArticleItem(scrapy.Item):
    title = scrapy.Field()
    date = scrapy.Field()
    snippet = scrapy.Field()
    source = scrapy.Field()
normal
{ "blob_id": "cf0eb9685cdfc412871d3b36270ddab3e520bb8f", "index": 104, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass CnnArticleItem(scrapy.Item):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()\n", "step-3": "<mask token>\n\n\nclass CnnArticleItem(scrapy.Item):\n title = scrapy.Field()\n developments = scrapy.Field()\n body = scrapy.Field()\n date = scrapy.Field()\n\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()\n", "step-4": "import scrapy\n\n\nclass CnnArticleItem(scrapy.Item):\n title = scrapy.Field()\n developments = scrapy.Field()\n body = scrapy.Field()\n date = scrapy.Field()\n\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()\n", "step-5": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\nclass CnnArticleItem(scrapy.Item):\n title = scrapy.Field()\n developments = scrapy.Field()\n body = scrapy.Field()\n date = scrapy.Field()\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()", "step-ids": [ 0, 3, 4, 5, 6 ] }
[ 0, 3, 4, 5, 6 ]
<|reserved_special_token_0|>


class Game(models.Model):
    gameName = models.CharField(max_length=100)
    genre = models.ForeignKey(GameGenre)

    def __str__(self):
        return '%s, %s' % (self.gameName, self.genre)


class Players(models.Model):
    playerName = models.CharField(max_length=100)
    games = models.ManyToManyField(Game)

    def __str__(self):
        return '%s' % self.playerName
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class GameGenre(models.Model):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>

    def __str__(self):
        return '%s' % self.genreName


class Game(models.Model):
    gameName = models.CharField(max_length=100)
    genre = models.ForeignKey(GameGenre)

    def __str__(self):
        return '%s, %s' % (self.gameName, self.genre)


class Players(models.Model):
    playerName = models.CharField(max_length=100)
    games = models.ManyToManyField(Game)

    def __str__(self):
        return '%s' % self.playerName
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class GameGenre(models.Model):
    genreName = models.CharField(max_length=100)
    genreDescription = models.CharField(max_length=300)

    def __str__(self):
        return '%s' % self.genreName


class Game(models.Model):
    gameName = models.CharField(max_length=100)
    genre = models.ForeignKey(GameGenre)

    def __str__(self):
        return '%s, %s' % (self.gameName, self.genre)


class Players(models.Model):
    playerName = models.CharField(max_length=100)
    games = models.ManyToManyField(Game)

    def __str__(self):
        return '%s' % self.playerName
<|reserved_special_token_1|>
from django.db import models


class GameGenre(models.Model):
    genreName = models.CharField(max_length=100)
    genreDescription = models.CharField(max_length=300)

    def __str__(self):
        return '%s' % self.genreName


class Game(models.Model):
    gameName = models.CharField(max_length=100)
    genre = models.ForeignKey(GameGenre)

    def __str__(self):
        return '%s, %s' % (self.gameName, self.genre)


class Players(models.Model):
    playerName = models.CharField(max_length=100)
    games = models.ManyToManyField(Game)

    def __str__(self):
        return '%s' % self.playerName
<|reserved_special_token_1|>
from django.db import models

# Create your models here.


class GameGenre(models.Model):

    genreName = models.CharField(max_length=100)
    genreDescription = models.CharField(max_length=300)

    def __str__(self):
        return "%s" % (self.genreName)


class Game(models.Model):

    gameName = models.CharField(max_length=100)
    genre = models.ForeignKey(GameGenre)

    def __str__(self):
        return "%s, %s" % (self.gameName, self.genre)


class Players(models.Model):

    playerName = models.CharField(max_length=100)
    games = models.ManyToManyField(Game)

    def __str__(self):
        return "%s" % (self.playerName)
flexible
{ "blob_id": "092242cdb231e09ccf3dd4dccfb6d786c3e4aad2", "index": 8036, "step-1": "<mask token>\n\n\nclass Game(models.Model):\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return '%s, %s' % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return '%s' % self.playerName\n", "step-2": "<mask token>\n\n\nclass GameGenre(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return '%s' % self.genreName\n\n\nclass Game(models.Model):\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return '%s, %s' % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return '%s' % self.playerName\n", "step-3": "<mask token>\n\n\nclass GameGenre(models.Model):\n genreName = models.CharField(max_length=100)\n genreDescription = models.CharField(max_length=300)\n\n def __str__(self):\n return '%s' % self.genreName\n\n\nclass Game(models.Model):\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return '%s, %s' % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return '%s' % self.playerName\n", "step-4": "from django.db import models\n\n\nclass GameGenre(models.Model):\n genreName = models.CharField(max_length=100)\n genreDescription = models.CharField(max_length=300)\n\n def __str__(self):\n return '%s' % self.genreName\n\n\nclass Game(models.Model):\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return '%s, %s' % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return '%s' % self.playerName\n", "step-5": "from django.db import models\n\n# Create your models here.\n\n\nclass GameGenre(models.Model):\n\n genreName = models.CharField(max_length=100)\n genreDescription = models.CharField(max_length=300)\n\n def __str__(self):\n return \"%s\" % (self.genreName)\n\n\nclass Game(models.Model):\n\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return \"%s, %s\" % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return \"%s\" % (self.playerName)\n", "step-ids": [ 6, 8, 9, 10, 11 ] }
[ 6, 8, 9, 10, 11 ]
from django import forms
from .models import Profile


class ImageForm(forms.ModelForm):
    userimage = forms.ImageField(required=False, error_messages={'invalid':("Image file only")}, widget=forms.FileInput)
    class Meta:
        model = Profile
        fields = ['userimage',]
normal
{ "blob_id": "9081d0f75ac53ab8d0bafb39cd46a2fec8a5135f", "index": 3813, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ImageForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = Profile\n fields = ['userimage']\n", "step-3": "<mask token>\n\n\nclass ImageForm(forms.ModelForm):\n userimage = forms.ImageField(required=False, error_messages={'invalid':\n 'Image file only'}, widget=forms.FileInput)\n\n\n class Meta:\n model = Profile\n fields = ['userimage']\n", "step-4": "from django import forms\nfrom .models import Profile\n\n\nclass ImageForm(forms.ModelForm):\n userimage = forms.ImageField(required=False, error_messages={'invalid':\n 'Image file only'}, widget=forms.FileInput)\n\n\n class Meta:\n model = Profile\n fields = ['userimage']\n", "step-5": "from django import forms\nfrom .models import Profile\n\n\n\n\n \nclass ImageForm(forms.ModelForm):\n userimage = forms.ImageField(required=False, error_messages={'invalid':(\"Image file only\")}, widget=forms.FileInput)\n class Meta:\n model = Profile\n fields = ['userimage',]\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# program name: an2_colour.py

# no optional arguments: Uses Wine data to display information about the relationship of
# various attributes with colour and hue

print('========================================================================================')
print('========================================================================================')

print('> start of program an2_colour.py')
print('> import libraries')

import argparse
import os.path as op
import csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from numpy.polynomial.polynomial import polyfit

print('> define convert_type function')
def convert_type(data_value):
    try:
        return int(data_value)
    except ValueError:
        try:
            return float(data_value)
        except ValueError:
            return data_value

print("> define get_delim function")
def get_delim(sourcefile1):
    print('> executing get_delim function')
    data = open(sourcefile1, 'r')
    my_read_data = data.read()
    if my_read_data.find(',') > 0:
        print('   delimiter: comma')
        return ','
    else:
        print('   delimiter: space')
        return ' '
    print(' ')

def lines_to_dict(lines, header=False):
    print('> executing lines_to_dict')
    # column_titles = ['Class','Alcohol','Malic acid','Ash','Alcalinity of ash','Magnesium','Total phenols','Flavanoids','Nonflavanoid phenols','Proanthocyanins','Color intensity','Hue',
    # 'OD280/OD315 of diluted wines','Proline']
    column_titles = ['class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',
                     'od','proline']

    data_dict = {}
    for idx, column in enumerate(column_titles):
        data_dict[column] = []
        for row in lines:
            data_dict[column] += [row[idx]]
    return data_dict

def parse_file(data_file, dlm, debug=False):  # took delimiter out
    print('> executing parse_file')
    # Verify the file exists
    assert(op.isfile(data_file))

    # open it as a csv
    with open(data_file, 'r') as fhandle:
        csv_reader = csv.reader(fhandle, delimiter=dlm)
        # Add each line in the file to a list
        lines = []
        if debug:
            count = 0
        for line in csv_reader:
            if debug:
                if count > 2:
                    break
                count += 1
            newline = []
            for value in line:
                newline += [convert_type(value)]
            if len(newline) > 0:
                lines += [newline]

    print('> view a few lines')
    print(' ')
    for line in lines[0:2]:
        print(line)
    print(' ')
    # Return all the contents of our file
    return lines


# class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',
# 'od','proline


def plot_data3(dd, col1, label1,
               col2a, col2b,
               label2a, label2b, n,
               debug=False):
    df = pd.DataFrame.from_dict(dd)
    x = np.fromiter(dd[col1], dtype=float)  # need these for the lines below
    y1 = np.fromiter(dd[col2a], dtype=float)
    y2 = np.fromiter(dd[col2b], dtype=float)

    # print(df)
    fig, ax1 = plt.subplots()
    plt.title(label1 + ' by ' + label2a + ' and ' + label2b)

    clra = 'indigo'
    ax1.set_xlabel(label1)
    ax1.set_ylabel(label2a, color=clra)  # left side

    ax1.scatter(df[col1], df[col2a], color=clra, marker = '^')

    xp = np.linspace(np.amin(x), np.amax(x), 100)  #only works for numpy arrays
    weights = np.polyfit(x, y1, 1)
    model = np.poly1d(weights)
    plt.plot(xp, model(xp), '-', c=clra)
    ax1.tick_params(axis='y', labelcolor=clra)

    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis

    clrb = 'darkgreen'
    ax2.set_ylabel(label2b, color=clrb)  # we already handled the x-label with ax1
    # ax2.plot(df[col1], df[col2b], color=color)
    ax2.scatter(df[col1], df[col2b], color= clrb)
    ax2.tick_params(axis='y', labelcolor=clrb)

    xp = np.linspace(np.amin(x), np.amax(x), 100)  #only works for numpy arrays
    weights = np.polyfit(x, y2, 1)
    model = np.poly1d(weights)
    plt.plot(xp, model(xp), '-', c=clrb)
    ax1.tick_params(axis='y', labelcolor=clra)

    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.savefig('an2_colour' + n + '.png')
    plt.show()

# Cases where there is a possible correlation with colour intensity or hue.
# color intensity:
# check against : alc, flav, od, proline
# hue:
# check against: ma, tphen, flav, pac, od

def main():

    data_file = "wine.data"
    dlm = get_delim(data_file)
    my_data = parse_file(data_file, dlm)
    data_dictionary = lines_to_dict(my_data)
    #print(data_dictionary)

    plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav', 'Alcohol', 'Flavonoids', '1')
    plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od', 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')

if __name__ == "__main__":
    main()
normal
{ "blob_id": "594479c22cada665dcdc76737085ce342d7d5faf", "index": 1480, "step-1": "<mask token>\n\n\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\n\n<mask token>\n\n\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r')\n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' '\n print(' ')\n\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',\n 'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']\n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\n\n<mask token>\n\n\ndef plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False\n ):\n df = pd.DataFrame.from_dict(dd)\n x = np.fromiter(dd[col1], dtype=float)\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n fig, ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra)\n ax1.scatter(df[col1], df[col2a], color=clra, marker='^')\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n ax2 = ax1.twinx()\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb)\n ax2.scatter(df[col1], df[col2b], color=clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n fig.tight_layout()\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n\ndef main():\n data_file = 'wine.data'\n dlm = get_delim(data_file)\n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',\n 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',\n 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\n\n<mask token>\n\n\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r')\n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' '\n print(' ')\n\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',\n 'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']\n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\n\ndef parse_file(data_file, dlm, debug=False):\n print('> executing parse_file')\n assert op.isfile(data_file)\n with open(data_file, 'r') as fhandle:\n csv_reader = csv.reader(fhandle, delimiter=dlm)\n lines = []\n if debug:\n count = 0\n for 
line in csv_reader:\n if debug:\n if count > 2:\n break\n count += 1\n newline = []\n for value in line:\n newline += [convert_type(value)]\n if len(newline) > 0:\n lines += [newline]\n print('> view a few lines')\n print(' ')\n for line in lines[0:2]:\n print(line)\n print(' ')\n return lines\n\n\ndef plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False\n ):\n df = pd.DataFrame.from_dict(dd)\n x = np.fromiter(dd[col1], dtype=float)\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n fig, ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra)\n ax1.scatter(df[col1], df[col2a], color=clra, marker='^')\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n ax2 = ax1.twinx()\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb)\n ax2.scatter(df[col1], df[col2b], color=clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n fig.tight_layout()\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n\ndef main():\n data_file = 'wine.data'\n dlm = get_delim(data_file)\n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',\n 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',\n 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\n\n<mask token>\n", "step-3": "print(\n '========================================================================================'\n )\nprint(\n '========================================================================================'\n )\nprint('> start of program an2_colour.py')\nprint('> import libraries')\n<mask token>\nprint('> define convert_type function')\n\n\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\n\nprint('> define get_delim function')\n\n\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r')\n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' '\n print(' ')\n\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',\n 'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']\n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\n\ndef parse_file(data_file, dlm, debug=False):\n print('> executing parse_file')\n assert op.isfile(data_file)\n with open(data_file, 'r') as fhandle:\n csv_reader = csv.reader(fhandle, delimiter=dlm)\n lines = []\n if debug:\n count = 0\n for line in csv_reader:\n if debug:\n if count > 2:\n break\n count += 1\n newline = []\n for value in line:\n newline += [convert_type(value)]\n if len(newline) > 0:\n lines += [newline]\n print('> view a few lines')\n print(' ')\n for line in lines[0:2]:\n print(line)\n print(' ')\n return lines\n\n\ndef 
plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False\n ):\n df = pd.DataFrame.from_dict(dd)\n x = np.fromiter(dd[col1], dtype=float)\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n fig, ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra)\n ax1.scatter(df[col1], df[col2a], color=clra, marker='^')\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n ax2 = ax1.twinx()\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb)\n ax2.scatter(df[col1], df[col2b], color=clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n fig.tight_layout()\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n\ndef main():\n data_file = 'wine.data'\n dlm = get_delim(data_file)\n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',\n 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',\n 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "print(\n '========================================================================================'\n )\nprint(\n '========================================================================================'\n )\nprint('> start of program an2_colour.py')\nprint('> import libraries')\nimport argparse\nimport os.path as op\nimport csv\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom numpy.polynomial.polynomial import polyfit\nprint('> define convert_type function')\n\n\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\n\nprint('> define get_delim function')\n\n\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r')\n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' '\n print(' ')\n\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',\n 'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']\n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\n\ndef parse_file(data_file, dlm, debug=False):\n print('> executing parse_file')\n assert op.isfile(data_file)\n with open(data_file, 'r') as fhandle:\n csv_reader = csv.reader(fhandle, delimiter=dlm)\n lines = []\n if debug:\n count = 0\n for line in csv_reader:\n if debug:\n if count > 2:\n break\n count += 1\n newline = []\n for value in line:\n newline += [convert_type(value)]\n if len(newline) > 0:\n lines += [newline]\n print('> view a few lines')\n print(' ')\n for line in lines[0:2]:\n print(line)\n print(' ')\n return lines\n\n\ndef plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False\n ):\n df = pd.DataFrame.from_dict(dd)\n x = 
np.fromiter(dd[col1], dtype=float)\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n fig, ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra)\n ax1.scatter(df[col1], df[col2a], color=clra, marker='^')\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n ax2 = ax1.twinx()\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb)\n ax2.scatter(df[col1], df[col2b], color=clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n xp = np.linspace(np.amin(x), np.amax(x), 100)\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n fig.tight_layout()\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n\ndef main():\n data_file = 'wine.data'\n dlm = get_delim(data_file)\n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',\n 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',\n 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "# program name: an2_colour.py\n\n# no optional arguments: Uses Wine data to display information about the relationship of \n# various attributes with colour and hue \n\nprint('========================================================================================')\nprint('========================================================================================')\n\nprint('> start of program an2_colour.py')\nprint('> import libraries')\n\nimport argparse\nimport os.path as op\nimport csv\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom numpy.polynomial.polynomial import polyfit\n\nprint('> define convert_type function')\ndef convert_type(data_value):\n try:\n return int(data_value)\n except ValueError:\n try:\n return float(data_value)\n except ValueError:\n return data_value\n\nprint(\"> define get_delim function\")\ndef get_delim(sourcefile1):\n print('> executing get_delim function')\n data = open(sourcefile1, 'r') \n my_read_data = data.read()\n if my_read_data.find(',') > 0:\n print(' delimiter: comma')\n return ','\n else:\n print(' delimiter: space')\n return ' ' \n print(' ')\n\ndef lines_to_dict(lines, header=False):\n print('> executing lines_to_dict')\n # column_titles = ['Class','Alcohol','Malic acid','Ash','Alcalinity of ash','Magnesium','Total phenols','Flavanoids','Nonflavanoid phenols','Proanthocyanins','Color intensity','Hue',\n # 'OD280/OD315 of diluted wines','Proline']\n column_titles = ['class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',\n 'od','proline']\n \n data_dict = {}\n for idx, column in enumerate(column_titles):\n data_dict[column] = []\n for row in lines:\n data_dict[column] += [row[idx]]\n return data_dict\n\ndef parse_file(data_file, dlm, debug=False): # took delimiter out\n print('> executing parse_file')\n # Verify the file exists\n assert(op.isfile(data_file))\n\n # open it as a csv \n with open(data_file, 'r') as fhandle:\n csv_reader = csv.reader(fhandle, delimiter=dlm)\n # Add each line in the file to a list\n lines = []\n if debug:\n count = 0\n for line in csv_reader:\n if debug:\n if count > 2:\n break\n 
count += 1\n newline = []\n for value in line:\n newline += [convert_type(value)]\n if len(newline) > 0:\n lines += [newline]\n\n print('> view a few lines')\n print(' ')\n for line in lines[0:2]:\n print(line)\n print(' ')\n # Return all the contents of our file\n return lines\n\n\n# class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',\n# 'od','proline\n\n \ndef plot_data3(dd, col1, label1, \n col2a, col2b,\n label2a, label2b, n,\n debug=False):\n df = pd.DataFrame.from_dict(dd) \n x = np.fromiter(dd[col1], dtype=float) # need these for the lines below\n y1 = np.fromiter(dd[col2a], dtype=float)\n y2 = np.fromiter(dd[col2b], dtype=float)\n\n # print(df) \n fig, ax1 = plt.subplots()\n plt.title(label1 + ' by ' + label2a + ' and ' + label2b)\n\n clra = 'indigo'\n ax1.set_xlabel(label1)\n ax1.set_ylabel(label2a, color=clra) # left side\n\n ax1.scatter(df[col1], df[col2a], color=clra, marker = '^')\n\n xp = np.linspace(np.amin(x), np.amax(x), 100) #only works for numpy arrays\n weights = np.polyfit(x, y1, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clra)\n ax1.tick_params(axis='y', labelcolor=clra)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n clrb = 'darkgreen'\n ax2.set_ylabel(label2b, color=clrb) # we already handled the x-label with ax1\n # ax2.plot(df[col1], df[col2b], color=color)\n ax2.scatter(df[col1], df[col2b], color= clrb)\n ax2.tick_params(axis='y', labelcolor=clrb)\n\n xp = np.linspace(np.amin(x), np.amax(x), 100) #only works for numpy arrays\n weights = np.polyfit(x, y2, 1)\n model = np.poly1d(weights)\n plt.plot(xp, model(xp), '-', c=clrb)\n ax1.tick_params(axis='y', labelcolor=clra)\n\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.savefig('an2_colour' + n + '.png')\n plt.show()\n\n# Cases where there is a possible correlation with colour intensity or hue. \n# color intensity:\n# check against : alc, flav, od, proline\n# hue:\n# check against: ma, tphen, flav, pac, od\n\ndef main():\n\n data_file = \"wine.data\"\n dlm = get_delim(data_file) \n my_data = parse_file(data_file, dlm)\n data_dictionary = lines_to_dict(my_data)\n #print(data_dictionary)\n\n plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav', 'Alcohol', 'Flavonoids', '1')\n plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od', 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')\n\nif __name__ == \"__main__\":\n main()\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('hello', end='!')
print('python')
print('010', '1234', '1111', sep='-')
<|reserved_special_token_0|>
print('입력한 숫자 :', num)
print('num type :', type(num))
<|reserved_special_token_0|>
print('result :', result)
print('result type :', type(result))
print('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화'))
print('이름 : {}, 나이 : {}, 주소 : {}'.format('김유신', 23, '김해시'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print('hello', end='!')
print('python')
print('010', '1234', '1111', sep='-')
num = input('숫자입력 : ')
print('입력한 숫자 :', num)
print('num type :', type(num))
result = int(num)
print('result :', result)
print('result type :', type(result))
print('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화'))
print('이름 : {}, 나이 : {}, 주소 : {}'.format('김유신', 23, '김해시'))
<|reserved_special_token_1|>
# Input Output test (입출력 테스트 )
"""
날짜 : 2021/04/27
이름 : 이지영
내용 : 파이썬 표준입출력 실습 _ 교재 p42
"""

# 파이썬 표준 출력
print('hello', end='!') #print : 출력함수 (자바에선 document.write('hello');)
print('python')

print('010', '1234', '1111', sep='-') # seperate 값

# 파이썬 표준 입력
num = input('숫자입력 : ')

print('입력한 숫자 :', num)
print('num type :', type(num))

# 입력받은 문자열을 숫자로 변환하는 작업이 필요함. <class 'str'> 문자열로 읽히기 때문
result = int(num)
print('result :', result)
print('result type :', type(result))

# 서식문자 출력
print('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화')) # %s: string 문자열을 나타냄

# 포맷문자 출력
print('이름 : {}, 나이 : {}, 주소 : {}' .format('김유신', 23, '김해시'))
flexible
{ "blob_id": "cc628270a973866025a5e2a5d07e39b4dbdcd324", "index": 1718, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('hello', end='!')\nprint('python')\nprint('010', '1234', '1111', sep='-')\n<mask token>\nprint('입력한 숫자 :', num)\nprint('num type :', type(num))\n<mask token>\nprint('result :', result)\nprint('result type :', type(result))\nprint('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화'))\nprint('이름 : {}, 나이 : {}, 주소 : {}'.format('김유신', 23, '김해시'))\n", "step-3": "<mask token>\nprint('hello', end='!')\nprint('python')\nprint('010', '1234', '1111', sep='-')\nnum = input('숫자입력 : ')\nprint('입력한 숫자 :', num)\nprint('num type :', type(num))\nresult = int(num)\nprint('result :', result)\nprint('result type :', type(result))\nprint('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화'))\nprint('이름 : {}, 나이 : {}, 주소 : {}'.format('김유신', 23, '김해시'))\n", "step-4": "# Input Output test (입출력 테스트 )\n\"\"\"\n날짜 : 2021/04/27\n이름 : 이지영\n내용 : 파이썬 표준입출력 실습 _ 교재 p42\n\"\"\"\n\n# 파이썬 표준 출력\nprint('hello', end='!') #print : 출력함수 (자바에선 document.write('hello');)\nprint('python')\n\nprint('010', '1234', '1111', sep='-') # seperate 값\n\n# 파이썬 표준 입력\nnum = input('숫자입력 : ')\n\nprint('입력한 숫자 :', num)\nprint('num type :', type(num))\n\n# 입력받은 문자열을 숫자로 변환하는 작업이 필요함. <class 'str'> 문자열로 읽히기 때문\nresult = int(num)\nprint('result :', result)\nprint('result type :', type(result))\n\n\n# 서식문자 출력\nprint('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화')) # %s: string 문자열을 나타냄\n\n# 포맷문자 출력\nprint('이름 : {}, 나이 : {}, 주소 : {}' .format('김유신', 23, '김해시'))\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(migrations.Migration): dependencies = [('aposta', '0003_aposta_nome')] operations = [migrations.CreateModel(name='Aposta2', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize= False, verbose_name='ID')), ('aposta_identificacao', models. CharField(max_length=200)), ('valor', models.IntegerField(default=0 ))]), migrations.CreateModel(name='Concurso2', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize= False, verbose_name='ID')), ('concurso_edicao', models.CharField( max_length=20)), ('pub_data', models.DateTimeField(verbose_name= 'data de publicacao'))]), migrations.AlterField(model_name='aposta', name='dataAposta', field=models.DateField()), migrations.AddField( model_name='aposta2', name='Concurso2_identificao', field=models. ForeignKey(on_delete=django.db.models.deletion.CASCADE, to= 'aposta.Concurso2'))] <|reserved_special_token_1|> from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [('aposta', '0003_aposta_nome')] operations = [migrations.CreateModel(name='Aposta2', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize= False, verbose_name='ID')), ('aposta_identificacao', models. CharField(max_length=200)), ('valor', models.IntegerField(default=0 ))]), migrations.CreateModel(name='Concurso2', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize= False, verbose_name='ID')), ('concurso_edicao', models.CharField( max_length=20)), ('pub_data', models.DateTimeField(verbose_name= 'data de publicacao'))]), migrations.AlterField(model_name='aposta', name='dataAposta', field=models.DateField()), migrations.AddField( model_name='aposta2', name='Concurso2_identificao', field=models. ForeignKey(on_delete=django.db.models.deletion.CASCADE, to= 'aposta.Concurso2'))] <|reserved_special_token_1|> # -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-04-15 18:46 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('aposta', '0003_aposta_nome'), ] operations = [ migrations.CreateModel( name='Aposta2', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('aposta_identificacao', models.CharField(max_length=200)), ('valor', models.IntegerField(default=0)), ], ), migrations.CreateModel( name='Concurso2', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('concurso_edicao', models.CharField(max_length=20)), ('pub_data', models.DateTimeField(verbose_name='data de publicacao')), ], ), migrations.AlterField( model_name='aposta', name='dataAposta', field=models.DateField(), ), migrations.AddField( model_name='aposta2', name='Concurso2_identificao', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aposta.Concurso2'), ), ]
flexible
{ "blob_id": "a917dd6171a78142fefa8c8bfad0110729fc1bb0", "index": 3190, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('aposta', '0003_aposta_nome')]\n operations = [migrations.CreateModel(name='Aposta2', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('aposta_identificacao', models.\n CharField(max_length=200)), ('valor', models.IntegerField(default=0\n ))]), migrations.CreateModel(name='Concurso2', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('concurso_edicao', models.CharField(\n max_length=20)), ('pub_data', models.DateTimeField(verbose_name=\n 'data de publicacao'))]), migrations.AlterField(model_name='aposta',\n name='dataAposta', field=models.DateField()), migrations.AddField(\n model_name='aposta2', name='Concurso2_identificao', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'aposta.Concurso2'))]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('aposta', '0003_aposta_nome')]\n operations = [migrations.CreateModel(name='Aposta2', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('aposta_identificacao', models.\n CharField(max_length=200)), ('valor', models.IntegerField(default=0\n ))]), migrations.CreateModel(name='Concurso2', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('concurso_edicao', models.CharField(\n max_length=20)), ('pub_data', models.DateTimeField(verbose_name=\n 'data de publicacao'))]), migrations.AlterField(model_name='aposta',\n name='dataAposta', field=models.DateField()), migrations.AddField(\n model_name='aposta2', name='Concurso2_identificao', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'aposta.Concurso2'))]\n", "step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-04-15 18:46\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('aposta', '0003_aposta_nome'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Aposta2',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('aposta_identificacao', models.CharField(max_length=200)),\n ('valor', models.IntegerField(default=0)),\n ],\n ),\n migrations.CreateModel(\n name='Concurso2',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('concurso_edicao', models.CharField(max_length=20)),\n ('pub_data', models.DateTimeField(verbose_name='data de publicacao')),\n ],\n ),\n migrations.AlterField(\n model_name='aposta',\n name='dataAposta',\n field=models.DateField(),\n ),\n migrations.AddField(\n model_name='aposta2',\n name='Concurso2_identificao',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aposta.Concurso2'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Migration(SchemaMigration): def forwards(self, orm): db.add_column(u'main_videoad', 'compress', self.gf( 'django.db.models.fields.BooleanField')(default=False), keep_default=False) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(SchemaMigration): def forwards(self, orm): db.add_column(u'main_videoad', 'compress', self.gf( 'django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): db.delete_column(u'main_videoad', 'compress') <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Migration(SchemaMigration): def forwards(self, orm): db.add_column(u'main_videoad', 'compress', self.gf( 'django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): db.delete_column(u'main_videoad', 'compress') models = {u'contenttypes.contenttype': {'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], { 'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ( 'django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})}, u'main.days': {'Meta': {'object_name': 'Days'}, 'date': ( 'django.db.models.fields.DateField', [], {}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.ImageAd']", 'null': 'True', 'blank': 'True'}), 'show_text': ( 'django.db.models.fields.BooleanField', [], {}), 'show_video': ( 'django.db.models.fields.BooleanField', [], {}), 'start_time': ( 'django.db.models.fields.TimeField', [], {'default': 'datetime.time(8, 0)'}), 'stop_time': ( 'django.db.models.fields.TimeField', [], {'default': 'datetime.time(22, 0)'}), 'terminal': ( 'django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Terminal']"}), 'text_ad': ( 'django.db.models.fields.related.ManyToManyField', [], { 'symmetrical': 'False', 'to': u"orm['main.TextAd']", 'null': 'True', 'blank': 'True'}), 'text_count': ( 'django.db.models.fields.PositiveIntegerField', [], {'default': '0' }), 'video_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.VideoAd']", 'null': 'True', 'blank': 'True'}), 'video_count': ( 'django.db.models.fields.PositiveIntegerField', [], {'default': '0' })}, u'main.imagead': {'Meta': {'object_name': 'ImageAd'}, 'datelist': ('main.fields.DateArrayField', [], {}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}), 'prolongation': ( 'django.db.models.fields.TimeField', [], {}), 'terminals': ( 'django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'})}, u'main.immediatelyad': {'Meta': {'object_name': 'ImmediatelyAd'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), 'day': ( 'django.db.models.fields.related.ForeignKey', [], {'related_name': 
"u'immediatelies'", 'to': u"orm['main.Days']"}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], { }), 'time': ('django.db.models.fields.TimeField', [], {})}, u'main.oscommandlog': {'Meta': {'object_name': 'OsCommandLog'}, 'command': ('django.db.models.fields.TextField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], { 'auto_now': 'True', 'blank': 'True'}), 'errors': ( 'django.db.models.fields.TextField', [], {}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ouput': ('django.db.models.fields.TextField', [], {}), 'return_code': ('django.db.models.fields.CharField', [], { 'max_length': '255'})}, u'main.partner': {'Meta': {'object_name': 'Partner'}, 'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bank': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bik': ( 'django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'director': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'full_name': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'kpp': ( 'django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'ks': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'legal_address': ( 'django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}), 'name': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'ogrn': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'partner_type': ( 'django.db.models.fields.PositiveIntegerField', [], {'default': '0' }), 'passport': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'phones': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'short_name': ( 'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})}, u'main.terminal': {'Meta': { 'object_name': 'Terminal'}, 'config': ( 'django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], { 'primary_key': 'True'}), 'text': ( 'django.db.models.fields.TextField', [], {})}, u'main.textad': { 'Meta': {'object_name': 'TextAd'}, 'datelist': ( 'main.fields.DateArrayField', [], {}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}), 'terminals': ( 'django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'}), 'text': ( 'django.db.models.fields.TextField', [], {})}, u'main.videoad': { 'Meta': {'object_name': 'VideoAd'}, 'compress': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'datelist': ('main.fields.DateArrayField', [], {}), 'file_video': ( 'filebrowser.fields.FileBrowseField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), u'id': ( 
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}), 'prolongation': ( 'django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}), 'terminals': ( 'django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'})}} complete_apps = ['main'] <|reserved_special_token_1|> from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): db.add_column(u'main_videoad', 'compress', self.gf( 'django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): db.delete_column(u'main_videoad', 'compress') models = {u'contenttypes.contenttype': {'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], { 'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ( 'django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})}, u'main.days': {'Meta': {'object_name': 'Days'}, 'date': ( 'django.db.models.fields.DateField', [], {}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.ImageAd']", 'null': 'True', 'blank': 'True'}), 'show_text': ( 'django.db.models.fields.BooleanField', [], {}), 'show_video': ( 'django.db.models.fields.BooleanField', [], {}), 'start_time': ( 'django.db.models.fields.TimeField', [], {'default': 'datetime.time(8, 0)'}), 'stop_time': ( 'django.db.models.fields.TimeField', [], {'default': 'datetime.time(22, 0)'}), 'terminal': ( 'django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Terminal']"}), 'text_ad': ( 'django.db.models.fields.related.ManyToManyField', [], { 'symmetrical': 'False', 'to': u"orm['main.TextAd']", 'null': 'True', 'blank': 'True'}), 'text_count': ( 'django.db.models.fields.PositiveIntegerField', [], {'default': '0' }), 'video_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.VideoAd']", 'null': 'True', 'blank': 'True'}), 'video_count': ( 'django.db.models.fields.PositiveIntegerField', [], {'default': '0' })}, u'main.imagead': {'Meta': {'object_name': 'ImageAd'}, 'datelist': ('main.fields.DateArrayField', [], {}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}), 'prolongation': ( 'django.db.models.fields.TimeField', [], {}), 'terminals': ( 'django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'})}, u'main.immediatelyad': {'Meta': {'object_name': 'ImmediatelyAd'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), 'day': ( 'django.db.models.fields.related.ForeignKey', [], {'related_name': "u'immediatelies'", 'to': u"orm['main.Days']"}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': 
('django.db.models.fields.PositiveIntegerField', [], { }), 'time': ('django.db.models.fields.TimeField', [], {})}, u'main.oscommandlog': {'Meta': {'object_name': 'OsCommandLog'}, 'command': ('django.db.models.fields.TextField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], { 'auto_now': 'True', 'blank': 'True'}), 'errors': ( 'django.db.models.fields.TextField', [], {}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ouput': ('django.db.models.fields.TextField', [], {}), 'return_code': ('django.db.models.fields.CharField', [], { 'max_length': '255'})}, u'main.partner': {'Meta': {'object_name': 'Partner'}, 'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bank': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bik': ( 'django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'director': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'full_name': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'kpp': ( 'django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'ks': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'legal_address': ( 'django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}), 'name': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'ogrn': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'partner_type': ( 'django.db.models.fields.PositiveIntegerField', [], {'default': '0' }), 'passport': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'phones': ( 'django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'short_name': ( 'django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})}, u'main.terminal': {'Meta': { 'object_name': 'Terminal'}, 'config': ( 'django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], { 'primary_key': 'True'}), 'text': ( 'django.db.models.fields.TextField', [], {})}, u'main.textad': { 'Meta': {'object_name': 'TextAd'}, 'datelist': ( 'main.fields.DateArrayField', [], {}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}), 'terminals': ( 'django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'}), 'text': ( 'django.db.models.fields.TextField', [], {})}, u'main.videoad': { 'Meta': {'object_name': 'VideoAd'}, 'compress': ( 'django.db.models.fields.BooleanField', [], {'default': 'False'}), 'datelist': ('main.fields.DateArrayField', [], {}), 'file_video': ( 'filebrowser.fields.FileBrowseField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), u'id': ( 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': 
u"orm['main.Partner']"}), 'prolongation': ( 'django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}), 'terminals': ( 'django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'})}} complete_apps = ['main'] <|reserved_special_token_1|> # -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'VideoAd.compress' db.add_column(u'main_videoad', 'compress', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False) def backwards(self, orm): # Deleting field 'VideoAd.compress' db.delete_column(u'main_videoad', 'compress') models = { u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'main.days': { 'Meta': {'object_name': 'Days'}, 'date': ('django.db.models.fields.DateField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.ImageAd']", 'null': 'True', 'blank': 'True'}), 'show_text': ('django.db.models.fields.BooleanField', [], {}), 'show_video': ('django.db.models.fields.BooleanField', [], {}), 'start_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.time(8, 0)'}), 'stop_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.time(22, 0)'}), 'terminal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Terminal']"}), 'text_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.TextAd']", 'null': 'True', 'blank': 'True'}), 'text_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'video_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.VideoAd']", 'null': 'True', 'blank': 'True'}), 'video_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}) }, u'main.imagead': { 'Meta': {'object_name': 'ImageAd'}, 'datelist': ('main.fields.DateArrayField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}), 'prolongation': ('django.db.models.fields.TimeField', [], {}), 'terminals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'}) }, u'main.immediatelyad': { 'Meta': {'object_name': 'ImmediatelyAd'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), 'day': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'immediatelies'", 'to': u"orm['main.Days']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'time': 
('django.db.models.fields.TimeField', [], {}) }, u'main.oscommandlog': { 'Meta': {'object_name': 'OsCommandLog'}, 'command': ('django.db.models.fields.TextField', [], {}), 'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'errors': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ouput': ('django.db.models.fields.TextField', [], {}), 'return_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'main.partner': { 'Meta': {'object_name': 'Partner'}, 'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bank': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bik': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'director': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'inn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'kpp': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'ks': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'legal_address': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'ogrn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'partner_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'passport': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'phones': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'short_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}) }, u'main.terminal': { 'Meta': {'object_name': 'Terminal'}, 'config': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'text': ('django.db.models.fields.TextField', [], {}) }, u'main.textad': { 'Meta': {'object_name': 'TextAd'}, 'datelist': ('main.fields.DateArrayField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}), 'terminals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'}), 'text': ('django.db.models.fields.TextField', [], {}) }, u'main.videoad': { 'Meta': {'object_name': 'VideoAd'}, 'compress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'datelist': ('main.fields.DateArrayField', [], {}), 'file_video': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}), 'prolongation': ('django.db.models.fields.TimeField', [], {'null': 'True', 
'blank': 'True'}), 'terminals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'}) } } complete_apps = ['main']
flexible
{ "blob_id": "b4bcf9903f4a34c8b256c65cada29e952a436f74", "index": 2215, "step-1": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'main_videoad', 'compress', self.gf(\n 'django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'main_videoad', 'compress', self.gf(\n 'django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'main_videoad', 'compress')\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'main_videoad', 'compress', self.gf(\n 'django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'main_videoad', 'compress')\n models = {u'contenttypes.contenttype': {'Meta': {'ordering':\n \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\",\n 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {\n 'max_length': '100'}), u'id': ('django.db.models.fields.AutoField',\n [], {'primary_key': 'True'}), 'model': (\n 'django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length':\n '100'})}, u'main.days': {'Meta': {'object_name': 'Days'}, 'date': (\n 'django.db.models.fields.DateField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image_ad': ('django.db.models.fields.related.ManyToManyField', [],\n {'symmetrical': 'False', 'to': u\"orm['main.ImageAd']\", 'null':\n 'True', 'blank': 'True'}), 'show_text': (\n 'django.db.models.fields.BooleanField', [], {}), 'show_video': (\n 'django.db.models.fields.BooleanField', [], {}), 'start_time': (\n 'django.db.models.fields.TimeField', [], {'default':\n 'datetime.time(8, 0)'}), 'stop_time': (\n 'django.db.models.fields.TimeField', [], {'default':\n 'datetime.time(22, 0)'}), 'terminal': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Terminal']\"}), 'text_ad': (\n 'django.db.models.fields.related.ManyToManyField', [], {\n 'symmetrical': 'False', 'to': u\"orm['main.TextAd']\", 'null': 'True',\n 'blank': 'True'}), 'text_count': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n }), 'video_ad': ('django.db.models.fields.related.ManyToManyField',\n [], {'symmetrical': 'False', 'to': u\"orm['main.VideoAd']\", 'null':\n 'True', 'blank': 'True'}), 'video_count': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n })}, u'main.imagead': {'Meta': {'object_name': 'ImageAd'},\n 'datelist': ('main.fields.DateArrayField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('filebrowser.fields.FileBrowseField', [], {'max_length':\n '255'}), 'partner': ('django.db.models.fields.related.ForeignKey',\n [], {'to': u\"orm['main.Partner']\"}), 'prolongation': (\n 'django.db.models.fields.TimeField', [], {}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'})},\n u'main.immediatelyad': {'Meta': {'object_name': 'ImmediatelyAd'},\n 'content_type': ('django.db.models.fields.related.ForeignKey', [],\n {'to': 
u\"orm['contenttypes.ContentType']\"}), 'day': (\n 'django.db.models.fields.related.ForeignKey', [], {'related_name':\n \"u'immediatelies'\", 'to': u\"orm['main.Days']\"}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {\n }), 'time': ('django.db.models.fields.TimeField', [], {})},\n u'main.oscommandlog': {'Meta': {'object_name': 'OsCommandLog'},\n 'command': ('django.db.models.fields.TextField', [], {}),\n 'datetime': ('django.db.models.fields.DateTimeField', [], {\n 'auto_now': 'True', 'blank': 'True'}), 'errors': (\n 'django.db.models.fields.TextField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'ouput': ('django.db.models.fields.TextField', [], {}),\n 'return_code': ('django.db.models.fields.CharField', [], {\n 'max_length': '255'})}, u'main.partner': {'Meta': {'object_name':\n 'Partner'}, 'account_number': ('django.db.models.fields.CharField',\n [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bank':\n ('django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'bik': (\n 'django.db.models.fields.CharField', [], {'max_length': '100',\n 'null': 'True', 'blank': 'True'}), 'director': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'full_name': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'inn': ('django.db.models.fields.CharField', [], {'max_length':\n '50', 'null': 'True', 'blank': 'True'}), 'kpp': (\n 'django.db.models.fields.CharField', [], {'max_length': '50',\n 'null': 'True', 'blank': 'True'}), 'ks': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'legal_address': (\n 'django.db.models.fields.CharField', [], {'max_length': '400',\n 'null': 'True', 'blank': 'True'}), 'name': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'ogrn': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'partner_type': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n }), 'passport': ('django.db.models.fields.TextField', [], {'null':\n 'True', 'blank': 'True'}), 'phones': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'short_name': (\n 'django.db.models.fields.CharField', [], {'max_length': '500',\n 'null': 'True', 'blank': 'True'})}, u'main.terminal': {'Meta': {\n 'object_name': 'Terminal'}, 'config': (\n 'django.db.models.fields.TextField', [], {'null': 'True', 'blank':\n 'True'}), u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'text': (\n 'django.db.models.fields.TextField', [], {})}, u'main.textad': {\n 'Meta': {'object_name': 'TextAd'}, 'datelist': (\n 'main.fields.DateArrayField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Partner']\"}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'}), 'text': (\n 'django.db.models.fields.TextField', [], {})}, u'main.videoad': {\n 'Meta': {'object_name': 'VideoAd'}, 'compress': (\n 
'django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'datelist': ('main.fields.DateArrayField', [], {}), 'file_video': (\n 'filebrowser.fields.FileBrowseField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Partner']\"}), 'prolongation': (\n 'django.db.models.fields.TimeField', [], {'null': 'True', 'blank':\n 'True'}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'})}}\n complete_apps = ['main']\n", "step-4": "from south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'main_videoad', 'compress', self.gf(\n 'django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'main_videoad', 'compress')\n models = {u'contenttypes.contenttype': {'Meta': {'ordering':\n \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\",\n 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {\n 'max_length': '100'}), u'id': ('django.db.models.fields.AutoField',\n [], {'primary_key': 'True'}), 'model': (\n 'django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length':\n '100'})}, u'main.days': {'Meta': {'object_name': 'Days'}, 'date': (\n 'django.db.models.fields.DateField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image_ad': ('django.db.models.fields.related.ManyToManyField', [],\n {'symmetrical': 'False', 'to': u\"orm['main.ImageAd']\", 'null':\n 'True', 'blank': 'True'}), 'show_text': (\n 'django.db.models.fields.BooleanField', [], {}), 'show_video': (\n 'django.db.models.fields.BooleanField', [], {}), 'start_time': (\n 'django.db.models.fields.TimeField', [], {'default':\n 'datetime.time(8, 0)'}), 'stop_time': (\n 'django.db.models.fields.TimeField', [], {'default':\n 'datetime.time(22, 0)'}), 'terminal': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Terminal']\"}), 'text_ad': (\n 'django.db.models.fields.related.ManyToManyField', [], {\n 'symmetrical': 'False', 'to': u\"orm['main.TextAd']\", 'null': 'True',\n 'blank': 'True'}), 'text_count': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n }), 'video_ad': ('django.db.models.fields.related.ManyToManyField',\n [], {'symmetrical': 'False', 'to': u\"orm['main.VideoAd']\", 'null':\n 'True', 'blank': 'True'}), 'video_count': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n })}, u'main.imagead': {'Meta': {'object_name': 'ImageAd'},\n 'datelist': ('main.fields.DateArrayField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('filebrowser.fields.FileBrowseField', [], {'max_length':\n '255'}), 'partner': ('django.db.models.fields.related.ForeignKey',\n [], {'to': u\"orm['main.Partner']\"}), 'prolongation': (\n 'django.db.models.fields.TimeField', [], {}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'})},\n u'main.immediatelyad': {'Meta': {'object_name': 
'ImmediatelyAd'},\n 'content_type': ('django.db.models.fields.related.ForeignKey', [],\n {'to': u\"orm['contenttypes.ContentType']\"}), 'day': (\n 'django.db.models.fields.related.ForeignKey', [], {'related_name':\n \"u'immediatelies'\", 'to': u\"orm['main.Days']\"}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {\n }), 'time': ('django.db.models.fields.TimeField', [], {})},\n u'main.oscommandlog': {'Meta': {'object_name': 'OsCommandLog'},\n 'command': ('django.db.models.fields.TextField', [], {}),\n 'datetime': ('django.db.models.fields.DateTimeField', [], {\n 'auto_now': 'True', 'blank': 'True'}), 'errors': (\n 'django.db.models.fields.TextField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'ouput': ('django.db.models.fields.TextField', [], {}),\n 'return_code': ('django.db.models.fields.CharField', [], {\n 'max_length': '255'})}, u'main.partner': {'Meta': {'object_name':\n 'Partner'}, 'account_number': ('django.db.models.fields.CharField',\n [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bank':\n ('django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'bik': (\n 'django.db.models.fields.CharField', [], {'max_length': '100',\n 'null': 'True', 'blank': 'True'}), 'director': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'full_name': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'inn': ('django.db.models.fields.CharField', [], {'max_length':\n '50', 'null': 'True', 'blank': 'True'}), 'kpp': (\n 'django.db.models.fields.CharField', [], {'max_length': '50',\n 'null': 'True', 'blank': 'True'}), 'ks': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'legal_address': (\n 'django.db.models.fields.CharField', [], {'max_length': '400',\n 'null': 'True', 'blank': 'True'}), 'name': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'ogrn': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'partner_type': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n }), 'passport': ('django.db.models.fields.TextField', [], {'null':\n 'True', 'blank': 'True'}), 'phones': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'short_name': (\n 'django.db.models.fields.CharField', [], {'max_length': '500',\n 'null': 'True', 'blank': 'True'})}, u'main.terminal': {'Meta': {\n 'object_name': 'Terminal'}, 'config': (\n 'django.db.models.fields.TextField', [], {'null': 'True', 'blank':\n 'True'}), u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'text': (\n 'django.db.models.fields.TextField', [], {})}, u'main.textad': {\n 'Meta': {'object_name': 'TextAd'}, 'datelist': (\n 'main.fields.DateArrayField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Partner']\"}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'}), 'text': (\n 'django.db.models.fields.TextField', 
[], {})}, u'main.videoad': {\n 'Meta': {'object_name': 'VideoAd'}, 'compress': (\n 'django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'datelist': ('main.fields.DateArrayField', [], {}), 'file_video': (\n 'filebrowser.fields.FileBrowseField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Partner']\"}), 'prolongation': (\n 'django.db.models.fields.TimeField', [], {'null': 'True', 'blank':\n 'True'}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'})}}\n complete_apps = ['main']\n", "step-5": "# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'VideoAd.compress'\n db.add_column(u'main_videoad', 'compress',\n self.gf('django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n\n\n def backwards(self, orm):\n # Deleting field 'VideoAd.compress'\n db.delete_column(u'main_videoad', 'compress')\n\n\n models = {\n u'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n u'main.days': {\n 'Meta': {'object_name': 'Days'},\n 'date': ('django.db.models.fields.DateField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['main.ImageAd']\", 'null': 'True', 'blank': 'True'}),\n 'show_text': ('django.db.models.fields.BooleanField', [], {}),\n 'show_video': ('django.db.models.fields.BooleanField', [], {}),\n 'start_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.time(8, 0)'}),\n 'stop_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.time(22, 0)'}),\n 'terminal': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Terminal']\"}),\n 'text_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['main.TextAd']\", 'null': 'True', 'blank': 'True'}),\n 'text_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),\n 'video_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['main.VideoAd']\", 'null': 'True', 'blank': 'True'}),\n 'video_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})\n },\n u'main.imagead': {\n 'Meta': {'object_name': 'ImageAd'},\n 'datelist': ('main.fields.DateArrayField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Partner']\"}),\n 'prolongation': ('django.db.models.fields.TimeField', [], {}),\n 'terminals': 
('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['main.Terminal']\", 'symmetrical': 'False'})\n },\n u'main.immediatelyad': {\n 'Meta': {'object_name': 'ImmediatelyAd'},\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\"}),\n 'day': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"u'immediatelies'\", 'to': u\"orm['main.Days']\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),\n 'time': ('django.db.models.fields.TimeField', [], {})\n },\n u'main.oscommandlog': {\n 'Meta': {'object_name': 'OsCommandLog'},\n 'command': ('django.db.models.fields.TextField', [], {}),\n 'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'errors': ('django.db.models.fields.TextField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'ouput': ('django.db.models.fields.TextField', [], {}),\n 'return_code': ('django.db.models.fields.CharField', [], {'max_length': '255'})\n },\n u'main.partner': {\n 'Meta': {'object_name': 'Partner'},\n 'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'bank': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'bik': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'director': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'inn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),\n 'kpp': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),\n 'ks': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'legal_address': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'ogrn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'partner_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),\n 'passport': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n 'phones': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'short_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})\n },\n u'main.terminal': {\n 'Meta': {'object_name': 'Terminal'},\n 'config': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'text': ('django.db.models.fields.TextField', [], {})\n },\n u'main.textad': {\n 'Meta': {'object_name': 'TextAd'},\n 'datelist': ('main.fields.DateArrayField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Partner']\"}),\n 'terminals': 
('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['main.Terminal']\", 'symmetrical': 'False'}),\n 'text': ('django.db.models.fields.TextField', [], {})\n },\n u'main.videoad': {\n 'Meta': {'object_name': 'VideoAd'},\n 'compress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'datelist': ('main.fields.DateArrayField', [], {}),\n 'file_video': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Partner']\"}),\n 'prolongation': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),\n 'terminals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['main.Terminal']\", 'symmetrical': 'False'})\n }\n }\n\n complete_apps = ['main']", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
#!/usr/bin/env python from LCClass import LightCurve import matplotlib.pyplot as plt import niutils def main(): lc1821 = LightCurve("PSR_B1821-24/PSR_B1821-24_combined.evt") lc0218 = LightCurve("PSR_J0218+4232/PSR_J0218+4232_combined.evt") fig, ax = plt.subplots(2, 1, figsize=(8, 8)) ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate=False) ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False) ax[1].set_xlabel("Pulse Phase", fontsize=25) ax[0].text(.08, .95, r'PSR B1821$-$24', ha='left', va='top', fontsize=20, transform=ax[0].transAxes, bbox=dict(facecolor='white', edgecolor='none', alpha=0.6)) ax[1].text(.08, .95, r'PSR J0218$+$4232', ha='left', va='top', fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white', edgecolor='none', alpha=0.6)) ax[0].tick_params(labelbottom=False) #plt.setp(ax[0].get_yticklabels()[0], visible=False) fig.text(.04, .5, r'Photon Counts', ha='center', va='center', rotation='vertical', fontsize=25) plt.subplots_adjust(hspace=0, bottom=.08, top=.94, right=.98, left=.15) fig.savefig("poster_plot.svg") if __name__ == '__main__': main()
normal
{ "blob_id": "48311ee17a3f2eca8db32d7672f540fa45a7a900", "index": 3524, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')\n lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate\n =False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False\n )\n ax[1].set_xlabel('Pulse Phase', fontsize=25)\n ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=\n 20, transform=ax[0].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',\n fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[0].tick_params(labelbottom=False)\n fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation\n ='vertical', fontsize=25)\n plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)\n fig.savefig('poster_plot.svg')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')\n lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate\n =False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False\n )\n ax[1].set_xlabel('Pulse Phase', fontsize=25)\n ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=\n 20, transform=ax[0].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',\n fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[0].tick_params(labelbottom=False)\n fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation\n ='vertical', fontsize=25)\n plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)\n fig.savefig('poster_plot.svg')\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "from LCClass import LightCurve\nimport matplotlib.pyplot as plt\nimport niutils\n\n\ndef main():\n lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')\n lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate\n =False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False\n )\n ax[1].set_xlabel('Pulse Phase', fontsize=25)\n ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=\n 20, transform=ax[0].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',\n fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[0].tick_params(labelbottom=False)\n fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation\n ='vertical', fontsize=25)\n plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)\n fig.savefig('poster_plot.svg')\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/env python\n\nfrom LCClass import LightCurve\nimport matplotlib.pyplot as plt\nimport niutils\n\ndef main():\n lc1821 = LightCurve(\"PSR_B1821-24/PSR_B1821-24_combined.evt\")\n lc0218 = 
LightCurve(\"PSR_J0218+4232/PSR_J0218+4232_combined.evt\")\n\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate=False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False)\n\n ax[1].set_xlabel(\"Pulse Phase\", fontsize=25)\n ax[0].text(.08, .95, r'PSR B1821$-$24', ha='left', va='top', \n fontsize=20, transform=ax[0].transAxes,\n bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))\n ax[1].text(.08, .95, r'PSR J0218$+$4232', ha='left', va='top', \n fontsize=20, transform=ax[1].transAxes,\n bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))\n\n ax[0].tick_params(labelbottom=False)\n #plt.setp(ax[0].get_yticklabels()[0], visible=False)\n \n fig.text(.04, .5, r'Photon Counts', ha='center', va='center',\n rotation='vertical', fontsize=25)\n\n plt.subplots_adjust(hspace=0, bottom=.08, top=.94, right=.98, left=.15)\n\n fig.savefig(\"poster_plot.svg\")\n\n\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> urlpatterns = [path('', views.home, name='park-home'), path('login/', views .login, name='park-login')] <|reserved_special_token_1|> from django.urls import path from . import views urlpatterns = [path('', views.home, name='park-home'), path('login/', views .login, name='park-login')] <|reserved_special_token_1|> from django.urls import path from . import views urlpatterns = [ path('', views.home, name ='park-home'), path('login/', views.login, name ='park-login'), ]
flexible
{ "blob_id": "2fd490ca54f5d038997cec59a3e07c3f2c2d2538", "index": 6757, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('', views.home, name='park-home'), path('login/', views\n .login, name='park-login')]\n", "step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.home, name='park-home'), path('login/', views\n .login, name='park-login')]\n", "step-4": "from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.home, name ='park-home'), \n path('login/', views.login, name ='park-login'), \n]", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class AttentionAgent(object): <|reserved_special_token_0|> def __init__(self, num_in_pol, num_out_pol, hidden_dim=64, lr=0.01, onehot_dim=0): """ Inputs: num_in_pol (int): number of dimensions for policy input num_out_pol (int): number of dimensions for policy output """ self.policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim= hidden_dim, onehot_dim=onehot_dim) self.target_policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim=hidden_dim, onehot_dim=onehot_dim) hard_update(self.target_policy, self.policy) self.policy_optimizer = Adam(self.policy.parameters(), lr=lr) <|reserved_special_token_0|> def get_params(self): return {'policy': self.policy.state_dict(), 'target_policy': self. target_policy.state_dict(), 'policy_optimizer': self. policy_optimizer.state_dict()} def load_params(self, params): self.policy.load_state_dict(params['policy']) self.target_policy.load_state_dict(params['target_policy']) self.policy_optimizer.load_state_dict(params['policy_optimizer']) <|reserved_special_token_1|> <|reserved_special_token_0|> class AttentionAgent(object): <|reserved_special_token_0|> def __init__(self, num_in_pol, num_out_pol, hidden_dim=64, lr=0.01, onehot_dim=0): """ Inputs: num_in_pol (int): number of dimensions for policy input num_out_pol (int): number of dimensions for policy output """ self.policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim= hidden_dim, onehot_dim=onehot_dim) self.target_policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim=hidden_dim, onehot_dim=onehot_dim) hard_update(self.target_policy, self.policy) self.policy_optimizer = Adam(self.policy.parameters(), lr=lr) def step(self, obs, explore=False): """ Take a step forward in environment for a minibatch of observations Inputs: obs (PyTorch Variable): Observations for this agent explore (boolean): Whether or not to sample Outputs: action (PyTorch Variable): Actions for this agent """ return self.policy(obs, sample=explore) def get_params(self): return {'policy': self.policy.state_dict(), 'target_policy': self. target_policy.state_dict(), 'policy_optimizer': self. policy_optimizer.state_dict()} def load_params(self, params): self.policy.load_state_dict(params['policy']) self.target_policy.load_state_dict(params['target_policy']) self.policy_optimizer.load_state_dict(params['policy_optimizer']) <|reserved_special_token_1|> <|reserved_special_token_0|> class AttentionAgent(object): """ General class for Attention agents (policy, target policy) """ def __init__(self, num_in_pol, num_out_pol, hidden_dim=64, lr=0.01, onehot_dim=0): """ Inputs: num_in_pol (int): number of dimensions for policy input num_out_pol (int): number of dimensions for policy output """ self.policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim= hidden_dim, onehot_dim=onehot_dim) self.target_policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim=hidden_dim, onehot_dim=onehot_dim) hard_update(self.target_policy, self.policy) self.policy_optimizer = Adam(self.policy.parameters(), lr=lr) def step(self, obs, explore=False): """ Take a step forward in environment for a minibatch of observations Inputs: obs (PyTorch Variable): Observations for this agent explore (boolean): Whether or not to sample Outputs: action (PyTorch Variable): Actions for this agent """ return self.policy(obs, sample=explore) def get_params(self): return {'policy': self.policy.state_dict(), 'target_policy': self. target_policy.state_dict(), 'policy_optimizer': self. 
policy_optimizer.state_dict()} def load_params(self, params): self.policy.load_state_dict(params['policy']) self.target_policy.load_state_dict(params['target_policy']) self.policy_optimizer.load_state_dict(params['policy_optimizer']) <|reserved_special_token_1|> from torch import Tensor from torch.autograd import Variable from torch.optim import Adam from maac.utils.misc import hard_update, onehot_from_logits from maac.utils.policies import DiscretePolicy class AttentionAgent(object): """ General class for Attention agents (policy, target policy) """ def __init__(self, num_in_pol, num_out_pol, hidden_dim=64, lr=0.01, onehot_dim=0): """ Inputs: num_in_pol (int): number of dimensions for policy input num_out_pol (int): number of dimensions for policy output """ self.policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim= hidden_dim, onehot_dim=onehot_dim) self.target_policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim=hidden_dim, onehot_dim=onehot_dim) hard_update(self.target_policy, self.policy) self.policy_optimizer = Adam(self.policy.parameters(), lr=lr) def step(self, obs, explore=False): """ Take a step forward in environment for a minibatch of observations Inputs: obs (PyTorch Variable): Observations for this agent explore (boolean): Whether or not to sample Outputs: action (PyTorch Variable): Actions for this agent """ return self.policy(obs, sample=explore) def get_params(self): return {'policy': self.policy.state_dict(), 'target_policy': self. target_policy.state_dict(), 'policy_optimizer': self. policy_optimizer.state_dict()} def load_params(self, params): self.policy.load_state_dict(params['policy']) self.target_policy.load_state_dict(params['target_policy']) self.policy_optimizer.load_state_dict(params['policy_optimizer'])
flexible
{ "blob_id": "845d04312abc0e64a7810b52bbee333d2bdf3dfb", "index": 7164, "step-1": "<mask token>\n\n\nclass AttentionAgent(object):\n <mask token>\n\n def __init__(self, num_in_pol, num_out_pol, hidden_dim=64, lr=0.01,\n onehot_dim=0):\n \"\"\"\n Inputs:\n num_in_pol (int): number of dimensions for policy input\n num_out_pol (int): number of dimensions for policy output\n \"\"\"\n self.policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim=\n hidden_dim, onehot_dim=onehot_dim)\n self.target_policy = DiscretePolicy(num_in_pol, num_out_pol,\n hidden_dim=hidden_dim, onehot_dim=onehot_dim)\n hard_update(self.target_policy, self.policy)\n self.policy_optimizer = Adam(self.policy.parameters(), lr=lr)\n <mask token>\n\n def get_params(self):\n return {'policy': self.policy.state_dict(), 'target_policy': self.\n target_policy.state_dict(), 'policy_optimizer': self.\n policy_optimizer.state_dict()}\n\n def load_params(self, params):\n self.policy.load_state_dict(params['policy'])\n self.target_policy.load_state_dict(params['target_policy'])\n self.policy_optimizer.load_state_dict(params['policy_optimizer'])\n", "step-2": "<mask token>\n\n\nclass AttentionAgent(object):\n <mask token>\n\n def __init__(self, num_in_pol, num_out_pol, hidden_dim=64, lr=0.01,\n onehot_dim=0):\n \"\"\"\n Inputs:\n num_in_pol (int): number of dimensions for policy input\n num_out_pol (int): number of dimensions for policy output\n \"\"\"\n self.policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim=\n hidden_dim, onehot_dim=onehot_dim)\n self.target_policy = DiscretePolicy(num_in_pol, num_out_pol,\n hidden_dim=hidden_dim, onehot_dim=onehot_dim)\n hard_update(self.target_policy, self.policy)\n self.policy_optimizer = Adam(self.policy.parameters(), lr=lr)\n\n def step(self, obs, explore=False):\n \"\"\"\n Take a step forward in environment for a minibatch of observations\n Inputs:\n obs (PyTorch Variable): Observations for this agent\n explore (boolean): Whether or not to sample\n Outputs:\n action (PyTorch Variable): Actions for this agent\n \"\"\"\n return self.policy(obs, sample=explore)\n\n def get_params(self):\n return {'policy': self.policy.state_dict(), 'target_policy': self.\n target_policy.state_dict(), 'policy_optimizer': self.\n policy_optimizer.state_dict()}\n\n def load_params(self, params):\n self.policy.load_state_dict(params['policy'])\n self.target_policy.load_state_dict(params['target_policy'])\n self.policy_optimizer.load_state_dict(params['policy_optimizer'])\n", "step-3": "<mask token>\n\n\nclass AttentionAgent(object):\n \"\"\"\n General class for Attention agents (policy, target policy)\n \"\"\"\n\n def __init__(self, num_in_pol, num_out_pol, hidden_dim=64, lr=0.01,\n onehot_dim=0):\n \"\"\"\n Inputs:\n num_in_pol (int): number of dimensions for policy input\n num_out_pol (int): number of dimensions for policy output\n \"\"\"\n self.policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim=\n hidden_dim, onehot_dim=onehot_dim)\n self.target_policy = DiscretePolicy(num_in_pol, num_out_pol,\n hidden_dim=hidden_dim, onehot_dim=onehot_dim)\n hard_update(self.target_policy, self.policy)\n self.policy_optimizer = Adam(self.policy.parameters(), lr=lr)\n\n def step(self, obs, explore=False):\n \"\"\"\n Take a step forward in environment for a minibatch of observations\n Inputs:\n obs (PyTorch Variable): Observations for this agent\n explore (boolean): Whether or not to sample\n Outputs:\n action (PyTorch Variable): Actions for this agent\n \"\"\"\n return self.policy(obs, sample=explore)\n\n def 
get_params(self):\n return {'policy': self.policy.state_dict(), 'target_policy': self.\n target_policy.state_dict(), 'policy_optimizer': self.\n policy_optimizer.state_dict()}\n\n def load_params(self, params):\n self.policy.load_state_dict(params['policy'])\n self.target_policy.load_state_dict(params['target_policy'])\n self.policy_optimizer.load_state_dict(params['policy_optimizer'])\n", "step-4": "from torch import Tensor\nfrom torch.autograd import Variable\nfrom torch.optim import Adam\nfrom maac.utils.misc import hard_update, onehot_from_logits\nfrom maac.utils.policies import DiscretePolicy\n\n\nclass AttentionAgent(object):\n \"\"\"\n General class for Attention agents (policy, target policy)\n \"\"\"\n\n def __init__(self, num_in_pol, num_out_pol, hidden_dim=64, lr=0.01,\n onehot_dim=0):\n \"\"\"\n Inputs:\n num_in_pol (int): number of dimensions for policy input\n num_out_pol (int): number of dimensions for policy output\n \"\"\"\n self.policy = DiscretePolicy(num_in_pol, num_out_pol, hidden_dim=\n hidden_dim, onehot_dim=onehot_dim)\n self.target_policy = DiscretePolicy(num_in_pol, num_out_pol,\n hidden_dim=hidden_dim, onehot_dim=onehot_dim)\n hard_update(self.target_policy, self.policy)\n self.policy_optimizer = Adam(self.policy.parameters(), lr=lr)\n\n def step(self, obs, explore=False):\n \"\"\"\n Take a step forward in environment for a minibatch of observations\n Inputs:\n obs (PyTorch Variable): Observations for this agent\n explore (boolean): Whether or not to sample\n Outputs:\n action (PyTorch Variable): Actions for this agent\n \"\"\"\n return self.policy(obs, sample=explore)\n\n def get_params(self):\n return {'policy': self.policy.state_dict(), 'target_policy': self.\n target_policy.state_dict(), 'policy_optimizer': self.\n policy_optimizer.state_dict()}\n\n def load_params(self, params):\n self.policy.load_state_dict(params['policy'])\n self.target_policy.load_state_dict(params['target_policy'])\n self.policy_optimizer.load_state_dict(params['policy_optimizer'])\n", "step-5": null, "step-ids": [ 4, 5, 6, 7 ] }
[ 4, 5, 6, 7 ]
/home/sbm367/anaconda3/lib/python3.5/types.py
normal
{ "blob_id": "720d37e35eb335cc68ff27763cfe5c52f76b98d2", "index": 5781, "step-1": "/home/sbm367/anaconda3/lib/python3.5/types.py", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
try:
    alp="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    idx=eval(input("Please enter an integer"))
    print(alp[idx])

except NameError:
    print("Input error, please enter an integer")

except:
    print("Other error")

else:
    print("No error occurred")

finally:
    print("Program finished; it is unknown whether an exception occurred")
normal
{ "blob_id": "99a6b450792d434e18b8f9ff350c72abe5366d95", "index": 153, "step-1": "<mask token>\n", "step-2": "try:\n alp = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n idx = eval(input('请输入一个整数'))\n print(alp[idx])\nexcept NameError:\n print('输入错误,请输入一个整数')\nexcept:\n print('其他错误')\nelse:\n print('没有发生错误')\nfinally:\n print('程序执行完毕,不知道是否发生了异常')\n", "step-3": "try:\r\n alp=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n idx=eval(input(\"请输入一个整数\"))\r\n print(alp[idx])\r\n\r\nexcept NameError:\r\n print(\"输入错误,请输入一个整数\")\r\n\r\nexcept:\r\n print(\"其他错误\")\r\n\r\nelse:\r\n print(\"没有发生错误\")\r\n\r\nfinally:\r\n print(\"程序执行完毕,不知道是否发生了异常\")\r\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import errno import os import shutil from calendar import monthrange from datetime import datetime, timedelta from pavilion import output from pavilion import commands from pavilion.status_file import STATES from pavilion.test_run import TestRun, TestRunError, TestRunNotFoundError class CleanCommand(commands.Command): """Cleans outdated test and series run directories.""" def __init__(self): super().__init__( 'clean', 'Clean up Pavilion working directory.', short_help="Clean up Pavilion working diretory." ) def _setup_arguments(self, parser): parser.add_argument( '-v', '--verbose', action='store_true', default=False, help='Verbose output.' ) parser.add_argument( '--older-than', nargs='+', action='store', help='Set the max age of files to be removed. Can be a date ex:' '"Jan 1 2019" or , or a number of days/weeks ex:"32 weeks"' ) def run(self, pav_cfg, args): """Run this command.""" if args.older_than: if 'day' in args.older_than or 'days' in args.older_than: cutoff_date = datetime.today() - timedelta( days=int(args.older_than[0])) elif 'week' in args.older_than or 'weeks' in args.older_than: cutoff_date = datetime.today() - timedelta( weeks=int(args.older_than[0])) elif 'month' in args.older_than or 'months' in args.older_than: cutoff_date = get_month_delta(int(args.older_than[0])) else: date = ' '.join(args.older_than) try: cutoff_date = datetime.strptime(date, '%b %d %Y') except (TypeError, ValueError): output.fprint("{} is not a valid date." .format(args.older_than), file=self.errfile, color=output.RED) return errno.EINVAL # No cutoff specified, removes everything. else: cutoff_date = datetime.today() tests_dir = pav_cfg.working_dir / 'test_runs' series_dir = pav_cfg.working_dir / 'series' download_dir = pav_cfg.working_dir / 'downloads' build_dir = pav_cfg.working_dir / 'builds' dependent_builds = [] incomplete_tests = [] # Clean Tests output.fprint("Removing Tests...", file=self.outfile, color=output.GREEN) for test in os.listdir(tests_dir.as_posix()): test_time = datetime.fromtimestamp( os.path.getmtime((tests_dir / test).as_posix())) try: test_obj = TestRun.load(pav_cfg, int(test)) status = test_obj.status.current().state except (TestRunError, TestRunNotFoundError): output.fprint("Removing bad test directory {}".format(test), file=self.outfile) shutil.rmtree(tests_dir.as_posix()) continue except PermissionError as err: err = str(err).split("'") output.fprint("Permission Error: {} cannot be removed" .format(err[1]), file=self.errfile, color=31) if test_time < cutoff_date and status != STATES.RUNNING \ and status != STATES.SCHEDULED: shutil.rmtree((tests_dir / test).as_posix()) if args.verbose: output.fprint("Removed test {}".format(test), file=self.outfile) else: if args.verbose: output.fprint("Skipped test {}".format(test), file=self.outfile) incomplete_tests.append(test) dependent_builds.append(test_obj.build_name) # Clean Series completed_series = True output.fprint("Removing Series...", file=self.outfile, color=output.GREEN) for series in os.listdir(series_dir.as_posix()): try: series_time = datetime.fromtimestamp( os.path.getmtime((series_dir / series).as_posix())) for test in incomplete_tests: if os.path.exists((series_dir / series / test).as_posix()): completed_series = False if series_time < cutoff_date and completed_series: shutil.rmtree((series_dir / series).as_posix()) if args.verbose: output.fprint("Removed series {}".format(series), file=self.outfile) else: if args.verbose: output.fprint("Skipped series {}".format(series), file=self.outfile) except PermissionError as 
err: err = str(err).split("'") output.fprint("Permission Error: {} cannot be removed" .format(err[1]), file=self.errfile, color=31) # Clean Downloads output.fprint("Removing Downloads...", file=self.outfile, color=output.GREEN) for download in os.listdir(download_dir.as_posix()): try: download_time = datetime.fromtimestamp( os.path.getmtime((download_dir / download).as_posix())) if download_time < cutoff_date: try: shutil.rmtree((download_dir / download).as_posix()) except NotADirectoryError: output.fprint("{} is not a directory.".format(download), file=self.errfile, color=output.RED) os.remove((download_dir / download).as_posix()) if args.verbose: output.fprint("Removed download {}".format(download), file=self.outfile) else: if args.verbose: output.fprint("Skipped download {}".format(download), file=self.outfile) except PermissionError as err: err = str(err).split("'") output.fprint("Permission Error: {} cannot be removed" .format(err[1]), file=self.errfile, color=31) # Clean Builds output.fprint("Removing Builds...", file=self.outfile, color=output.GREEN) for build in os.listdir(build_dir.as_posix()): try: build_time = datetime.fromtimestamp( os.path.getmtime((build_dir / build).as_posix())) if build_time < cutoff_date and build not in dependent_builds: shutil.rmtree((build_dir / build).as_posix()) if args.verbose: output.fprint("Removed build {}".format(build), file=self.outfile) else: if args.verbose: output.fprint("Skipped build {}".format(build), file=self.outfile) except PermissionError as err: err = str(err).split("'") output.fprint("Permission Error: {} cannot be removed. " .format(err[1]), file=self.errfile, color=31) return 0 def get_month_delta(months): """Turn a number of months in the future into a concrete date.""" today = datetime.today() cur_year = today.year cur_day = today.day cur_month = today.month cur_time = today.time if cur_month - months <= 0: cut_month = (cur_month - months) % 12 diff_years = (cur_month - months) // 12 cut_year = cur_year + diff_years else: cut_month = cur_month - months cut_year = cur_year try: cutoff_date = datetime(cut_year, cut_month, cur_day, cur_time) except ValueError: last_day = monthrange(cut_year, cut_month)[1] cutoff_date = datetime(cut_year, cut_month, last_day, cur_time) return cutoff_date
normal
{ "blob_id": "18aafb71d7e6f5caa2f282126c31eb052c08ad3c", "index": 4307, "step-1": "<mask token>\n\n\nclass CleanCommand(commands.Command):\n <mask token>\n\n def __init__(self):\n super().__init__('clean', 'Clean up Pavilion working directory.',\n short_help='Clean up Pavilion working diretory.')\n\n def _setup_arguments(self, parser):\n parser.add_argument('-v', '--verbose', action='store_true', default\n =False, help='Verbose output.')\n parser.add_argument('--older-than', nargs='+', action='store', help\n =\n 'Set the max age of files to be removed. Can be a date ex:\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(days=int(args.\n older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(weeks=int(args.\n older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint('{} is not a valid date.'.format(args.\n older_than), file=self.errfile, color=output.RED)\n return errno.EINVAL\n else:\n cutoff_date = datetime.today()\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n dependent_builds = []\n incomplete_tests = []\n output.fprint('Removing Tests...', file=self.outfile, color=output.\n GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /\n test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, TestRunNotFoundError):\n output.fprint('Removing bad test directory {}'.format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n if (test_time < cutoff_date and status != STATES.RUNNING and \n status != STATES.SCHEDULED):\n shutil.rmtree((tests_dir / test).as_posix())\n if args.verbose:\n output.fprint('Removed test {}'.format(test), file=self\n .outfile)\n else:\n if args.verbose:\n output.fprint('Skipped test {}'.format(test), file=self\n .outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n completed_series = True\n output.fprint('Removing Series...', file=self.outfile, color=output\n .GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(os.path.getmtime((\n series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint('Removed series {}'.format(series),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped series {}'.format(series), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), 
file=self.errfile, color=31)\n output.fprint('Removing Downloads...', file=self.outfile, color=\n output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n download_time = datetime.fromtimestamp(os.path.getmtime((\n download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint('{} is not a directory.'.format(\n download), file=self.errfile, color=output.RED)\n os.remove((download_dir / download).as_posix())\n if args.verbose:\n output.fprint('Removed download {}'.format(download\n ), file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped download {}'.format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Builds...', file=self.outfile, color=output\n .GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(os.path.getmtime((\n build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint('Removed build {}'.format(build),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped build {}'.format(build), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed. '.\n format(err[1]), file=self.errfile, color=31)\n return 0\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass CleanCommand(commands.Command):\n \"\"\"Cleans outdated test and series run directories.\"\"\"\n\n def __init__(self):\n super().__init__('clean', 'Clean up Pavilion working directory.',\n short_help='Clean up Pavilion working diretory.')\n\n def _setup_arguments(self, parser):\n parser.add_argument('-v', '--verbose', action='store_true', default\n =False, help='Verbose output.')\n parser.add_argument('--older-than', nargs='+', action='store', help\n =\n 'Set the max age of files to be removed. 
Can be a date ex:\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(days=int(args.\n older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(weeks=int(args.\n older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint('{} is not a valid date.'.format(args.\n older_than), file=self.errfile, color=output.RED)\n return errno.EINVAL\n else:\n cutoff_date = datetime.today()\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n dependent_builds = []\n incomplete_tests = []\n output.fprint('Removing Tests...', file=self.outfile, color=output.\n GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /\n test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, TestRunNotFoundError):\n output.fprint('Removing bad test directory {}'.format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n if (test_time < cutoff_date and status != STATES.RUNNING and \n status != STATES.SCHEDULED):\n shutil.rmtree((tests_dir / test).as_posix())\n if args.verbose:\n output.fprint('Removed test {}'.format(test), file=self\n .outfile)\n else:\n if args.verbose:\n output.fprint('Skipped test {}'.format(test), file=self\n .outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n completed_series = True\n output.fprint('Removing Series...', file=self.outfile, color=output\n .GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(os.path.getmtime((\n series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint('Removed series {}'.format(series),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped series {}'.format(series), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Downloads...', file=self.outfile, color=\n output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n download_time = datetime.fromtimestamp(os.path.getmtime((\n download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint('{} is not a directory.'.format(\n download), file=self.errfile, color=output.RED)\n os.remove((download_dir / download).as_posix())\n if 
args.verbose:\n output.fprint('Removed download {}'.format(download\n ), file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped download {}'.format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Builds...', file=self.outfile, color=output\n .GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(os.path.getmtime((\n build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint('Removed build {}'.format(build),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped build {}'.format(build), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed. '.\n format(err[1]), file=self.errfile, color=31)\n return 0\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass CleanCommand(commands.Command):\n \"\"\"Cleans outdated test and series run directories.\"\"\"\n\n def __init__(self):\n super().__init__('clean', 'Clean up Pavilion working directory.',\n short_help='Clean up Pavilion working diretory.')\n\n def _setup_arguments(self, parser):\n parser.add_argument('-v', '--verbose', action='store_true', default\n =False, help='Verbose output.')\n parser.add_argument('--older-than', nargs='+', action='store', help\n =\n 'Set the max age of files to be removed. Can be a date ex:\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(days=int(args.\n older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(weeks=int(args.\n older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint('{} is not a valid date.'.format(args.\n older_than), file=self.errfile, color=output.RED)\n return errno.EINVAL\n else:\n cutoff_date = datetime.today()\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n dependent_builds = []\n incomplete_tests = []\n output.fprint('Removing Tests...', file=self.outfile, color=output.\n GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /\n test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, TestRunNotFoundError):\n output.fprint('Removing bad test directory {}'.format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n if (test_time < cutoff_date and status != STATES.RUNNING and \n status != STATES.SCHEDULED):\n shutil.rmtree((tests_dir / test).as_posix())\n if 
args.verbose:\n output.fprint('Removed test {}'.format(test), file=self\n .outfile)\n else:\n if args.verbose:\n output.fprint('Skipped test {}'.format(test), file=self\n .outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n completed_series = True\n output.fprint('Removing Series...', file=self.outfile, color=output\n .GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(os.path.getmtime((\n series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint('Removed series {}'.format(series),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped series {}'.format(series), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Downloads...', file=self.outfile, color=\n output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n download_time = datetime.fromtimestamp(os.path.getmtime((\n download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint('{} is not a directory.'.format(\n download), file=self.errfile, color=output.RED)\n os.remove((download_dir / download).as_posix())\n if args.verbose:\n output.fprint('Removed download {}'.format(download\n ), file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped download {}'.format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Builds...', file=self.outfile, color=output\n .GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(os.path.getmtime((\n build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint('Removed build {}'.format(build),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped build {}'.format(build), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed. 
'.\n format(err[1]), file=self.errfile, color=31)\n return 0\n\n\ndef get_month_delta(months):\n \"\"\"Turn a number of months in the future into a concrete date.\"\"\"\n today = datetime.today()\n cur_year = today.year\n cur_day = today.day\n cur_month = today.month\n cur_time = today.time\n if cur_month - months <= 0:\n cut_month = (cur_month - months) % 12\n diff_years = (cur_month - months) // 12\n cut_year = cur_year + diff_years\n else:\n cut_month = cur_month - months\n cut_year = cur_year\n try:\n cutoff_date = datetime(cut_year, cut_month, cur_day, cur_time)\n except ValueError:\n last_day = monthrange(cut_year, cut_month)[1]\n cutoff_date = datetime(cut_year, cut_month, last_day, cur_time)\n return cutoff_date\n", "step-4": "import errno\nimport os\nimport shutil\nfrom calendar import monthrange\nfrom datetime import datetime, timedelta\nfrom pavilion import output\nfrom pavilion import commands\nfrom pavilion.status_file import STATES\nfrom pavilion.test_run import TestRun, TestRunError, TestRunNotFoundError\n\n\nclass CleanCommand(commands.Command):\n \"\"\"Cleans outdated test and series run directories.\"\"\"\n\n def __init__(self):\n super().__init__('clean', 'Clean up Pavilion working directory.',\n short_help='Clean up Pavilion working diretory.')\n\n def _setup_arguments(self, parser):\n parser.add_argument('-v', '--verbose', action='store_true', default\n =False, help='Verbose output.')\n parser.add_argument('--older-than', nargs='+', action='store', help\n =\n 'Set the max age of files to be removed. Can be a date ex:\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(days=int(args.\n older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(weeks=int(args.\n older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint('{} is not a valid date.'.format(args.\n older_than), file=self.errfile, color=output.RED)\n return errno.EINVAL\n else:\n cutoff_date = datetime.today()\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n dependent_builds = []\n incomplete_tests = []\n output.fprint('Removing Tests...', file=self.outfile, color=output.\n GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(os.path.getmtime((tests_dir /\n test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, TestRunNotFoundError):\n output.fprint('Removing bad test directory {}'.format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n if (test_time < cutoff_date and status != STATES.RUNNING and \n status != STATES.SCHEDULED):\n shutil.rmtree((tests_dir / test).as_posix())\n if args.verbose:\n output.fprint('Removed test {}'.format(test), file=self\n 
.outfile)\n else:\n if args.verbose:\n output.fprint('Skipped test {}'.format(test), file=self\n .outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n completed_series = True\n output.fprint('Removing Series...', file=self.outfile, color=output\n .GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(os.path.getmtime((\n series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint('Removed series {}'.format(series),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped series {}'.format(series), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Downloads...', file=self.outfile, color=\n output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n download_time = datetime.fromtimestamp(os.path.getmtime((\n download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint('{} is not a directory.'.format(\n download), file=self.errfile, color=output.RED)\n os.remove((download_dir / download).as_posix())\n if args.verbose:\n output.fprint('Removed download {}'.format(download\n ), file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped download {}'.format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed'.\n format(err[1]), file=self.errfile, color=31)\n output.fprint('Removing Builds...', file=self.outfile, color=output\n .GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(os.path.getmtime((\n build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint('Removed build {}'.format(build),\n file=self.outfile)\n elif args.verbose:\n output.fprint('Skipped build {}'.format(build), file=\n self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint('Permission Error: {} cannot be removed. 
'.\n format(err[1]), file=self.errfile, color=31)\n return 0\n\n\ndef get_month_delta(months):\n \"\"\"Turn a number of months in the future into a concrete date.\"\"\"\n today = datetime.today()\n cur_year = today.year\n cur_day = today.day\n cur_month = today.month\n cur_time = today.time\n if cur_month - months <= 0:\n cut_month = (cur_month - months) % 12\n diff_years = (cur_month - months) // 12\n cut_year = cur_year + diff_years\n else:\n cut_month = cur_month - months\n cut_year = cur_year\n try:\n cutoff_date = datetime(cut_year, cut_month, cur_day, cur_time)\n except ValueError:\n last_day = monthrange(cut_year, cut_month)[1]\n cutoff_date = datetime(cut_year, cut_month, last_day, cur_time)\n return cutoff_date\n", "step-5": "import errno\nimport os\nimport shutil\nfrom calendar import monthrange\nfrom datetime import datetime, timedelta\n\nfrom pavilion import output\nfrom pavilion import commands\nfrom pavilion.status_file import STATES\nfrom pavilion.test_run import TestRun, TestRunError, TestRunNotFoundError\n\n\nclass CleanCommand(commands.Command):\n \"\"\"Cleans outdated test and series run directories.\"\"\"\n\n def __init__(self):\n super().__init__(\n 'clean',\n 'Clean up Pavilion working directory.',\n short_help=\"Clean up Pavilion working diretory.\"\n )\n\n def _setup_arguments(self, parser):\n parser.add_argument(\n '-v', '--verbose', action='store_true', default=False,\n help='Verbose output.'\n )\n parser.add_argument(\n '--older-than', nargs='+', action='store',\n help='Set the max age of files to be removed. Can be a date ex:'\n '\"Jan 1 2019\" or , or a number of days/weeks ex:\"32 weeks\"'\n )\n\n def run(self, pav_cfg, args):\n \"\"\"Run this command.\"\"\"\n\n if args.older_than:\n if 'day' in args.older_than or 'days' in args.older_than:\n cutoff_date = datetime.today() - timedelta(\n days=int(args.older_than[0]))\n elif 'week' in args.older_than or 'weeks' in args.older_than:\n cutoff_date = datetime.today() - timedelta(\n weeks=int(args.older_than[0]))\n elif 'month' in args.older_than or 'months' in args.older_than:\n cutoff_date = get_month_delta(int(args.older_than[0]))\n else:\n date = ' '.join(args.older_than)\n try:\n cutoff_date = datetime.strptime(date, '%b %d %Y')\n except (TypeError, ValueError):\n output.fprint(\"{} is not a valid date.\"\n .format(args.older_than),\n file=self.errfile, color=output.RED)\n return errno.EINVAL\n\n # No cutoff specified, removes everything.\n else:\n cutoff_date = datetime.today()\n\n tests_dir = pav_cfg.working_dir / 'test_runs'\n series_dir = pav_cfg.working_dir / 'series'\n download_dir = pav_cfg.working_dir / 'downloads'\n build_dir = pav_cfg.working_dir / 'builds'\n\n dependent_builds = []\n incomplete_tests = []\n # Clean Tests\n output.fprint(\"Removing Tests...\", file=self.outfile,\n color=output.GREEN)\n for test in os.listdir(tests_dir.as_posix()):\n test_time = datetime.fromtimestamp(\n os.path.getmtime((tests_dir / test).as_posix()))\n try:\n test_obj = TestRun.load(pav_cfg, int(test))\n status = test_obj.status.current().state\n except (TestRunError, TestRunNotFoundError):\n output.fprint(\"Removing bad test directory {}\".format(test),\n file=self.outfile)\n shutil.rmtree(tests_dir.as_posix())\n continue\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint(\"Permission Error: {} cannot be removed\"\n .format(err[1]), file=self.errfile, color=31)\n if test_time < cutoff_date and status != STATES.RUNNING \\\n and status != STATES.SCHEDULED:\n shutil.rmtree((tests_dir / 
test).as_posix())\n if args.verbose:\n output.fprint(\"Removed test {}\".format(test),\n file=self.outfile)\n else:\n if args.verbose:\n output.fprint(\"Skipped test {}\".format(test),\n file=self.outfile)\n incomplete_tests.append(test)\n dependent_builds.append(test_obj.build_name)\n\n # Clean Series\n completed_series = True\n output.fprint(\"Removing Series...\", file=self.outfile,\n color=output.GREEN)\n for series in os.listdir(series_dir.as_posix()):\n try:\n series_time = datetime.fromtimestamp(\n os.path.getmtime((series_dir / series).as_posix()))\n for test in incomplete_tests:\n if os.path.exists((series_dir / series / test).as_posix()):\n completed_series = False\n if series_time < cutoff_date and completed_series:\n shutil.rmtree((series_dir / series).as_posix())\n if args.verbose:\n output.fprint(\"Removed series {}\".format(series),\n file=self.outfile)\n else:\n if args.verbose:\n output.fprint(\"Skipped series {}\".format(series),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint(\"Permission Error: {} cannot be removed\"\n .format(err[1]), file=self.errfile, color=31)\n\n # Clean Downloads\n output.fprint(\"Removing Downloads...\", file=self.outfile,\n color=output.GREEN)\n for download in os.listdir(download_dir.as_posix()):\n try:\n download_time = datetime.fromtimestamp(\n os.path.getmtime((download_dir / download).as_posix()))\n if download_time < cutoff_date:\n try:\n shutil.rmtree((download_dir / download).as_posix())\n except NotADirectoryError:\n output.fprint(\"{} is not a directory.\".format(download),\n file=self.errfile, color=output.RED)\n os.remove((download_dir / download).as_posix())\n if args.verbose:\n output.fprint(\"Removed download {}\".format(download),\n file=self.outfile)\n else:\n if args.verbose:\n output.fprint(\"Skipped download {}\".format(download),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint(\"Permission Error: {} cannot be removed\"\n .format(err[1]), file=self.errfile, color=31)\n\n # Clean Builds\n output.fprint(\"Removing Builds...\", file=self.outfile,\n color=output.GREEN)\n for build in os.listdir(build_dir.as_posix()):\n try:\n build_time = datetime.fromtimestamp(\n os.path.getmtime((build_dir / build).as_posix()))\n if build_time < cutoff_date and build not in dependent_builds:\n shutil.rmtree((build_dir / build).as_posix())\n if args.verbose:\n output.fprint(\"Removed build {}\".format(build),\n file=self.outfile)\n else:\n if args.verbose:\n output.fprint(\"Skipped build {}\".format(build),\n file=self.outfile)\n except PermissionError as err:\n err = str(err).split(\"'\")\n output.fprint(\"Permission Error: {} cannot be removed. \"\n .format(err[1]), file=self.errfile, color=31)\n\n return 0\n\n\ndef get_month_delta(months):\n \"\"\"Turn a number of months in the future into a concrete date.\"\"\"\n\n today = datetime.today()\n cur_year = today.year\n cur_day = today.day\n cur_month = today.month\n cur_time = today.time\n\n if cur_month - months <= 0:\n cut_month = (cur_month - months) % 12\n diff_years = (cur_month - months) // 12\n cut_year = cur_year + diff_years\n else:\n cut_month = cur_month - months\n cut_year = cur_year\n\n try:\n cutoff_date = datetime(cut_year, cut_month, cur_day, cur_time)\n except ValueError:\n last_day = monthrange(cut_year, cut_month)[1]\n cutoff_date = datetime(cut_year, cut_month, last_day, cur_time)\n\n return cutoff_date\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
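# A minimal standalone sketch of the month-based cutoff computation used by the
# clean command record above. It is illustrative only: the function name and the
# day-clamping choice are assumptions, not taken from Pavilion, and it relies
# purely on the standard library.
from calendar import monthrange
from datetime import datetime


def months_ago(months, today=None):
    """Return a datetime `months` months before `today`, clamped to a valid day."""
    today = today or datetime.today()
    # Work with a 0-based month index so the year rollover is a simple divmod.
    total = today.year * 12 + (today.month - 1) - months
    year, month0 = divmod(total, 12)
    month = month0 + 1
    # Clamp the day for short months (e.g. Mar 31 minus one month -> Feb 28/29).
    day = min(today.day, monthrange(year, month)[1])
    return datetime(year, month, day, today.hour, today.minute, today.second)


# Example: files older than three months would be compared against this cutoff.
cutoff_date = months_ago(3)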
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @app.task def update_banner_list(): banner_query = Banner.objects.filter(is_delete=False, is_show=True ).order_by('-orders')[:BANNER_COUNT] banner_data = BannerModelSerializer(banner_query, many=True).data for banner in banner_data: banner['image'] = settings.END_BASE_URL + banner['image'] cache.set('banner_list', banner_data) return True <|reserved_special_token_1|> from .celery import app from home.models import Banner from settings.const import BANNER_COUNT from home.serializers import BannerModelSerializer from django.core.cache import cache from django.conf import settings @app.task def update_banner_list(): banner_query = Banner.objects.filter(is_delete=False, is_show=True ).order_by('-orders')[:BANNER_COUNT] banner_data = BannerModelSerializer(banner_query, many=True).data for banner in banner_data: banner['image'] = settings.END_BASE_URL + banner['image'] cache.set('banner_list', banner_data) return True <|reserved_special_token_1|> from .celery import app from home.models import Banner from settings.const import BANNER_COUNT from home.serializers import BannerModelSerializer from django.core.cache import cache from django.conf import settings @app.task def update_banner_list(): # 获取最新内容 banner_query = Banner.objects.filter(is_delete=False, is_show=True).order_by('-orders')[:BANNER_COUNT] # 序列化 banner_data = BannerModelSerializer(banner_query, many=True).data for banner in banner_data: banner['image'] = settings.END_BASE_URL + banner['image'] # 更新缓存 cache.set('banner_list', banner_data) return True
flexible
{ "blob_id": "8e85740123467889bdeb6b27d5eaa4b39df280ed", "index": 438, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]\ndef update_banner_list():\n banner_query = Banner.objects.filter(is_delete=False, is_show=True\n ).order_by('-orders')[:BANNER_COUNT]\n banner_data = BannerModelSerializer(banner_query, many=True).data\n for banner in banner_data:\n banner['image'] = settings.END_BASE_URL + banner['image']\n cache.set('banner_list', banner_data)\n return True\n", "step-3": "from .celery import app\nfrom home.models import Banner\nfrom settings.const import BANNER_COUNT\nfrom home.serializers import BannerModelSerializer\nfrom django.core.cache import cache\nfrom django.conf import settings\n\n\[email protected]\ndef update_banner_list():\n banner_query = Banner.objects.filter(is_delete=False, is_show=True\n ).order_by('-orders')[:BANNER_COUNT]\n banner_data = BannerModelSerializer(banner_query, many=True).data\n for banner in banner_data:\n banner['image'] = settings.END_BASE_URL + banner['image']\n cache.set('banner_list', banner_data)\n return True\n", "step-4": "from .celery import app\n\nfrom home.models import Banner\nfrom settings.const import BANNER_COUNT\nfrom home.serializers import BannerModelSerializer\nfrom django.core.cache import cache\nfrom django.conf import settings\[email protected]\ndef update_banner_list():\n # 获取最新内容\n banner_query = Banner.objects.filter(is_delete=False, is_show=True).order_by('-orders')[:BANNER_COUNT]\n # 序列化\n banner_data = BannerModelSerializer(banner_query, many=True).data\n for banner in banner_data:\n banner['image'] = settings.END_BASE_URL + banner['image']\n # 更新缓存\n cache.set('banner_list', banner_data)\n return True\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
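# One way the update_banner_list task in the record above is usually kept fresh
# is a periodic celery beat entry. This is only a sketch: it reuses the same
# `app` the task module imports, but the dotted task path below is an assumed
# module layout, not something taken from the original project.
from .celery import app  # same Celery instance the task module imports
from celery.schedules import crontab

app.conf.beat_schedule = {
    'refresh-banner-cache': {
        # Dotted path of the registered task; adjust to the real module name.
        'task': 'celery_tasks.tasks.update_banner_list',
        # Re-run every ten minutes so the cached banner list stays current.
        'schedule': crontab(minute='*/10'),
    },
}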
''' Created on May 17, 2016 @author: Shauryadeep Chaudhuri ''' import json import tornado from engine import Constants as c from engine.ResultGenerator import ResultGenerator from ..ServerLogger import ServerLogger class GetFromURL(tornado.web.RequestHandler): ''' This class fetches the data requested like index,schema,entry,query from the url and responds with the result ''' def initialize(self): self.logger = ServerLogger().getLogger() def get(self, index=None, schema=None, entry=None, query=None): query = dict() resultGenerator = ResultGenerator() query[c.OPERATION] = c.GET if index: query[c.INDEX] = index if schema: query[c.SCHEMA] = schema if entry: query[c.ENTRY] = entry self.logger.debug("Internal Query Generated"+str(query)) try: result = str(resultGenerator.processQuery(json.dumps(query))) self.logger.info("Result fetched:" + result) self.write(result) except Exception as e: self.logger.error('Error', exc_info=True) self.write("Error: " + str(e))
normal
{ "blob_id": "5a13c7e3be8a0b5f3baf7106a938fc97f078c5bc", "index": 7335, "step-1": "<mask token>\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n <mask token>\n <mask token>\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n", "step-2": "<mask token>\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n <mask token>\n\n def initialize(self):\n self.logger = ServerLogger().getLogger()\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n", "step-3": "<mask token>\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n \"\"\"\n This class fetches the data requested like index,schema,entry,query from the url and responds with the result\n \"\"\"\n\n def initialize(self):\n self.logger = ServerLogger().getLogger()\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n", "step-4": "<mask token>\nimport json\nimport tornado\nfrom engine import Constants as c\nfrom engine.ResultGenerator import ResultGenerator\nfrom ..ServerLogger import ServerLogger\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n \"\"\"\n This class fetches the data requested like index,schema,entry,query from the url and responds with the result\n \"\"\"\n\n def initialize(self):\n self.logger = ServerLogger().getLogger()\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n", "step-5": "'''\r\nCreated on May 17, 2016\r\n\r\n@author: Shauryadeep Chaudhuri\r\n'''\r\n\r\nimport json\r\n\r\nimport tornado\r\n\r\nfrom engine import Constants as c\r\nfrom engine.ResultGenerator import 
ResultGenerator\r\nfrom ..ServerLogger import ServerLogger\r\n\r\n\r\nclass GetFromURL(tornado.web.RequestHandler):\r\n '''\r\n This class fetches the data requested like index,schema,entry,query from the url and responds with the result\r\n '''\r\n def initialize(self):\r\n self.logger = ServerLogger().getLogger()\r\n \r\n def get(self, index=None, schema=None, entry=None, query=None):\r\n\r\n query = dict()\r\n\r\n resultGenerator = ResultGenerator()\r\n\r\n query[c.OPERATION] = c.GET\r\n\r\n if index:\r\n query[c.INDEX] = index\r\n if schema:\r\n query[c.SCHEMA] = schema\r\n if entry:\r\n query[c.ENTRY] = entry\r\n \r\n self.logger.debug(\"Internal Query Generated\"+str(query))\r\n \r\n try:\r\n result = str(resultGenerator.processQuery(json.dumps(query)))\r\n \r\n self.logger.info(\"Result fetched:\" + result)\r\n \r\n self.write(result)\r\n except Exception as e:\r\n self.logger.error('Error', exc_info=True)\r\n \r\n self.write(\"Error: \" + str(e))\r\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
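# A sketch of how a handler like GetFromURL above is typically wired into a
# Tornado application, mapping optional URL segments onto the index/schema/entry
# arguments of get(). The import path, route pattern, and port are illustrative
# assumptions, not taken from the original project.
import tornado.ioloop
import tornado.web

from engine.handlers.GetFromURL import GetFromURL  # assumed module path


def make_app():
    return tornado.web.Application([
        # Each capturing group becomes a positional argument of GetFromURL.get();
        # optional groups that do not match are passed as None.
        (r"/([^/]+)(?:/([^/]+))?(?:/([^/]+))?", GetFromURL),
    ])


if __name__ == "__main__":
    application = make_app()
    application.listen(8888)
    tornado.ioloop.IOLoop.current().start()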
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> while cont == 'y': print('--enter underlay color in r,g,b--') c2[0] = int(input('red: ')) c2[1] = int(input('green: ')) c2[2] = int(input('blue: ')) print('') print('--enter desired color in r,g,b--') c3[0] = int(input('red: ')) c3[1] = int(input('green: ')) c3[2] = int(input('blue: ')) print('') alpha = 0 r = -1 g = -1 b = -1 while (alpha < 1 and r < 0 or g < 0 or b < 0 or r > 255 or g > 255 or b > 255): alpha += 1 / 256 inv = 1 / alpha r = c3[0] * inv + c2[0] * (1 - inv) g = c3[1] * inv + c2[1] * (1 - inv) b = c3[2] * inv + c2[2] * (1 - inv) print('---result---') print('red:', round(r)) print('green:', round(g)) print('blue:', round(b)) print('alpha:', round(alpha * 256)) print('------------') print('') cont = input('again? y/n') print('') <|reserved_special_token_1|> <|reserved_special_token_0|> c1 = [0, 0, 0, 0] c2 = [0, 0, 0] c3 = [0, 0, 0] cont = 'y' while cont == 'y': print('--enter underlay color in r,g,b--') c2[0] = int(input('red: ')) c2[1] = int(input('green: ')) c2[2] = int(input('blue: ')) print('') print('--enter desired color in r,g,b--') c3[0] = int(input('red: ')) c3[1] = int(input('green: ')) c3[2] = int(input('blue: ')) print('') alpha = 0 r = -1 g = -1 b = -1 while (alpha < 1 and r < 0 or g < 0 or b < 0 or r > 255 or g > 255 or b > 255): alpha += 1 / 256 inv = 1 / alpha r = c3[0] * inv + c2[0] * (1 - inv) g = c3[1] * inv + c2[1] * (1 - inv) b = c3[2] * inv + c2[2] * (1 - inv) print('---result---') print('red:', round(r)) print('green:', round(g)) print('blue:', round(b)) print('alpha:', round(alpha * 256)) print('------------') print('') cont = input('again? y/n') print('') <|reserved_special_token_1|> import time c1 = [0, 0, 0, 0] c2 = [0, 0, 0] c3 = [0, 0, 0] cont = 'y' while cont == 'y': print('--enter underlay color in r,g,b--') c2[0] = int(input('red: ')) c2[1] = int(input('green: ')) c2[2] = int(input('blue: ')) print('') print('--enter desired color in r,g,b--') c3[0] = int(input('red: ')) c3[1] = int(input('green: ')) c3[2] = int(input('blue: ')) print('') alpha = 0 r = -1 g = -1 b = -1 while (alpha < 1 and r < 0 or g < 0 or b < 0 or r > 255 or g > 255 or b > 255): alpha += 1 / 256 inv = 1 / alpha r = c3[0] * inv + c2[0] * (1 - inv) g = c3[1] * inv + c2[1] * (1 - inv) b = c3[2] * inv + c2[2] * (1 - inv) print('---result---') print('red:', round(r)) print('green:', round(g)) print('blue:', round(b)) print('alpha:', round(alpha * 256)) print('------------') print('') cont = input('again? 
y/n') print('') <|reserved_special_token_1|> # 0=RED, 1=GREEN, 2=BLUE, 3=ALPHA #import tkinter as tk #import tkinter.ttk as ttk #from tkcolorpicker import askcolor import time c1 = [0,0,0,0] #this color c2 = [0,0,0] #over this color c3 = [0,0,0] #result cont='y' #-------------------------------- while cont=='y': print('--enter underlay color in r,g,b--') c2[0]=int(input('red: ')) c2[1]=int(input('green: ')) c2[2]=int(input('blue: ')) print('') print('--enter desired color in r,g,b--') c3[0]=int(input('red: ')) c3[1]=int(input('green: ')) c3[2]=int(input('blue: ')) print('') #-------------------------------- alpha = 0 r = -1 g = -1 b = -1 while alpha < 1 and r < 0 or g < 0 or b < 0 or r > 255 or g > 255 or b > 255: alpha+= 1/256 inv = 1 / alpha r = c3[0] * inv + c2[0] * (1 - inv) g = c3[1] * inv + c2[1] * (1 - inv) b = c3[2] * inv + c2[2] * (1 - inv) print('---result---') print('red:', round(r)) print('green:', round(g)) print('blue:', round(b)) print('alpha:', round(alpha*256)) print('------------') print('') cont=input('again? y/n') print('')
flexible
{ "blob_id": "5fa8ae36c4b4a5bffa64f4c65b74b74b29ba246f", "index": 4578, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile cont == 'y':\n print('--enter underlay color in r,g,b--')\n c2[0] = int(input('red: '))\n c2[1] = int(input('green: '))\n c2[2] = int(input('blue: '))\n print('')\n print('--enter desired color in r,g,b--')\n c3[0] = int(input('red: '))\n c3[1] = int(input('green: '))\n c3[2] = int(input('blue: '))\n print('')\n alpha = 0\n r = -1\n g = -1\n b = -1\n while (alpha < 1 and r < 0 or g < 0 or b < 0 or r > 255 or g > 255 or b >\n 255):\n alpha += 1 / 256\n inv = 1 / alpha\n r = c3[0] * inv + c2[0] * (1 - inv)\n g = c3[1] * inv + c2[1] * (1 - inv)\n b = c3[2] * inv + c2[2] * (1 - inv)\n print('---result---')\n print('red:', round(r))\n print('green:', round(g))\n print('blue:', round(b))\n print('alpha:', round(alpha * 256))\n print('------------')\n print('')\n cont = input('again? y/n')\n print('')\n", "step-3": "<mask token>\nc1 = [0, 0, 0, 0]\nc2 = [0, 0, 0]\nc3 = [0, 0, 0]\ncont = 'y'\nwhile cont == 'y':\n print('--enter underlay color in r,g,b--')\n c2[0] = int(input('red: '))\n c2[1] = int(input('green: '))\n c2[2] = int(input('blue: '))\n print('')\n print('--enter desired color in r,g,b--')\n c3[0] = int(input('red: '))\n c3[1] = int(input('green: '))\n c3[2] = int(input('blue: '))\n print('')\n alpha = 0\n r = -1\n g = -1\n b = -1\n while (alpha < 1 and r < 0 or g < 0 or b < 0 or r > 255 or g > 255 or b >\n 255):\n alpha += 1 / 256\n inv = 1 / alpha\n r = c3[0] * inv + c2[0] * (1 - inv)\n g = c3[1] * inv + c2[1] * (1 - inv)\n b = c3[2] * inv + c2[2] * (1 - inv)\n print('---result---')\n print('red:', round(r))\n print('green:', round(g))\n print('blue:', round(b))\n print('alpha:', round(alpha * 256))\n print('------------')\n print('')\n cont = input('again? y/n')\n print('')\n", "step-4": "import time\nc1 = [0, 0, 0, 0]\nc2 = [0, 0, 0]\nc3 = [0, 0, 0]\ncont = 'y'\nwhile cont == 'y':\n print('--enter underlay color in r,g,b--')\n c2[0] = int(input('red: '))\n c2[1] = int(input('green: '))\n c2[2] = int(input('blue: '))\n print('')\n print('--enter desired color in r,g,b--')\n c3[0] = int(input('red: '))\n c3[1] = int(input('green: '))\n c3[2] = int(input('blue: '))\n print('')\n alpha = 0\n r = -1\n g = -1\n b = -1\n while (alpha < 1 and r < 0 or g < 0 or b < 0 or r > 255 or g > 255 or b >\n 255):\n alpha += 1 / 256\n inv = 1 / alpha\n r = c3[0] * inv + c2[0] * (1 - inv)\n g = c3[1] * inv + c2[1] * (1 - inv)\n b = c3[2] * inv + c2[2] * (1 - inv)\n print('---result---')\n print('red:', round(r))\n print('green:', round(g))\n print('blue:', round(b))\n print('alpha:', round(alpha * 256))\n print('------------')\n print('')\n cont = input('again? 
y/n')\n print('')\n", "step-5": "# 0=RED, 1=GREEN, 2=BLUE, 3=ALPHA\r\n\r\n#import tkinter as tk\r\n#import tkinter.ttk as ttk\r\n#from tkcolorpicker import askcolor\r\nimport time\r\n\r\nc1 = [0,0,0,0] #this color\r\nc2 = [0,0,0] #over this color\r\nc3 = [0,0,0] #result\r\n\r\ncont='y'\r\n\r\n#--------------------------------\r\n\r\nwhile cont=='y':\r\n print('--enter underlay color in r,g,b--')\r\n c2[0]=int(input('red: '))\r\n c2[1]=int(input('green: '))\r\n c2[2]=int(input('blue: '))\r\n print('')\r\n \r\n print('--enter desired color in r,g,b--')\r\n c3[0]=int(input('red: '))\r\n c3[1]=int(input('green: '))\r\n c3[2]=int(input('blue: '))\r\n print('')\r\n \r\n #--------------------------------\r\n\r\n alpha = 0\r\n r = -1\r\n g = -1\r\n b = -1\r\n\r\n while alpha < 1 and r < 0 or g < 0 or b < 0 or r > 255 or g > 255 or b > 255:\r\n alpha+= 1/256\r\n inv = 1 / alpha\r\n r = c3[0] * inv + c2[0] * (1 - inv)\r\n g = c3[1] * inv + c2[1] * (1 - inv)\r\n b = c3[2] * inv + c2[2] * (1 - inv)\r\n\r\n print('---result---')\r\n print('red:', round(r))\r\n print('green:', round(g))\r\n print('blue:', round(b))\r\n print('alpha:', round(alpha*256))\r\n print('------------')\r\n print('')\r\n\r\n cont=input('again? y/n')\r\n print('')\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
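# The interactive loop above searches for the smallest alpha in 1/256 steps. The
# same bound can be derived per channel in closed form: the overlay value needed
# is c1 = c2 + (c3 - c2) / alpha, and keeping c1 inside [0, 255] gives a lower
# bound on alpha for each channel. This is a sketch of that idea for plain 8-bit
# RGB triples; the function names are illustrative, and channels whose target is
# unreachable simply fall back to alpha = 1.
def min_alpha(c2, c3):
    bounds = []
    for under, target in zip(c2, c3):
        if target > under:
            room = 255 - under
            bounds.append(1.0 if room == 0 else (target - under) / room)
        elif target < under:
            bounds.append(1.0 if under == 0 else (under - target) / under)
    alpha = max(bounds) if bounds else 1 / 256
    return min(max(alpha, 1 / 256), 1.0)


def overlay_color(c2, c3, alpha):
    # Solve c3 = alpha * c1 + (1 - alpha) * c2 for the overlay channels c1.
    return [round(under + (target - under) / alpha) for under, target in zip(c2, c3)]


# Example matching the loop: underlay (50, 60, 70), desired (120, 130, 140).
a = min_alpha((50, 60, 70), (120, 130, 140))
print(overlay_color((50, 60, 70), (120, 130, 140), a), round(a * 256))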
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for k in range(1, 100): a = [] for i in range(1, 100): a.append([]) for j in range(1, 100): a[i - 1].append(partisan_symmetry([5 * i / 100, 0.2, 5 * j / 100], 1000, False)) plt.imshow(a) plt.colorbar() plt.xticks(range(99), [(x / 20) for x in range(1, 100)]) plt.yticks(range(99), [(x / 20) for x in range(1, 100)]) plt.title('Partisan Symmetry Difference for (x,' + str(k) + ',y)') plt.savefig('./ps' + str(k) + '.png') plt.close() print('figure', k, 'done') <|reserved_special_token_1|> import matplotlib.pyplot as plt from partisan_symmetry_noplot import partisan_symmetry for k in range(1, 100): a = [] for i in range(1, 100): a.append([]) for j in range(1, 100): a[i - 1].append(partisan_symmetry([5 * i / 100, 0.2, 5 * j / 100], 1000, False)) plt.imshow(a) plt.colorbar() plt.xticks(range(99), [(x / 20) for x in range(1, 100)]) plt.yticks(range(99), [(x / 20) for x in range(1, 100)]) plt.title('Partisan Symmetry Difference for (x,' + str(k) + ',y)') plt.savefig('./ps' + str(k) + '.png') plt.close() print('figure', k, 'done') <|reserved_special_token_1|> import matplotlib.pyplot as plt from partisan_symmetry_noplot import partisan_symmetry for k in range(1,100): a=[] for i in range(1,100): a.append([]) for j in range(1,100): a[i-1].append(partisan_symmetry([5*i/100,.20,5*j/100],1000,False)) plt.imshow(a) plt.colorbar() plt.xticks(range(99),[x/20 for x in range(1,100)]) plt.yticks(range(99),[x/20 for x in range(1,100)]) plt.title("Partisan Symmetry Difference for (x,"+str(k)+",y)") plt.savefig("./ps"+str(k)+".png") plt.close() print("figure",k,"done")
flexible
{ "blob_id": "cfa0937f1c49b52283c562d9ab1cb0542e71b990", "index": 5970, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor k in range(1, 100):\n a = []\n for i in range(1, 100):\n a.append([])\n for j in range(1, 100):\n a[i - 1].append(partisan_symmetry([5 * i / 100, 0.2, 5 * j / \n 100], 1000, False))\n plt.imshow(a)\n plt.colorbar()\n plt.xticks(range(99), [(x / 20) for x in range(1, 100)])\n plt.yticks(range(99), [(x / 20) for x in range(1, 100)])\n plt.title('Partisan Symmetry Difference for (x,' + str(k) + ',y)')\n plt.savefig('./ps' + str(k) + '.png')\n plt.close()\n print('figure', k, 'done')\n", "step-3": "import matplotlib.pyplot as plt\nfrom partisan_symmetry_noplot import partisan_symmetry\nfor k in range(1, 100):\n a = []\n for i in range(1, 100):\n a.append([])\n for j in range(1, 100):\n a[i - 1].append(partisan_symmetry([5 * i / 100, 0.2, 5 * j / \n 100], 1000, False))\n plt.imshow(a)\n plt.colorbar()\n plt.xticks(range(99), [(x / 20) for x in range(1, 100)])\n plt.yticks(range(99), [(x / 20) for x in range(1, 100)])\n plt.title('Partisan Symmetry Difference for (x,' + str(k) + ',y)')\n plt.savefig('./ps' + str(k) + '.png')\n plt.close()\n print('figure', k, 'done')\n", "step-4": "import matplotlib.pyplot as plt\nfrom partisan_symmetry_noplot import partisan_symmetry\nfor k in range(1,100):\n a=[]\n for i in range(1,100):\n a.append([])\n for j in range(1,100):\n a[i-1].append(partisan_symmetry([5*i/100,.20,5*j/100],1000,False))\n\n plt.imshow(a)\n plt.colorbar()\n plt.xticks(range(99),[x/20 for x in range(1,100)])\n plt.yticks(range(99),[x/20 for x in range(1,100)])\n plt.title(\"Partisan Symmetry Difference for (x,\"+str(k)+\",y)\")\n plt.savefig(\"./ps\"+str(k)+\".png\")\n plt.close()\n print(\"figure\",k,\"done\")\n \n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python import numpy as np import rospy import tf from geometry_msgs.msg import PoseStamped, Twist, TwistStamped, Point from nav_msgs.msg import Odometry from visualization_msgs.msg import Marker from bebop_nmpc_solver import BebopNmpcFormulationParam, bebop_nmpc_casadi_solver # The frame by default is NWU class BebopNmpcControl: def __init__(self, mpc_form_param): # MPC formulation settings self.mpc_form_param_ = mpc_form_param # bebop param self.roll_max_ = self.mpc_form_param_.roll_max self.pitch_max_ = self.mpc_form_param_.pitch_max self.vz_max_ = self.mpc_form_param_.vz_max self.yawrate_max_ = self.mpc_form_param_.yawrate_max self.K_yaw_ = self.mpc_form_param_.K_yaw self.bebop_size_ = self.mpc_form_param_.bebop_size # state and goal pose, size self.bebop_state_current_ = np.zeros(9) self.bebop_pose_goal_ = np.array([0, 0, 1.0, 0]) # collision avoidance obs param self.nobs_ = self.mpc_form_param_.nobs self.obs_size_ = self.mpc_form_param_.obs_size self.obs_state_current_ = np.array([0, 0, -1.0, 0, 0, 0]) self.obs_state_prediction_ = np.tile(np.array(self.obs_state_current_), (self.mpc_form_param_.N, 1)).T # MPC settings self.mpc_dt_ = self.mpc_form_param_.dt self.mpc_N_ = self.mpc_form_param_.N self.mpc_Tf_ = self.mpc_form_param_.Tf self.mpc_nx_ = self.mpc_form_param_.nx self.mpc_nu_ = self.mpc_form_param_.nu self.mpc_ns_ = self.mpc_form_param_.ns self.mpc_np_ = self.mpc_form_param_.nparam self.mpc_weights_wp_ = self.mpc_form_param_.mpc_weights_wp self.mpc_weights_input_ = self.mpc_form_param_.mpc_weights_input self.mpc_weights_coll_ = self.mpc_form_param_.mpc_weights_coll self.mpc_weights_slack_ = self.mpc_form_param_.mpc_weights_slack # MPC variables self.mpc_nlp_traj_ = np.zeros((self.mpc_nu_ + self.mpc_nx_, self.mpc_N_)).reshape(-1) self.mpc_nlp_param_ = self.mpc_nx_ + self.mpc_np_ * self.mpc_N_ self.mpc_x_plan_ = np.zeros((self.mpc_nx_, self.mpc_N_)) self.mpc_u_plan_ = np.zeros((self.mpc_nu_, self.mpc_N_)) self.mpc_s_plan_ = np.zeros((self.mpc_ns_, self.mpc_N_)) self.mpc_u_now_ = np.zeros(self.mpc_nu_) self.mpc_feasible_ = False self.mpc_success_ = False # MPC solver recompile = False [self.nlp_solver_complied_, self.nlp_lbx_, self.nlp_ubx_, self.nlp_lbg_, self.nlp_ubg_] = \ bebop_nmpc_casadi_solver(self.mpc_form_param_, recompile) # ROS subscriber self.odom_sub_ = rospy.Subscriber("/bebop/odom", Odometry, self.set_bebop_odom) # bebop_odom self.received_first_odom_ = False self.odom_received_time_ = rospy.Time.now() self.odom_time_out_ = 0.2 self.pose_sub_ = rospy.Subscriber("/bebop/pose", PoseStamped, self.set_bebop_pose) self.twist_sub_ = rospy.Subscriber("/bebop/twist", TwistStamped, self.set_bebop_twist) self.pose_goal_sub_ = rospy.Subscriber("/bebop/pose_goal", PoseStamped, self.set_bebop_pose_goal) self.received_first_goal_ = False # ROS publisher self.bebop_cmd_vel_ = np.array(4) self.bebop_cmd_vel_pub_ = rospy.Publisher("/bebop/auto_cmd_vel", Twist, queue_size=1) self.mpc_traj_plan_vis_pub_ = rospy.Publisher("/bebop/mpc/trajectory_plan_vis", Marker, queue_size=1) def set_bebop_odom(self, odom_msg): if self.received_first_odom_ is False: self.received_first_odom_ = True rospy.loginfo('First odometry received!') # read data self.odom_received_time_ = rospy.Time.now() px = odom_msg.pose.pose.position.x py = odom_msg.pose.pose.position.y pz = odom_msg.pose.pose.position.z vx = odom_msg.twist.twist.linear.x vy = odom_msg.twist.twist.linear.y vz = odom_msg.twist.twist.linear.z rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.orientation.x, 
odom_msg.pose.pose.orientation.y, odom_msg.pose.pose.orientation.z, odom_msg.pose.pose.orientation.w]) self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0], rpy[1], rpy[2]]) if self.received_first_goal_ is False: # if not received any goal pose self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]]) def set_bebop_pose(self, pose_msg): if self.received_first_odom_ is False: self.received_first_odom_ = True rospy.loginfo('First pose received!') self.odom_received_time_ = rospy.Time.now() px = pose_msg.pose.position.x py = pose_msg.pose.position.y pz = pose_msg.pose.position.z rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.orientation.z, pose_msg.pose.orientation.w]) self.bebop_state_current_[0:3] = np.array([px, py, pz]) self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]]) if self.received_first_goal_ is False: # if not received any goal pose self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]]) def set_bebop_twist(self, twist_msg): vx = twist_msg.twist.linear.x vy = twist_msg.twist.linear.y vz = twist_msg.twist.linear.z self.bebop_state_current_[3:6] = np.array([vx, vy, vz]) def set_bebop_pose_goal(self, pose_goal_msg): if self.received_first_goal_ is False: self.received_first_goal_ = True rospy.loginfo('First pose goal received!') px_goal = pose_goal_msg.pose.position.x py_goal = pose_goal_msg.pose.position.y pz_goal = pose_goal_msg.pose.position.z rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.pose.orientation.x, pose_goal_msg.pose.orientation.y, pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]) self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal, rpy_goal[2]]) def obs_motion_prediction(self): for iStage in range(0, self.mpc_N_): self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3] \ + self.obs_state_current_[3:6] * (iStage+1) * self.mpc_dt_ def reset_nlp_solver(self): # initialize plan u_reset = np.zeros(self.mpc_nu_) x_reset = np.zeros(self.mpc_nx_) s_reset = np.zeros(self.mpc_ns_) # x_reset = self.bebop_state_current_[:self.mpc_nx_] x_reset[0:3] = self.bebop_state_current_[0:3] x_reset[6:8] = self.bebop_state_current_[6:8] nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(-1) self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(-1) def initialize_nlp_solver(self): u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.mpc_u_plan_[:, -1:]), axis=1) x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.mpc_x_plan_[:, -1:]), axis=1) s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.mpc_s_plan_[:, -1:]), axis=1) self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)).reshape(-1) def set_nlp_params(self): parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_)) # all parameters on each stage for iStage in range(0, self.mpc_N_): parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_start, iStage] = \ np.array([self.bebop_state_current_[0], self.bebop_state_current_[1], self.bebop_state_current_[2], self.bebop_state_current_[8]]) parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_ parameters_all_stage[self.mpc_form_param_.param_index_bebop_size, iStage] = self.bebop_size_ parameters_all_stage[self.mpc_form_param_.param_index_obs_info, iStage] = np.concatenate(( self.obs_state_prediction_[0:3, iStage], self.obs_size_ )) if iStage == self.mpc_N_ - 1: # terminal weights 
parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack( (self.mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.mpc_weights_coll_, self.mpc_weights_slack_) ) else: parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack( (0.05 * self.mpc_weights_wp_, self.mpc_weights_input_, self.mpc_weights_coll_, self.mpc_weights_slack_) ) # set parameters self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.mpc_nx_], np.transpose(parameters_all_stage).reshape(-1))) def run_nlp_solver(self): # initialize solver if self.mpc_feasible_ is True: self.initialize_nlp_solver() else: self.reset_nlp_solver() # set solver params self.set_nlp_params() # call the solver time_before_solver = rospy.get_rostime() nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.nlp_lbg_, ubg=self.nlp_ubg_) # deal with infeasibility if self.nlp_solver_complied_.stats()['success'] is False: # if infeasible self.mpc_feasible_ = False self.mpc_success_ = False rospy.logwarn("MPC infeasible!") else: self.mpc_feasible_ = True self.mpc_success_ = True solver_time = (rospy.get_rostime() - time_before_solver).to_sec() * 1000.0 solver_iter = self.nlp_solver_complied_.stats()['iter_count'] rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.', solver_iter, solver_time) # obtain solution traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self.mpc_ns_, self.mpc_N_)) self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :]) self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_+self.mpc_nx_, :]) self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_+self.mpc_nx_:, :]) self.mpc_u_now_ = self.mpc_u_plan_[:, 0] def calculate_bebop_cmd_vel(self): # if odom received time_now = rospy.Time.now() if (time_now - self.odom_received_time_).to_sec() > self.odom_time_out_: rospy.logwarn('Odometry time out! Will try to make the MAV hover.') self.bebop_pose_goal_ = np.concatenate((self.bebop_state_current_[0:3], self.bebop_state_current_[8:9])) else: # run the nlp solver self.run_nlp_solver() # control commands if self.mpc_success_ is True: roll_cmd = self.mpc_u_now_[0] pitch_cmd = self.mpc_u_now_[1] vz_cmd = self.mpc_u_now_[2] else: rospy.logwarn('MPC failure! 
Default commands sent.') roll_cmd = 0.0 pitch_cmd = 0.0 vz_cmd = 0.0 # yaw control yaw_now = self.bebop_state_current_[8] yaw_ref = self.bebop_pose_goal_[3] yaw_error = yaw_ref - yaw_now while np.abs(yaw_error) > np.pi: if yaw_error > 0.0: yaw_error = yaw_error - 2.0 * np.pi else: yaw_error = yaw_error + 2.0 * np.pi yawrate_cmd = self.K_yaw_ * yaw_error yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.yawrate_max_) # obtained command self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd, yawrate_cmd]) def pub_bebop_cmd_vel(self): try: cmd_vel_msg = Twist() cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_ # pitch to move along x cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_ # roll to move along y cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_ cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_ self.bebop_cmd_vel_pub_.publish(cmd_vel_msg) except: rospy.logwarn('Bebop cmd_vel command not published!') def pub_mpc_traj_plan_vis(self): try: marker_msg = Marker() marker_msg.header.frame_id = "map" marker_msg.header.stamp = rospy.Time.now() marker_msg.type = 8 marker_msg.action = 0 # set the scale of the marker marker_msg.scale.x = 0.2 marker_msg.scale.y = 0.2 marker_msg.scale.z = 0.2 # set the color marker_msg.color.r = 1.0 marker_msg.color.g = 0.0 marker_msg.color.b = 0.0 marker_msg.color.a = 1.0 # Set the pose of the marker marker_msg.pose.position.x = 0.0 marker_msg.pose.position.y = 0.0 marker_msg.pose.position.z = 0.0 marker_msg.pose.orientation.x = 0 marker_msg.pose.orientation.y = 0 marker_msg.pose.orientation.z = 0 marker_msg.pose.orientation.w = 1.0 # points mpc_traj_plan_points = [] for iStage in range(0, self.mpc_N_): point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_[1, iStage], self.mpc_x_plan_[2, iStage]) mpc_traj_plan_points.append(point) marker_msg.points = mpc_traj_plan_points self.mpc_traj_plan_vis_pub_.publish(marker_msg) except: rospy.logwarn("MPC trajectory plan not published!") def bebop_nmpc_control(): # create a node rospy.loginfo("Starting Bebop NMPC Control...") rospy.init_node("bebop_nmpc_control_node", anonymous=False) hz = 50 rate = rospy.Rate(hz) rospy.sleep(1.0) # formulation mpc_form_param = BebopNmpcFormulationParam() # control bebop_nmpc = BebopNmpcControl(mpc_form_param) while not rospy.is_shutdown(): if bebop_nmpc.received_first_odom_ is False: rospy.logwarn('Waiting for first Odometry!') elif bebop_nmpc.received_first_goal_ is False: rospy.logwarn('Waiting for first goal pose!') else: bebop_nmpc.calculate_bebop_cmd_vel() bebop_nmpc.pub_bebop_cmd_vel() bebop_nmpc.pub_mpc_traj_plan_vis() rate.sleep() if __name__ == "__main__": bebop_nmpc_control()
normal
{ "blob_id": "76d0dd2d6b2d580900283f2623f05dd02a70fcd8", "index": 6825, "step-1": "<mask token>\n\n\nclass BebopNmpcControl:\n <mask token>\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.\n orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.\n pose.orientation.z, odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0\n ], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.\n orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.\n orientation.z, pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.\n pose.orientation.x, pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]\n )\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,\n rpy_goal[2]])\n <mask token>\n <mask token>\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.\n mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.\n mpc_x_plan_[:, -1:]), axis=1)\n s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.\n mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)\n ).reshape(-1)\n <mask token>\n\n def run_nlp_solver(self):\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n self.set_nlp_params()\n time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.\n mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.\n nlp_lbg_, ubg=self.nlp_ubg_)\n if self.nlp_solver_complied_.stats()['success'] is False:\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n rospy.logwarn('MPC infeasible!')\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec(\n ) * 1000.0\n solver_iter = 
self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',\n solver_iter, solver_time)\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self\n .mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +\n self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def calculate_bebop_cmd_vel(self):\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec(\n ) > self.odom_time_out_:\n rospy.logwarn('Odometry time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.\n bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n self.run_nlp_solver()\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.\n yawrate_max_)\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,\n yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = 'map'\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_\n [1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn('MPC trajectory plan not published!')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BebopNmpcControl:\n <mask token>\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = 
tf.transformations.euler_from_quaternion([odom_msg.pose.pose.\n orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.\n pose.orientation.z, odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0\n ], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.\n orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.\n orientation.z, pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.\n pose.orientation.x, pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]\n )\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,\n rpy_goal[2]])\n\n def obs_motion_prediction(self):\n for iStage in range(0, self.mpc_N_):\n self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3\n ] + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_\n <mask token>\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.\n mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.\n mpc_x_plan_[:, -1:]), axis=1)\n s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.\n mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)\n ).reshape(-1)\n\n def set_nlp_params(self):\n parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_))\n for iStage in range(0, self.mpc_N_):\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_start, iStage] = np.array([self.\n bebop_state_current_[0], self.bebop_state_current_[1], self\n .bebop_state_current_[2], self.bebop_state_current_[8]])\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_size, iStage] = self.bebop_size_\n parameters_all_stage[self.mpc_form_param_.param_index_obs_info,\n iStage] = np.concatenate((self.obs_state_prediction_[0:3,\n iStage], self.obs_size_))\n if iStage == self.mpc_N_ - 1:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((self.\n mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n else:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((0.05 *\n self.mpc_weights_wp_, 
self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.\n mpc_nx_], np.transpose(parameters_all_stage).reshape(-1)))\n\n def run_nlp_solver(self):\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n self.set_nlp_params()\n time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.\n mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.\n nlp_lbg_, ubg=self.nlp_ubg_)\n if self.nlp_solver_complied_.stats()['success'] is False:\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n rospy.logwarn('MPC infeasible!')\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec(\n ) * 1000.0\n solver_iter = self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',\n solver_iter, solver_time)\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self\n .mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +\n self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def calculate_bebop_cmd_vel(self):\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec(\n ) > self.odom_time_out_:\n rospy.logwarn('Odometry time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.\n bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n self.run_nlp_solver()\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! 
Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.\n yawrate_max_)\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,\n yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = 'map'\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_\n [1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn('MPC trajectory plan not published!')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BebopNmpcControl:\n <mask token>\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.\n orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.\n pose.orientation.z, odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0\n ], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.\n orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.\n orientation.z, pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, 
rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.\n pose.orientation.x, pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]\n )\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,\n rpy_goal[2]])\n\n def obs_motion_prediction(self):\n for iStage in range(0, self.mpc_N_):\n self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3\n ] + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_\n\n def reset_nlp_solver(self):\n u_reset = np.zeros(self.mpc_nu_)\n x_reset = np.zeros(self.mpc_nx_)\n s_reset = np.zeros(self.mpc_ns_)\n x_reset[0:3] = self.bebop_state_current_[0:3]\n x_reset[6:8] = self.bebop_state_current_[6:8]\n nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(\n -1)\n self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(\n -1)\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.\n mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.\n mpc_x_plan_[:, -1:]), axis=1)\n s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.\n mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)\n ).reshape(-1)\n\n def set_nlp_params(self):\n parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_))\n for iStage in range(0, self.mpc_N_):\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_start, iStage] = np.array([self.\n bebop_state_current_[0], self.bebop_state_current_[1], self\n .bebop_state_current_[2], self.bebop_state_current_[8]])\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_size, iStage] = self.bebop_size_\n parameters_all_stage[self.mpc_form_param_.param_index_obs_info,\n iStage] = np.concatenate((self.obs_state_prediction_[0:3,\n iStage], self.obs_size_))\n if iStage == self.mpc_N_ - 1:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((self.\n mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n else:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((0.05 *\n self.mpc_weights_wp_, self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.\n mpc_nx_], np.transpose(parameters_all_stage).reshape(-1)))\n\n def run_nlp_solver(self):\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n self.set_nlp_params()\n time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.\n mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.\n nlp_lbg_, ubg=self.nlp_ubg_)\n if self.nlp_solver_complied_.stats()['success'] is False:\n self.mpc_feasible_ = False\n self.mpc_success_ = 
False\n rospy.logwarn('MPC infeasible!')\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec(\n ) * 1000.0\n solver_iter = self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',\n solver_iter, solver_time)\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self\n .mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +\n self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def calculate_bebop_cmd_vel(self):\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec(\n ) > self.odom_time_out_:\n rospy.logwarn('Odometry time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.\n bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n self.run_nlp_solver()\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.\n yawrate_max_)\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,\n yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = 'map'\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_\n [1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn('MPC trajectory plan not published!')\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass BebopNmpcControl:\n\n def __init__(self, mpc_form_param):\n self.mpc_form_param_ = mpc_form_param\n self.roll_max_ = self.mpc_form_param_.roll_max\n self.pitch_max_ = self.mpc_form_param_.pitch_max\n self.vz_max_ = self.mpc_form_param_.vz_max\n self.yawrate_max_ = self.mpc_form_param_.yawrate_max\n 
self.K_yaw_ = self.mpc_form_param_.K_yaw\n self.bebop_size_ = self.mpc_form_param_.bebop_size\n self.bebop_state_current_ = np.zeros(9)\n self.bebop_pose_goal_ = np.array([0, 0, 1.0, 0])\n self.nobs_ = self.mpc_form_param_.nobs\n self.obs_size_ = self.mpc_form_param_.obs_size\n self.obs_state_current_ = np.array([0, 0, -1.0, 0, 0, 0])\n self.obs_state_prediction_ = np.tile(np.array(self.\n obs_state_current_), (self.mpc_form_param_.N, 1)).T\n self.mpc_dt_ = self.mpc_form_param_.dt\n self.mpc_N_ = self.mpc_form_param_.N\n self.mpc_Tf_ = self.mpc_form_param_.Tf\n self.mpc_nx_ = self.mpc_form_param_.nx\n self.mpc_nu_ = self.mpc_form_param_.nu\n self.mpc_ns_ = self.mpc_form_param_.ns\n self.mpc_np_ = self.mpc_form_param_.nparam\n self.mpc_weights_wp_ = self.mpc_form_param_.mpc_weights_wp\n self.mpc_weights_input_ = self.mpc_form_param_.mpc_weights_input\n self.mpc_weights_coll_ = self.mpc_form_param_.mpc_weights_coll\n self.mpc_weights_slack_ = self.mpc_form_param_.mpc_weights_slack\n self.mpc_nlp_traj_ = np.zeros((self.mpc_nu_ + self.mpc_nx_, self.\n mpc_N_)).reshape(-1)\n self.mpc_nlp_param_ = self.mpc_nx_ + self.mpc_np_ * self.mpc_N_\n self.mpc_x_plan_ = np.zeros((self.mpc_nx_, self.mpc_N_))\n self.mpc_u_plan_ = np.zeros((self.mpc_nu_, self.mpc_N_))\n self.mpc_s_plan_ = np.zeros((self.mpc_ns_, self.mpc_N_))\n self.mpc_u_now_ = np.zeros(self.mpc_nu_)\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n recompile = False\n [self.nlp_solver_complied_, self.nlp_lbx_, self.nlp_ubx_, self.\n nlp_lbg_, self.nlp_ubg_] = bebop_nmpc_casadi_solver(self.\n mpc_form_param_, recompile)\n self.odom_sub_ = rospy.Subscriber('/bebop/odom', Odometry, self.\n set_bebop_odom)\n self.received_first_odom_ = False\n self.odom_received_time_ = rospy.Time.now()\n self.odom_time_out_ = 0.2\n self.pose_sub_ = rospy.Subscriber('/bebop/pose', PoseStamped, self.\n set_bebop_pose)\n self.twist_sub_ = rospy.Subscriber('/bebop/twist', TwistStamped,\n self.set_bebop_twist)\n self.pose_goal_sub_ = rospy.Subscriber('/bebop/pose_goal',\n PoseStamped, self.set_bebop_pose_goal)\n self.received_first_goal_ = False\n self.bebop_cmd_vel_ = np.array(4)\n self.bebop_cmd_vel_pub_ = rospy.Publisher('/bebop/auto_cmd_vel',\n Twist, queue_size=1)\n self.mpc_traj_plan_vis_pub_ = rospy.Publisher(\n '/bebop/mpc/trajectory_plan_vis', Marker, queue_size=1)\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.\n orientation.x, odom_msg.pose.pose.orientation.y, odom_msg.pose.\n pose.orientation.z, odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0\n ], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.\n orientation.x, 
pose_msg.pose.orientation.y, pose_msg.pose.\n orientation.z, pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False:\n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.\n pose.orientation.x, pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z, pose_goal_msg.pose.orientation.w]\n )\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal,\n rpy_goal[2]])\n\n def obs_motion_prediction(self):\n for iStage in range(0, self.mpc_N_):\n self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3\n ] + self.obs_state_current_[3:6] * (iStage + 1) * self.mpc_dt_\n\n def reset_nlp_solver(self):\n u_reset = np.zeros(self.mpc_nu_)\n x_reset = np.zeros(self.mpc_nx_)\n s_reset = np.zeros(self.mpc_ns_)\n x_reset[0:3] = self.bebop_state_current_[0:3]\n x_reset[6:8] = self.bebop_state_current_[6:8]\n nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(\n -1)\n self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(\n -1)\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.\n mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.\n mpc_x_plan_[:, -1:]), axis=1)\n s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.\n mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)\n ).reshape(-1)\n\n def set_nlp_params(self):\n parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_))\n for iStage in range(0, self.mpc_N_):\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_start, iStage] = np.array([self.\n bebop_state_current_[0], self.bebop_state_current_[1], self\n .bebop_state_current_[2], self.bebop_state_current_[8]])\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_\n parameters_all_stage[self.mpc_form_param_.\n param_index_bebop_size, iStage] = self.bebop_size_\n parameters_all_stage[self.mpc_form_param_.param_index_obs_info,\n iStage] = np.concatenate((self.obs_state_prediction_[0:3,\n iStage], self.obs_size_))\n if iStage == self.mpc_N_ - 1:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((self.\n mpc_weights_wp_, 0.1 * self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n else:\n parameters_all_stage[self.mpc_form_param_.\n param_index_mpc_weights, iStage] = np.hstack((0.05 *\n self.mpc_weights_wp_, self.mpc_weights_input_, self.\n mpc_weights_coll_, self.mpc_weights_slack_))\n self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.\n mpc_nx_], np.transpose(parameters_all_stage).reshape(-1)))\n\n def run_nlp_solver(self):\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n self.set_nlp_params()\n 
time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_, p=self.\n mpc_nlp_param_, lbx=self.nlp_lbx_, ubx=self.nlp_ubx_, lbg=self.\n nlp_lbg_, ubg=self.nlp_ubg_)\n if self.nlp_solver_complied_.stats()['success'] is False:\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n rospy.logwarn('MPC infeasible!')\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec(\n ) * 1000.0\n solver_iter = self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.',\n solver_iter, solver_time)\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self\n .mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_ +\n self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_ + self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def calculate_bebop_cmd_vel(self):\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec(\n ) > self.odom_time_out_:\n rospy.logwarn('Odometry time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.\n bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n self.run_nlp_solver()\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.\n yawrate_max_)\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd,\n yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = 'map'\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_\n [1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn('MPC trajectory plan not published!')\n\n\ndef bebop_nmpc_control():\n 
rospy.loginfo('Starting Bebop NMPC Control...')\n rospy.init_node('bebop_nmpc_control_node', anonymous=False)\n hz = 50\n rate = rospy.Rate(hz)\n rospy.sleep(1.0)\n mpc_form_param = BebopNmpcFormulationParam()\n bebop_nmpc = BebopNmpcControl(mpc_form_param)\n while not rospy.is_shutdown():\n if bebop_nmpc.received_first_odom_ is False:\n rospy.logwarn('Waiting for first Odometry!')\n elif bebop_nmpc.received_first_goal_ is False:\n rospy.logwarn('Waiting for first goal pose!')\n else:\n bebop_nmpc.calculate_bebop_cmd_vel()\n bebop_nmpc.pub_bebop_cmd_vel()\n bebop_nmpc.pub_mpc_traj_plan_vis()\n rate.sleep()\n\n\n<mask token>\n", "step-5": "#!/usr/bin/env python\n\nimport numpy as np\nimport rospy\nimport tf\nfrom geometry_msgs.msg import PoseStamped, Twist, TwistStamped, Point\nfrom nav_msgs.msg import Odometry\nfrom visualization_msgs.msg import Marker\nfrom bebop_nmpc_solver import BebopNmpcFormulationParam, bebop_nmpc_casadi_solver\n\n\n# The frame by default is NWU\n\n\nclass BebopNmpcControl:\n def __init__(self, mpc_form_param):\n # MPC formulation settings\n self.mpc_form_param_ = mpc_form_param\n\n # bebop param\n self.roll_max_ = self.mpc_form_param_.roll_max\n self.pitch_max_ = self.mpc_form_param_.pitch_max\n self.vz_max_ = self.mpc_form_param_.vz_max\n self.yawrate_max_ = self.mpc_form_param_.yawrate_max\n self.K_yaw_ = self.mpc_form_param_.K_yaw\n self.bebop_size_ = self.mpc_form_param_.bebop_size\n\n # state and goal pose, size\n self.bebop_state_current_ = np.zeros(9)\n self.bebop_pose_goal_ = np.array([0, 0, 1.0, 0])\n\n # collision avoidance obs param\n self.nobs_ = self.mpc_form_param_.nobs\n self.obs_size_ = self.mpc_form_param_.obs_size\n self.obs_state_current_ = np.array([0, 0, -1.0, 0, 0, 0])\n self.obs_state_prediction_ = np.tile(np.array(self.obs_state_current_), (self.mpc_form_param_.N, 1)).T\n\n # MPC settings\n self.mpc_dt_ = self.mpc_form_param_.dt\n self.mpc_N_ = self.mpc_form_param_.N\n self.mpc_Tf_ = self.mpc_form_param_.Tf\n self.mpc_nx_ = self.mpc_form_param_.nx\n self.mpc_nu_ = self.mpc_form_param_.nu\n self.mpc_ns_ = self.mpc_form_param_.ns\n self.mpc_np_ = self.mpc_form_param_.nparam\n self.mpc_weights_wp_ = self.mpc_form_param_.mpc_weights_wp\n self.mpc_weights_input_ = self.mpc_form_param_.mpc_weights_input\n self.mpc_weights_coll_ = self.mpc_form_param_.mpc_weights_coll\n self.mpc_weights_slack_ = self.mpc_form_param_.mpc_weights_slack\n\n # MPC variables\n self.mpc_nlp_traj_ = np.zeros((self.mpc_nu_ + self.mpc_nx_, self.mpc_N_)).reshape(-1)\n self.mpc_nlp_param_ = self.mpc_nx_ + self.mpc_np_ * self.mpc_N_\n self.mpc_x_plan_ = np.zeros((self.mpc_nx_, self.mpc_N_))\n self.mpc_u_plan_ = np.zeros((self.mpc_nu_, self.mpc_N_))\n self.mpc_s_plan_ = np.zeros((self.mpc_ns_, self.mpc_N_))\n self.mpc_u_now_ = np.zeros(self.mpc_nu_)\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n\n # MPC solver\n recompile = False \n [self.nlp_solver_complied_, self.nlp_lbx_, self.nlp_ubx_, self.nlp_lbg_, self.nlp_ubg_] = \\\n bebop_nmpc_casadi_solver(self.mpc_form_param_, recompile)\n\n # ROS subscriber\n self.odom_sub_ = rospy.Subscriber(\"/bebop/odom\", Odometry, self.set_bebop_odom) # bebop_odom\n self.received_first_odom_ = False\n self.odom_received_time_ = rospy.Time.now()\n self.odom_time_out_ = 0.2\n\n self.pose_sub_ = rospy.Subscriber(\"/bebop/pose\", PoseStamped, self.set_bebop_pose)\n self.twist_sub_ = rospy.Subscriber(\"/bebop/twist\", TwistStamped, self.set_bebop_twist)\n\n self.pose_goal_sub_ = rospy.Subscriber(\"/bebop/pose_goal\", PoseStamped, 
self.set_bebop_pose_goal)\n self.received_first_goal_ = False \n\n # ROS publisher\n self.bebop_cmd_vel_ = np.array(4)\n self.bebop_cmd_vel_pub_ = rospy.Publisher(\"/bebop/auto_cmd_vel\", Twist, queue_size=1)\n self.mpc_traj_plan_vis_pub_ = rospy.Publisher(\"/bebop/mpc/trajectory_plan_vis\", Marker, queue_size=1)\n\n def set_bebop_odom(self, odom_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First odometry received!')\n # read data\n self.odom_received_time_ = rospy.Time.now()\n px = odom_msg.pose.pose.position.x\n py = odom_msg.pose.pose.position.y\n pz = odom_msg.pose.pose.position.z\n vx = odom_msg.twist.twist.linear.x\n vy = odom_msg.twist.twist.linear.y\n vz = odom_msg.twist.twist.linear.z\n rpy = tf.transformations.euler_from_quaternion([odom_msg.pose.pose.orientation.x,\n odom_msg.pose.pose.orientation.y,\n odom_msg.pose.pose.orientation.z,\n odom_msg.pose.pose.orientation.w])\n self.bebop_state_current_ = np.array([px, py, pz, vx, vy, vz, rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False: # if not received any goal pose \n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_pose(self, pose_msg):\n if self.received_first_odom_ is False:\n self.received_first_odom_ = True\n rospy.loginfo('First pose received!')\n self.odom_received_time_ = rospy.Time.now()\n px = pose_msg.pose.position.x\n py = pose_msg.pose.position.y\n pz = pose_msg.pose.position.z\n rpy = tf.transformations.euler_from_quaternion([pose_msg.pose.orientation.x,\n pose_msg.pose.orientation.y,\n pose_msg.pose.orientation.z,\n pose_msg.pose.orientation.w])\n self.bebop_state_current_[0:3] = np.array([px, py, pz])\n self.bebop_state_current_[6:9] = np.array([rpy[0], rpy[1], rpy[2]])\n if self.received_first_goal_ is False: # if not received any goal pose \n self.limo_pose_goal_ = np.array([px, py, pz, rpy[2]])\n\n def set_bebop_twist(self, twist_msg):\n vx = twist_msg.twist.linear.x\n vy = twist_msg.twist.linear.y\n vz = twist_msg.twist.linear.z\n self.bebop_state_current_[3:6] = np.array([vx, vy, vz])\n\n def set_bebop_pose_goal(self, pose_goal_msg):\n if self.received_first_goal_ is False:\n self.received_first_goal_ = True\n rospy.loginfo('First pose goal received!')\n px_goal = pose_goal_msg.pose.position.x\n py_goal = pose_goal_msg.pose.position.y\n pz_goal = pose_goal_msg.pose.position.z\n rpy_goal = tf.transformations.euler_from_quaternion([pose_goal_msg.pose.orientation.x,\n pose_goal_msg.pose.orientation.y,\n pose_goal_msg.pose.orientation.z,\n pose_goal_msg.pose.orientation.w])\n self.bebop_pose_goal_ = np.array([px_goal, py_goal, pz_goal, rpy_goal[2]])\n\n def obs_motion_prediction(self):\n for iStage in range(0, self.mpc_N_):\n self.obs_state_prediction_[0:3] = self.obs_state_current_[0:3] \\\n + self.obs_state_current_[3:6] * (iStage+1) * self.mpc_dt_\n\n def reset_nlp_solver(self):\n # initialize plan\n u_reset = np.zeros(self.mpc_nu_)\n x_reset = np.zeros(self.mpc_nx_)\n s_reset = np.zeros(self.mpc_ns_)\n # x_reset = self.bebop_state_current_[:self.mpc_nx_]\n x_reset[0:3] = self.bebop_state_current_[0:3]\n x_reset[6:8] = self.bebop_state_current_[6:8]\n nlp_plan = np.concatenate((u_reset, x_reset, s_reset), axis=0).reshape(-1)\n self.mpc_nlp_traj_ = np.tile(np.array(nlp_plan), self.mpc_N_).reshape(-1)\n\n def initialize_nlp_solver(self):\n u_traj_init = np.concatenate((self.mpc_u_plan_[:, 1:], self.mpc_u_plan_[:, -1:]), axis=1)\n x_traj_init = np.concatenate((self.mpc_x_plan_[:, 1:], self.mpc_x_plan_[:, -1:]), axis=1)\n 
s_traj_init = np.concatenate((self.mpc_s_plan_[:, 1:], self.mpc_s_plan_[:, -1:]), axis=1)\n self.mpc_nlp_traj_ = np.vstack((u_traj_init, x_traj_init, s_traj_init)).reshape(-1)\n\n def set_nlp_params(self):\n parameters_all_stage = np.zeros((self.mpc_np_, self.mpc_N_)) # all parameters on each stage\n for iStage in range(0, self.mpc_N_):\n parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_start, iStage] = \\\n np.array([self.bebop_state_current_[0], self.bebop_state_current_[1], self.bebop_state_current_[2],\n self.bebop_state_current_[8]])\n parameters_all_stage[self.mpc_form_param_.param_index_bebop_pose_goal, iStage] = self.bebop_pose_goal_\n parameters_all_stage[self.mpc_form_param_.param_index_bebop_size, iStage] = self.bebop_size_\n parameters_all_stage[self.mpc_form_param_.param_index_obs_info, iStage] = np.concatenate((\n self.obs_state_prediction_[0:3, iStage], self.obs_size_\n ))\n if iStage == self.mpc_N_ - 1: # terminal weights\n parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack(\n (self.mpc_weights_wp_, 0.1 * self.mpc_weights_input_,\n self.mpc_weights_coll_, self.mpc_weights_slack_)\n )\n else:\n parameters_all_stage[self.mpc_form_param_.param_index_mpc_weights, iStage] = np.hstack(\n (0.05 * self.mpc_weights_wp_, self.mpc_weights_input_,\n self.mpc_weights_coll_, self.mpc_weights_slack_)\n )\n # set parameters\n self.mpc_nlp_param_ = np.hstack((self.bebop_state_current_[:self.mpc_nx_],\n np.transpose(parameters_all_stage).reshape(-1)))\n\n def run_nlp_solver(self):\n # initialize solver\n if self.mpc_feasible_ is True:\n self.initialize_nlp_solver()\n else:\n self.reset_nlp_solver()\n\n # set solver params\n self.set_nlp_params()\n\n # call the solver\n time_before_solver = rospy.get_rostime()\n nlp_sol = self.nlp_solver_complied_(x0=self.mpc_nlp_traj_,\n p=self.mpc_nlp_param_,\n lbx=self.nlp_lbx_,\n ubx=self.nlp_ubx_,\n lbg=self.nlp_lbg_,\n ubg=self.nlp_ubg_)\n\n # deal with infeasibility\n if self.nlp_solver_complied_.stats()['success'] is False: # if infeasible\n self.mpc_feasible_ = False\n self.mpc_success_ = False\n rospy.logwarn(\"MPC infeasible!\")\n else:\n self.mpc_feasible_ = True\n self.mpc_success_ = True\n\n solver_time = (rospy.get_rostime() - time_before_solver).to_sec() * 1000.0\n solver_iter = self.nlp_solver_complied_.stats()['iter_count']\n rospy.loginfo('MPC feasible, iter: %d, computation time: %.1f ms.', solver_iter, solver_time)\n\n # obtain solution\n traj_opt = nlp_sol['x'].reshape((self.mpc_nu_ + self.mpc_nx_ + self.mpc_ns_, self.mpc_N_))\n self.mpc_u_plan_ = np.array(traj_opt[:self.mpc_nu_, :])\n self.mpc_x_plan_ = np.array(traj_opt[self.mpc_nu_:self.mpc_nu_+self.mpc_nx_, :])\n self.mpc_s_plan_ = np.array(traj_opt[self.mpc_nu_+self.mpc_nx_:, :])\n self.mpc_u_now_ = self.mpc_u_plan_[:, 0]\n\n def calculate_bebop_cmd_vel(self):\n # if odom received\n time_now = rospy.Time.now()\n if (time_now - self.odom_received_time_).to_sec() > self.odom_time_out_:\n rospy.logwarn('Odometry time out! Will try to make the MAV hover.')\n self.bebop_pose_goal_ = np.concatenate((self.bebop_state_current_[0:3], self.bebop_state_current_[8:9]))\n else:\n # run the nlp solver\n self.run_nlp_solver()\n\n # control commands\n if self.mpc_success_ is True:\n roll_cmd = self.mpc_u_now_[0]\n pitch_cmd = self.mpc_u_now_[1]\n vz_cmd = self.mpc_u_now_[2]\n else:\n rospy.logwarn('MPC failure! 
Default commands sent.')\n roll_cmd = 0.0\n pitch_cmd = 0.0\n vz_cmd = 0.0\n\n # yaw control\n yaw_now = self.bebop_state_current_[8]\n yaw_ref = self.bebop_pose_goal_[3]\n yaw_error = yaw_ref - yaw_now\n while np.abs(yaw_error) > np.pi:\n if yaw_error > 0.0:\n yaw_error = yaw_error - 2.0 * np.pi\n else:\n yaw_error = yaw_error + 2.0 * np.pi\n yawrate_cmd = self.K_yaw_ * yaw_error\n yawrate_cmd = np.clip(yawrate_cmd, -self.yawrate_max_, self.yawrate_max_)\n\n # obtained command\n self.bebop_cmd_vel_ = np.array([roll_cmd, pitch_cmd, vz_cmd, yawrate_cmd])\n\n def pub_bebop_cmd_vel(self):\n try:\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = self.bebop_cmd_vel_[1] / self.pitch_max_ # pitch to move along x\n cmd_vel_msg.linear.y = -self.bebop_cmd_vel_[0] / self.roll_max_ # roll to move along y\n cmd_vel_msg.linear.z = self.bebop_cmd_vel_[2] / self.vz_max_\n cmd_vel_msg.angular.z = self.bebop_cmd_vel_[3] / self.yawrate_max_\n self.bebop_cmd_vel_pub_.publish(cmd_vel_msg)\n except:\n rospy.logwarn('Bebop cmd_vel command not published!')\n\n def pub_mpc_traj_plan_vis(self):\n try:\n marker_msg = Marker()\n marker_msg.header.frame_id = \"map\"\n marker_msg.header.stamp = rospy.Time.now()\n marker_msg.type = 8\n marker_msg.action = 0\n # set the scale of the marker\n marker_msg.scale.x = 0.2\n marker_msg.scale.y = 0.2\n marker_msg.scale.z = 0.2\n # set the color\n marker_msg.color.r = 1.0\n marker_msg.color.g = 0.0\n marker_msg.color.b = 0.0\n marker_msg.color.a = 1.0\n # Set the pose of the marker\n marker_msg.pose.position.x = 0.0\n marker_msg.pose.position.y = 0.0\n marker_msg.pose.position.z = 0.0\n marker_msg.pose.orientation.x = 0\n marker_msg.pose.orientation.y = 0\n marker_msg.pose.orientation.z = 0\n marker_msg.pose.orientation.w = 1.0\n # points\n mpc_traj_plan_points = []\n for iStage in range(0, self.mpc_N_):\n point = Point(self.mpc_x_plan_[0, iStage], self.mpc_x_plan_[1, iStage], self.mpc_x_plan_[2, iStage])\n mpc_traj_plan_points.append(point)\n marker_msg.points = mpc_traj_plan_points\n self.mpc_traj_plan_vis_pub_.publish(marker_msg)\n except:\n rospy.logwarn(\"MPC trajectory plan not published!\")\n\n\ndef bebop_nmpc_control():\n # create a node\n rospy.loginfo(\"Starting Bebop NMPC Control...\")\n rospy.init_node(\"bebop_nmpc_control_node\", anonymous=False)\n hz = 50\n rate = rospy.Rate(hz)\n rospy.sleep(1.0)\n\n # formulation\n mpc_form_param = BebopNmpcFormulationParam()\n\n # control\n bebop_nmpc = BebopNmpcControl(mpc_form_param)\n\n while not rospy.is_shutdown():\n if bebop_nmpc.received_first_odom_ is False:\n rospy.logwarn('Waiting for first Odometry!')\n elif bebop_nmpc.received_first_goal_ is False:\n rospy.logwarn('Waiting for first goal pose!')\n else:\n bebop_nmpc.calculate_bebop_cmd_vel()\n bebop_nmpc.pub_bebop_cmd_vel()\n bebop_nmpc.pub_mpc_traj_plan_vis()\n rate.sleep()\n\n\nif __name__ == \"__main__\":\n bebop_nmpc_control()\n", "step-ids": [ 10, 12, 13, 15, 18 ] }
[ 10, 12, 13, 15, 18 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> print('{:>3}-й день: {:.3}'.format(day, distance)) while target > distance: day += 1 distance += distance / 10 print('{:>3}-й день: {:.3}'.format(day, distance)) print('Ответ: на {}-й день спортсмен достиг результата — не менее {} км.'. format(day, target)) <|reserved_special_token_1|> distance = float(input('Введите начальную дистанцию: ')) target = int(input('Введите целевую дистанцию: ')) day = 1 print('{:>3}-й день: {:.3}'.format(day, distance)) while target > distance: day += 1 distance += distance / 10 print('{:>3}-й день: {:.3}'.format(day, distance)) print('Ответ: на {}-й день спортсмен достиг результата — не менее {} км.'. format(day, target)) <|reserved_special_token_1|> distance = float(input("Введите начальную дистанцию: ")) target = int(input("Введите целевую дистанцию: ")) day = 1 print("{:>3}-й день: {:.3}".format(day, distance)) # некрасивенько while target > distance: day += 1 distance += distance / 10 print("{:>3}-й день: {:.3}".format(day, distance)) print("Ответ: на {}-й день спортсмен достиг результата — не менее {} км.".format(day, target))
flexible
{ "blob_id": "9033ba0a19d765a83737d59289735a9ffd02abb1", "index": 7519, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint('{:>3}-й день: {:.3}'.format(day, distance))\nwhile target > distance:\n day += 1\n distance += distance / 10\n print('{:>3}-й день: {:.3}'.format(day, distance))\nprint('Ответ: на {}-й день спортсмен достиг результата — не менее {} км.'.\n format(day, target))\n", "step-3": "distance = float(input('Введите начальную дистанцию: '))\ntarget = int(input('Введите целевую дистанцию: '))\nday = 1\nprint('{:>3}-й день: {:.3}'.format(day, distance))\nwhile target > distance:\n day += 1\n distance += distance / 10\n print('{:>3}-й день: {:.3}'.format(day, distance))\nprint('Ответ: на {}-й день спортсмен достиг результата — не менее {} км.'.\n format(day, target))\n", "step-4": "distance = float(input(\"Введите начальную дистанцию: \"))\ntarget = int(input(\"Введите целевую дистанцию: \"))\n\nday = 1\nprint(\"{:>3}-й день: {:.3}\".format(day, distance)) # некрасивенько\n\n\nwhile target > distance:\n day += 1\n distance += distance / 10\n print(\"{:>3}-й день: {:.3}\".format(day, distance))\n\nprint(\"Ответ: на {}-й день спортсмен достиг результата — не менее {} км.\".format(day, target))", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import os import h5py import numpy as np from keras import backend as K from keras.layers import Activation, BatchNormalization, Conv2D, Dense, Dot, \ Dropout, Flatten, Input, MaxPooling2D, GlobalAveragePooling2D from keras import regularizers from keras.layers import Average as KerasAverage from keras.models import Sequential, Model from keras.optimizers import Adam, SGD from keras.engine.topology import Layer from .layers import LayerNormalization, CustomSoftmax from .tf_implementations.loss_functions import loss_factory class TotalReshape(Layer): def __init__(self, target_shape, **kwargs): self.target_shape = target_shape super(TotalReshape, self).__init__(**kwargs) def compute_output_shape(self, input_shape): return tuple( x if x != -1 else None for x in self.target_shape ) def call(self, x): return K.reshape(x, self.target_shape) class BaseReducer(Layer): def __init__(self, **kwargs): super(BaseReducer, self).__init__(**kwargs) def compute_output_shape(self, input_shape): return input_shape[:-1] class Average(BaseReducer): def call(self, x): return K.mean(x, axis=-1) class Max(BaseReducer): def call(self, x): return K.max(x, axis=-1) class TopKAverage(BaseReducer): def __init__(self, k, **kwargs): self.k = k super(TopKAverage, self).__init__(**kwargs) def call(self, x): if K.backend() == "tensorflow": tf = K.tf x, _ = tf.nn.top_k(x, self.k, sorted=False) return K.mean(x, axis=-1) else: raise NotImplementedError("TopKAverage is not implemented for " " %s backend" % (K.backend(),)) def reducer_factory(reducer, k=3): # Set the type of the reducer to be used if reducer == "max": return Max() elif reducer == "average": return Average() elif reducer == "topK": return TopKAverage(k) def mae(y_true, y_pred): """ Implementation of Mean average error """ return K.mean(K.abs(y_true - y_pred)) def mde(y_true, y_pred): return K.mean(K.cast( K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1)), K.floatx() )) def create_simple_cnn(input_shape, kernel_regularizer=None): common_params = dict( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer ) return Sequential([ Conv2D(input_shape=input_shape, **common_params), BatchNormalization(), Activation("relu"), Conv2D(**common_params), BatchNormalization(), Activation("relu"), Conv2D(**common_params), BatchNormalization(), Activation("relu"), Conv2D(**common_params), BatchNormalization(), Activation("relu"), Conv2D(**common_params), BatchNormalization() ]) def create_simple_cnn_ln(input_shape, kernel_regularizer=None): common_params = dict( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer ) return Sequential([ Conv2D(input_shape=input_shape, **common_params), LayerNormalization(), Activation("relu"), Conv2D(**common_params), LayerNormalization(), Activation("relu"), Conv2D(**common_params), LayerNormalization(), Activation("relu"), Conv2D(**common_params), LayerNormalization(), Activation("relu"), Conv2D(**common_params), LayerNormalization() ]) def create_dilated_cnn_receptive_field_25( input_shape, kernel_regularizer=None ): return Sequential([ Conv2D( filters=32, kernel_size=5, input_shape=input_shape, kernel_regularizer=kernel_regularizer ), BatchNormalization(), Activation("relu"), Conv2D( filters=32, kernel_size=5, kernel_regularizer=kernel_regularizer ), BatchNormalization(), Activation("relu"), Conv2D( filters=32, kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate=2 ), BatchNormalization(), Activation("relu"), Conv2D( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer, ), 
BatchNormalization(), Activation("relu"), Conv2D( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer ), BatchNormalization(), Activation("relu"), Conv2D( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer ), BatchNormalization(), Activation("relu"), Conv2D( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer ), BatchNormalization() ]) def create_dilated_cnn_receptive_field_25_with_tanh( input_shape, kernel_regularizer=None ): return Sequential([ Conv2D( filters=32, kernel_size=5, input_shape=input_shape, kernel_regularizer=kernel_regularizer ), BatchNormalization(), Activation("tanh"), Conv2D( filters=32, kernel_size=5, kernel_regularizer=kernel_regularizer ), BatchNormalization(), Activation("tanh"), Conv2D( filters=32, kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate=2 ), BatchNormalization(), Activation("tanh"), Conv2D( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer, ), BatchNormalization(), Activation("tanh"), Conv2D( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer ), BatchNormalization(), Activation("tanh"), Conv2D( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer ), BatchNormalization(), Activation("tanh"), Conv2D( filters=32, kernel_size=3, kernel_regularizer=kernel_regularizer ), BatchNormalization() ]) def create_hartmann_cnn(input_shape, kernel_regularizer=None): return Sequential([ Conv2D(filters=32, kernel_size=5, input_shape=input_shape), Activation("tanh"), MaxPooling2D(pool_size=(2, 2)), Conv2D(filters=64, kernel_size=5), Activation("tanh"), MaxPooling2D(pool_size=(2, 2)) ]) def cnn_factory(name): cnn_factories = { "simple_cnn": create_simple_cnn, "simple_cnn_ln": create_simple_cnn_ln, "dilated_cnn_receptive_field_25": create_dilated_cnn_receptive_field_25, "dilated_cnn_receptive_field_25_with_tanh": create_dilated_cnn_receptive_field_25_with_tanh, "hartmann_cnn": create_hartmann_cnn } return cnn_factories[name] def optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1): # Set the type of optimizer to be used if optimizer == "Adam": return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue) elif optimizer == "SGD": return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm, clipvalue=clipvalue) def kernel_regularizer_factory(regularizer_factor): if regularizer_factor == 0.0: return None else: return regularizers.l2(regularizer_factor) def build_simple_cnn( input_shape, create_cnn, optimizer="Adam", lr=1e-3, momentum=None, clipnorm=0.0, loss="mse", reducer="average", merge_layer="dot-product", weight_decay=None, weight_file=None ): # Make sure that we have a proper input shape # TODO: Maybe change this to 3, because we finally need only the # patch_shape? 
assert len(input_shape) == 5 # Unpack the input shape to make the code more readable D, N, W, H, C = input_shape model = create_cnn( input_shape=(None, None, C), kernel_regularizer=weight_decay ) model.compile( optimizer=optimizer_factory( optimizer, lr=lr, momentum=momentum, clipnorm=clipnorm ), loss=loss_factory(loss) ) # If there is a weight file specified load the weights if weight_file: try: f = h5py.File(weight_file, "r") keys = [os.path.join(model.name, w.name) for l in model.layers for w in l.weights] weights = [f[os.path.join("model_weights", k)][:] for k in keys] model.set_weights(weights) except: model.load_weights(weight_file, by_name=True) return model def build_simple_nn_for_training( input_shape, create_cnn, optimizer="Adam", lr=1e-3, momentum=None, clipnorm=0.0, loss="emd", reducer="average", merge_layer="dot-product", weight_decay=None, weight_file=None ): # Make sure that we have a proper input shape assert len(input_shape) == 5 # Unpack the input shape to make the code more readable # print(input_shape) input_shape=list(input_shape) for i in range(len(input_shape)): if input_shape[i]!=None: input_shape[i]=int(input_shape[i]) input_shape=tuple(input_shape) D, N, W, H, C = input_shape # Create the two stream inputs x1_in = Input(shape=input_shape) x2_in = Input(shape=input_shape) # Reshape them for input in the CNN x1 = TotalReshape((-1, W, H, C))(x1_in) x2 = TotalReshape((-1, W, H, C))(x2_in) # Create the CNN and extract features from both streams cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay) x1 = Flatten()(cnn(x1)) x2 = Flatten()(cnn(x2)) # Compute a kind of similarity between the features of the two streams x = Dot(axes=-1, normalize=(merge_layer == "cosine-similarity"))([x1, x2]) # Reshape them back into their semantic shape (depth planes, patches, etc) x = TotalReshape((-1, D, N))(x) # Compute the final similarity scores for each depth plane x = reducer_factory(reducer)(x) # Compute the final output y = Activation("softmax")(x) model = Model(inputs=[x1_in, x2_in], outputs=y) model.compile( optimizer=optimizer_factory( optimizer, lr=lr, momentum=momentum, clipnorm=clipnorm ), loss=loss_factory(loss), metrics=["accuracy", mae, mde] ) if weight_file: model.load_weights(weight_file, by_name=True) return model def build_hartmann_network( input_shape, create_cnn=create_hartmann_cnn, optimizer="SGD", lr=1e-3, momentum=None, clipnorm=0.0, loss=None, reducer=None, merge_layer=None, weight_decay=None, weight_file=None ): # Make sure that we have a proper input shape assert len(input_shape) == 3 # Unpack the input shape to make the code more readable H, W, C = input_shape # Create the feature extracting CNN cnn = create_hartmann_cnn(input_shape=(None, None, C)) # Create the similarity CNN sim = Sequential([ Conv2D( filters=2048, kernel_size=5, input_shape=K.int_shape(cnn.output)[1:] ), Activation("relu"), Conv2D(filters=2048, kernel_size=1), Activation("relu"), Conv2D(filters=2, kernel_size=1), Activation("softmax") ]) # Create the joint model for training x_in = [Input(shape=input_shape) for i in range(5)] x = [cnn(xi) for xi in x_in] x = KerasAverage()(x) y = sim(x) model = Model(inputs=x_in, outputs=y) # Compile all the models model.compile( optimizer=optimizer_factory( optimizer, lr=lr, momentum=momentum, clipnorm=clipnorm ), loss="categorical_crossentropy", metrics=["accuracy"] ) cnn.compile("sgd", "mse") # Just so that we can run predict() sim.compile("sgd", "mse") # Attach the cnn and sim to the model in case someone wants to use them model.cnn 
= cnn model.sim = sim if weight_file: model.load_weights(weight_file, by_name=True) return model def get_nn(name): models = { "simple_cnn": build_simple_cnn, "simple_nn_for_training": build_simple_nn_for_training, "hartmann": build_hartmann_network } return models[name]
normal
{ "blob_id": "0eefae7e0d341d74154bbe480f5ed766829e3ce3", "index": 3734, "step-1": "<mask token>\n\n\nclass TotalReshape(Layer):\n\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return tuple(x if x != -1 else None for x in self.target_shape)\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == 'tensorflow':\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\n 'TopKAverage is not implemented for %s backend' % (K.\n backend(),))\n\n\n<mask token>\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization()])\n\n\n<mask token>\n\n\ndef cnn_factory(name):\n cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':\n create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':\n create_dilated_cnn_receptive_field_25,\n 'dilated_cnn_receptive_field_25_with_tanh':\n create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':\n create_hartmann_cnn}\n return cnn_factories[name]\n\n\n<mask token>\n\n\ndef build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',\n lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',\n merge_layer='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n input_shape = list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i] != None:\n input_shape[i] = int(input_shape[i])\n input_shape = tuple(input_shape)\n D, N, W, H, C = input_shape\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])\n x = TotalReshape((-1, D, N))(x)\n x = reducer_factory(reducer)(x)\n y = Activation('softmax')(x)\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[\n 'accuracy', mae, mde])\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TotalReshape(Layer):\n\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def 
compute_output_shape(self, input_shape):\n return tuple(x if x != -1 else None for x in self.target_shape)\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == 'tensorflow':\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\n 'TopKAverage is not implemented for %s backend' % (K.\n backend(),))\n\n\n<mask token>\n\n\ndef mae(y_true, y_pred):\n \"\"\" Implementation of Mean average error\n \"\"\"\n return K.mean(K.abs(y_true - y_pred))\n\n\n<mask token>\n\n\ndef create_simple_cnn(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization()])\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25(input_shape, kernel_regularizer=None\n ):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization()])\n\n\n<mask token>\n\n\ndef cnn_factory(name):\n cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':\n create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':\n create_dilated_cnn_receptive_field_25,\n 'dilated_cnn_receptive_field_25_with_tanh':\n create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':\n create_hartmann_cnn}\n return cnn_factories[name]\n\n\n<mask token>\n\n\ndef build_simple_nn_for_training(input_shape, 
create_cnn, optimizer='Adam',\n lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',\n merge_layer='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n input_shape = list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i] != None:\n input_shape[i] = int(input_shape[i])\n input_shape = tuple(input_shape)\n D, N, W, H, C = input_shape\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])\n x = TotalReshape((-1, D, N))(x)\n x = reducer_factory(reducer)(x)\n y = Activation('softmax')(x)\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[\n 'accuracy', mae, mde])\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TotalReshape(Layer):\n\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return tuple(x if x != -1 else None for x in self.target_shape)\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == 'tensorflow':\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\n 'TopKAverage is not implemented for %s backend' % (K.\n backend(),))\n\n\n<mask token>\n\n\ndef mae(y_true, y_pred):\n \"\"\" Implementation of Mean average error\n \"\"\"\n return K.mean(K.abs(y_true - y_pred))\n\n\ndef mde(y_true, y_pred):\n return K.mean(K.cast(K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred,\n axis=1)), K.floatx()))\n\n\ndef create_simple_cnn(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization()])\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), 
Activation('relu'), Conv2D(**common_params),\n LayerNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25(input_shape, kernel_regularizer=None\n ):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25_with_tanh(input_shape,\n kernel_regularizer=None):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization()])\n\n\ndef create_hartmann_cnn(input_shape, kernel_regularizer=None):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape), Activation('tanh'), MaxPooling2D(pool_size=(2, 2)),\n Conv2D(filters=64, kernel_size=5), Activation('tanh'), MaxPooling2D\n (pool_size=(2, 2))])\n\n\ndef cnn_factory(name):\n cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':\n create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':\n create_dilated_cnn_receptive_field_25,\n 'dilated_cnn_receptive_field_25_with_tanh':\n create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':\n create_hartmann_cnn}\n return cnn_factories[name]\n\n\n<mask token>\n\n\ndef build_simple_cnn(input_shape, create_cnn, optimizer='Adam', lr=0.001,\n momentum=None, clipnorm=0.0, loss='mse', reducer='average', merge_layer\n ='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n D, N, W, H, C = input_shape\n model = create_cnn(input_shape=(None, None, C), kernel_regularizer=\n weight_decay)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss))\n if weight_file:\n try:\n f = h5py.File(weight_file, 'r')\n keys = [os.path.join(model.name, w.name) for l in model.layers for\n w in l.weights]\n weights = [f[os.path.join('model_weights', k)][:] for k in keys]\n model.set_weights(weights)\n except:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\ndef 
build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',\n lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',\n merge_layer='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n input_shape = list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i] != None:\n input_shape[i] = int(input_shape[i])\n input_shape = tuple(input_shape)\n D, N, W, H, C = input_shape\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])\n x = TotalReshape((-1, D, N))(x)\n x = reducer_factory(reducer)(x)\n y = Activation('softmax')(x)\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[\n 'accuracy', mae, mde])\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass TotalReshape(Layer):\n\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return tuple(x if x != -1 else None for x in self.target_shape)\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == 'tensorflow':\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\n 'TopKAverage is not implemented for %s backend' % (K.\n backend(),))\n\n\ndef reducer_factory(reducer, k=3):\n if reducer == 'max':\n return Max()\n elif reducer == 'average':\n return Average()\n elif reducer == 'topK':\n return TopKAverage(k)\n\n\ndef mae(y_true, y_pred):\n \"\"\" Implementation of Mean average error\n \"\"\"\n return K.mean(K.abs(y_true - y_pred))\n\n\ndef mde(y_true, y_pred):\n return K.mean(K.cast(K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred,\n axis=1)), K.floatx()))\n\n\ndef create_simple_cnn(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization()])\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(), Activation('relu'), 
Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25(input_shape, kernel_regularizer=None\n ):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25_with_tanh(input_shape,\n kernel_regularizer=None):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization()])\n\n\ndef create_hartmann_cnn(input_shape, kernel_regularizer=None):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape), Activation('tanh'), MaxPooling2D(pool_size=(2, 2)),\n Conv2D(filters=64, kernel_size=5), Activation('tanh'), MaxPooling2D\n (pool_size=(2, 2))])\n\n\ndef cnn_factory(name):\n cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':\n create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':\n create_dilated_cnn_receptive_field_25,\n 'dilated_cnn_receptive_field_25_with_tanh':\n create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':\n create_hartmann_cnn}\n return cnn_factories[name]\n\n\ndef optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):\n if optimizer == 'Adam':\n return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)\n elif optimizer == 'SGD':\n return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm, clipvalue=\n clipvalue)\n\n\ndef kernel_regularizer_factory(regularizer_factor):\n if regularizer_factor == 0.0:\n return None\n else:\n return regularizers.l2(regularizer_factor)\n\n\ndef build_simple_cnn(input_shape, create_cnn, optimizer='Adam', lr=0.001,\n momentum=None, clipnorm=0.0, loss='mse', reducer='average', merge_layer\n ='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n 
D, N, W, H, C = input_shape\n model = create_cnn(input_shape=(None, None, C), kernel_regularizer=\n weight_decay)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss))\n if weight_file:\n try:\n f = h5py.File(weight_file, 'r')\n keys = [os.path.join(model.name, w.name) for l in model.layers for\n w in l.weights]\n weights = [f[os.path.join('model_weights', k)][:] for k in keys]\n model.set_weights(weights)\n except:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\ndef build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',\n lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',\n merge_layer='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n input_shape = list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i] != None:\n input_shape[i] = int(input_shape[i])\n input_shape = tuple(input_shape)\n D, N, W, H, C = input_shape\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])\n x = TotalReshape((-1, D, N))(x)\n x = reducer_factory(reducer)(x)\n y = Activation('softmax')(x)\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[\n 'accuracy', mae, mde])\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\ndef build_hartmann_network(input_shape, create_cnn=create_hartmann_cnn,\n optimizer='SGD', lr=0.001, momentum=None, clipnorm=0.0, loss=None,\n reducer=None, merge_layer=None, weight_decay=None, weight_file=None):\n assert len(input_shape) == 3\n H, W, C = input_shape\n cnn = create_hartmann_cnn(input_shape=(None, None, C))\n sim = Sequential([Conv2D(filters=2048, kernel_size=5, input_shape=K.\n int_shape(cnn.output)[1:]), Activation('relu'), Conv2D(filters=2048,\n kernel_size=1), Activation('relu'), Conv2D(filters=2, kernel_size=1\n ), Activation('softmax')])\n x_in = [Input(shape=input_shape) for i in range(5)]\n x = [cnn(xi) for xi in x_in]\n x = KerasAverage()(x)\n y = sim(x)\n model = Model(inputs=x_in, outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss='categorical_crossentropy',\n metrics=['accuracy'])\n cnn.compile('sgd', 'mse')\n sim.compile('sgd', 'mse')\n model.cnn = cnn\n model.sim = sim\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\ndef get_nn(name):\n models = {'simple_cnn': build_simple_cnn, 'simple_nn_for_training':\n build_simple_nn_for_training, 'hartmann': build_hartmann_network}\n return models[name]\n", "step-5": "import os\n\nimport h5py\nimport numpy as np\n\nfrom keras import backend as K\nfrom keras.layers import Activation, BatchNormalization, Conv2D, Dense, Dot, \\\n Dropout, Flatten, Input, MaxPooling2D, GlobalAveragePooling2D\nfrom keras import regularizers\nfrom keras.layers import Average as KerasAverage\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam, SGD\nfrom keras.engine.topology import Layer\n\nfrom .layers import LayerNormalization, CustomSoftmax\nfrom 
.tf_implementations.loss_functions import loss_factory\n\n\nclass TotalReshape(Layer):\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return tuple(\n x if x != -1 else None\n for x in self.target_shape\n )\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == \"tensorflow\":\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\"TopKAverage is not implemented for \"\n \" %s backend\" % (K.backend(),))\n\n\ndef reducer_factory(reducer, k=3):\n # Set the type of the reducer to be used\n if reducer == \"max\":\n return Max()\n elif reducer == \"average\":\n return Average()\n elif reducer == \"topK\":\n return TopKAverage(k)\n\n\ndef mae(y_true, y_pred):\n \"\"\" Implementation of Mean average error\n \"\"\"\n return K.mean(K.abs(y_true - y_pred))\n\n\ndef mde(y_true, y_pred):\n return K.mean(K.cast(\n K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1)),\n K.floatx()\n ))\n\n\ndef create_simple_cnn(input_shape, kernel_regularizer=None):\n common_params = dict(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n )\n return Sequential([\n Conv2D(input_shape=input_shape, **common_params),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n BatchNormalization()\n ])\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n )\n return Sequential([\n Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n LayerNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n LayerNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n LayerNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n LayerNormalization()\n ])\n\n\ndef create_dilated_cnn_receptive_field_25(\n input_shape,\n kernel_regularizer=None\n):\n return Sequential([\n Conv2D(\n filters=32,\n kernel_size=5,\n input_shape=input_shape,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=5,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=5,\n kernel_regularizer=kernel_regularizer,\n dilation_rate=2\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer,\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n 
BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization()\n ])\n\n\ndef create_dilated_cnn_receptive_field_25_with_tanh(\n input_shape,\n kernel_regularizer=None\n):\n return Sequential([\n Conv2D(\n filters=32,\n kernel_size=5,\n input_shape=input_shape,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=5,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=5,\n kernel_regularizer=kernel_regularizer,\n dilation_rate=2\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer,\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization()\n ])\n\n\ndef create_hartmann_cnn(input_shape, kernel_regularizer=None):\n return Sequential([\n Conv2D(filters=32, kernel_size=5, input_shape=input_shape),\n Activation(\"tanh\"),\n MaxPooling2D(pool_size=(2, 2)),\n Conv2D(filters=64, kernel_size=5),\n Activation(\"tanh\"),\n MaxPooling2D(pool_size=(2, 2))\n ])\n\n\ndef cnn_factory(name):\n cnn_factories = {\n \"simple_cnn\": create_simple_cnn,\n \"simple_cnn_ln\": create_simple_cnn_ln,\n \"dilated_cnn_receptive_field_25\":\n create_dilated_cnn_receptive_field_25,\n \"dilated_cnn_receptive_field_25_with_tanh\":\n create_dilated_cnn_receptive_field_25_with_tanh,\n \"hartmann_cnn\": create_hartmann_cnn\n }\n return cnn_factories[name]\n\n\ndef optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):\n # Set the type of optimizer to be used\n if optimizer == \"Adam\":\n return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)\n elif optimizer == \"SGD\":\n return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm,\n clipvalue=clipvalue)\n\n\ndef kernel_regularizer_factory(regularizer_factor):\n if regularizer_factor == 0.0:\n return None\n else:\n return regularizers.l2(regularizer_factor)\n\n\ndef build_simple_cnn(\n input_shape,\n create_cnn,\n optimizer=\"Adam\",\n lr=1e-3,\n momentum=None,\n clipnorm=0.0,\n loss=\"mse\",\n reducer=\"average\",\n merge_layer=\"dot-product\",\n weight_decay=None,\n weight_file=None\n):\n # Make sure that we have a proper input shape\n # TODO: Maybe change this to 3, because we finally need only the\n # patch_shape?\n assert len(input_shape) == 5\n\n # Unpack the input shape to make the code more readable\n D, N, W, H, C = input_shape\n\n model = create_cnn(\n input_shape=(None, None, C),\n kernel_regularizer=weight_decay\n )\n model.compile(\n optimizer=optimizer_factory(\n optimizer,\n lr=lr,\n momentum=momentum,\n clipnorm=clipnorm\n ),\n loss=loss_factory(loss)\n )\n\n # If there is a weight file specified load the weights\n if weight_file:\n try:\n f = h5py.File(weight_file, \"r\")\n keys = [os.path.join(model.name, w.name)\n for l in model.layers for w in l.weights]\n weights = [f[os.path.join(\"model_weights\", k)][:] for k in 
keys]\n\n model.set_weights(weights)\n except:\n model.load_weights(weight_file, by_name=True)\n\n return model\n\n\ndef build_simple_nn_for_training(\n input_shape,\n create_cnn,\n optimizer=\"Adam\",\n lr=1e-3,\n momentum=None,\n clipnorm=0.0,\n loss=\"emd\",\n reducer=\"average\",\n merge_layer=\"dot-product\",\n weight_decay=None,\n weight_file=None\n):\n # Make sure that we have a proper input shape\n assert len(input_shape) == 5\n\n # Unpack the input shape to make the code more readable\n # print(input_shape)\n input_shape=list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i]!=None:\n input_shape[i]=int(input_shape[i])\n input_shape=tuple(input_shape)\n D, N, W, H, C = input_shape\n\n # Create the two stream inputs\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n\n # Reshape them for input in the CNN\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n\n # Create the CNN and extract features from both streams\n cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n\n # Compute a kind of similarity between the features of the two streams\n x = Dot(axes=-1, normalize=(merge_layer == \"cosine-similarity\"))([x1, x2])\n\n # Reshape them back into their semantic shape (depth planes, patches, etc)\n x = TotalReshape((-1, D, N))(x)\n\n # Compute the final similarity scores for each depth plane\n x = reducer_factory(reducer)(x)\n\n # Compute the final output\n y = Activation(\"softmax\")(x)\n\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(\n optimizer=optimizer_factory(\n optimizer,\n lr=lr,\n momentum=momentum,\n clipnorm=clipnorm\n ),\n loss=loss_factory(loss),\n metrics=[\"accuracy\", mae, mde]\n )\n\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n\n return model\n\n\ndef build_hartmann_network(\n input_shape,\n create_cnn=create_hartmann_cnn,\n optimizer=\"SGD\",\n lr=1e-3,\n momentum=None,\n clipnorm=0.0,\n loss=None,\n reducer=None,\n merge_layer=None,\n weight_decay=None,\n weight_file=None\n):\n # Make sure that we have a proper input shape\n assert len(input_shape) == 3\n\n # Unpack the input shape to make the code more readable\n H, W, C = input_shape\n\n # Create the feature extracting CNN\n cnn = create_hartmann_cnn(input_shape=(None, None, C))\n\n # Create the similarity CNN\n sim = Sequential([\n Conv2D(\n filters=2048,\n kernel_size=5,\n input_shape=K.int_shape(cnn.output)[1:]\n ),\n Activation(\"relu\"),\n Conv2D(filters=2048, kernel_size=1),\n Activation(\"relu\"),\n Conv2D(filters=2, kernel_size=1),\n Activation(\"softmax\")\n ])\n\n # Create the joint model for training\n x_in = [Input(shape=input_shape) for i in range(5)]\n x = [cnn(xi) for xi in x_in]\n x = KerasAverage()(x)\n y = sim(x)\n model = Model(inputs=x_in, outputs=y)\n\n # Compile all the models\n model.compile(\n optimizer=optimizer_factory(\n optimizer,\n lr=lr,\n momentum=momentum,\n clipnorm=clipnorm\n ),\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n cnn.compile(\"sgd\", \"mse\") # Just so that we can run predict()\n sim.compile(\"sgd\", \"mse\")\n\n # Attach the cnn and sim to the model in case someone wants to use them\n model.cnn = cnn\n model.sim = sim\n\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n\n return model\n\n\ndef get_nn(name):\n models = {\n \"simple_cnn\": build_simple_cnn,\n \"simple_nn_for_training\": build_simple_nn_for_training,\n \"hartmann\": 
build_hartmann_network\n }\n return models[name]\n", "step-ids": [ 17, 20, 24, 29, 31 ] }
[ 17, 20, 24, 29, 31 ]
from mikeio.spatial import GeometryPoint2D, GeometryPoint3D # https://www.ogc.org/standard/sfa/ def test_point2d_wkt(): p = GeometryPoint2D(10, 20) assert p.wkt == "POINT (10 20)" p = GeometryPoint2D(x=-5642.5, y=120.1) assert p.wkt == "POINT (-5642.5 120.1)" def test_point3d_wkt(): p = GeometryPoint3D(10, 20, 30) assert p.wkt == "POINT Z (10 20 30)" def test_point2d_to_shapely(): p = GeometryPoint2D(10, 20) sp = p.to_shapely() assert sp.x == 10 assert sp.y == 20 assert sp.wkt == p.wkt def test_point3d_to_shapely(): p = GeometryPoint3D(10, 20, -1) sp = p.to_shapely() assert sp.x == 10 assert sp.y == 20 assert sp.z == -1 assert sp.wkt == p.wkt
normal
{ "blob_id": "ae45a4967a8ee63c27124d345ad4dc0c01033c0e", "index": 6749, "step-1": "<mask token>\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == 'POINT (10 20)'\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == 'POINT (-5642.5 120.1)'\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == 'POINT (10 20)'\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == 'POINT (-5642.5 120.1)'\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\ndef test_point3d_to_shapely():\n p = GeometryPoint3D(10, 20, -1)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.z == -1\n assert sp.wkt == p.wkt\n", "step-4": "from mikeio.spatial import GeometryPoint2D, GeometryPoint3D\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == 'POINT (10 20)'\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == 'POINT (-5642.5 120.1)'\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == 'POINT Z (10 20 30)'\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\ndef test_point3d_to_shapely():\n p = GeometryPoint3D(10, 20, -1)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.z == -1\n assert sp.wkt == p.wkt\n", "step-5": "from mikeio.spatial import GeometryPoint2D, GeometryPoint3D\n\n# https://www.ogc.org/standard/sfa/\n\n\ndef test_point2d_wkt():\n p = GeometryPoint2D(10, 20)\n assert p.wkt == \"POINT (10 20)\"\n\n p = GeometryPoint2D(x=-5642.5, y=120.1)\n assert p.wkt == \"POINT (-5642.5 120.1)\"\n\n\ndef test_point3d_wkt():\n p = GeometryPoint3D(10, 20, 30)\n assert p.wkt == \"POINT Z (10 20 30)\"\n\n\ndef test_point2d_to_shapely():\n p = GeometryPoint2D(10, 20)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.wkt == p.wkt\n\n\ndef test_point3d_to_shapely():\n p = GeometryPoint3D(10, 20, -1)\n sp = p.to_shapely()\n assert sp.x == 10\n assert sp.y == 20\n assert sp.z == -1\n assert sp.wkt == p.wkt\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# Q. In How many ways N stair can be climb if allowesd steps are 1, 2 or 3. # triple Sort def noOfSteps(n, k): if n<0: return 0 if n == 0: return 1 t_steps = 0 for i in range(1, k+1): t_steps += noOfSteps(n-i, k) return t_steps def noOfStepsDP(n,k): dp = [0]*max((n+1),3) dp[0] = 1 dp[1] = 1 dp[2] = 2 for i in range(3, n+1): dp[i] = dp[i-1]+dp[i-2]+dp[i-3] return dp[n] n = 10 noOfSteps(n,3), noOfStepsDP(n,3)
normal
{ "blob_id": "6c2699ff8e739595a2648d53745dc3c788536d7b", "index": 1907, "step-1": "<mask token>\n\n\ndef noOfStepsDP(n, k):\n dp = [0] * max(n + 1, 3)\n dp[0] = 1\n dp[1] = 1\n dp[2] = 2\n for i in range(3, n + 1):\n dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]\n return dp[n]\n\n\n<mask token>\n", "step-2": "def noOfSteps(n, k):\n if n < 0:\n return 0\n if n == 0:\n return 1\n t_steps = 0\n for i in range(1, k + 1):\n t_steps += noOfSteps(n - i, k)\n return t_steps\n\n\ndef noOfStepsDP(n, k):\n dp = [0] * max(n + 1, 3)\n dp[0] = 1\n dp[1] = 1\n dp[2] = 2\n for i in range(3, n + 1):\n dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]\n return dp[n]\n\n\n<mask token>\n", "step-3": "def noOfSteps(n, k):\n if n < 0:\n return 0\n if n == 0:\n return 1\n t_steps = 0\n for i in range(1, k + 1):\n t_steps += noOfSteps(n - i, k)\n return t_steps\n\n\ndef noOfStepsDP(n, k):\n dp = [0] * max(n + 1, 3)\n dp[0] = 1\n dp[1] = 1\n dp[2] = 2\n for i in range(3, n + 1):\n dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]\n return dp[n]\n\n\n<mask token>\nnoOfSteps(n, 3), noOfStepsDP(n, 3)\n", "step-4": "def noOfSteps(n, k):\n if n < 0:\n return 0\n if n == 0:\n return 1\n t_steps = 0\n for i in range(1, k + 1):\n t_steps += noOfSteps(n - i, k)\n return t_steps\n\n\ndef noOfStepsDP(n, k):\n dp = [0] * max(n + 1, 3)\n dp[0] = 1\n dp[1] = 1\n dp[2] = 2\n for i in range(3, n + 1):\n dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]\n return dp[n]\n\n\nn = 10\nnoOfSteps(n, 3), noOfStepsDP(n, 3)\n", "step-5": "# Q. In How many ways N stair can be climb if allowesd steps are 1, 2 or 3.\r\n# triple Sort\r\n\r\ndef noOfSteps(n, k):\r\n if n<0: return 0\r\n if n == 0: return 1\r\n\r\n t_steps = 0\r\n for i in range(1, k+1):\r\n t_steps += noOfSteps(n-i, k)\r\n \r\n return t_steps\r\n\r\ndef noOfStepsDP(n,k):\r\n\r\n dp = [0]*max((n+1),3)\r\n\r\n dp[0] = 1\r\n dp[1] = 1\r\n dp[2] = 2\r\n\r\n for i in range(3, n+1):\r\n dp[i] = dp[i-1]+dp[i-2]+dp[i-3]\r\n \r\n return dp[n]\r\n\r\nn = 10\r\nnoOfSteps(n,3), noOfStepsDP(n,3)", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# Generated by Django 2.2.7 on 2019-11-23 18:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ml', '0003_auto_20191123_1835'), ] operations = [ migrations.AlterField( model_name='ml', name='file', field=models.ImageField(upload_to='images'), ), ]
normal
{ "blob_id": "2bf5ec4b4c0f0eed8364dcc9f1be599a804846f2", "index": 4981, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('ml', '0003_auto_20191123_1835')]\n operations = [migrations.AlterField(model_name='ml', name='file', field\n =models.ImageField(upload_to='images'))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('ml', '0003_auto_20191123_1835')]\n operations = [migrations.AlterField(model_name='ml', name='file', field\n =models.ImageField(upload_to='images'))]\n", "step-5": "# Generated by Django 2.2.7 on 2019-11-23 18:40\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ml', '0003_auto_20191123_1835'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='ml',\n name='file',\n field=models.ImageField(upload_to='images'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from crispy_forms.bootstrap import FormActions from crispy_forms.helper import FormHelper from crispy_forms.layout import Layout, Div, Submit from django import forms from django.forms import RadioSelect from django.urls import reverse from core.models import Person, Datapackage from core.utils import cancel_button class PersonModelForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.helper = FormHelper(self) if self.instance.pk: cancel_url = reverse('management:collaborator-detail', kwargs={'pk': self.instance.pk}) else: cancel_url = reverse('management:collaborator-list') self.helper.layout = Layout( Div( Div('full_name', css_class='col-6'), css_class='row' ), FormActions( Submit('save', 'Save'), cancel_button(cancel_url) ) ) class Meta: model = Person fields = ['full_name'] class DatapackageModelForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.helper = FormHelper(self) self.fields['status'].queryset = self.fields['status'].queryset.order_by('name') self.fields['collaborators'].queryset = self.fields['collaborators'].queryset.order_by('full_name') self.fields['collaborators'].help_text = 'Refresh page to show new collaborators. Hold down “Control”, or “Command” on a Mac, to select more than one' self.fields['collaborators'].widget.attrs = {'size': 10} collaborator_add_url = reverse('management:collaborator-add') self.fields['collaborators'].label = f'Collaborators <div class="float-right"><a target="_blank" href="{collaborator_add_url}"><i class="fas fa-user-plus"></i> Add collaborator <i class="fas fa-external-link-alt"></i></a></div>' self.helper.layout = Layout( Div( Div('status', css_class='col-6'), css_class='row' ), Div( Div('collaborators', css_class='col-6'), css_class='row' ), FormActions( Submit('save', 'Save'), cancel_button(reverse('management:datapackage-detail', kwargs={'uuid': self.instance.uuid})), ) ) class Meta: model = Datapackage fields = ['status', 'collaborators'] widgets = {'status': RadioSelect}
normal
{ "blob_id": "5a59108084d943f6faa07ffea1467dc19c3dd790", "index": 1101, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass DatapackageModelForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.fields['status'].queryset = self.fields['status'\n ].queryset.order_by('name')\n self.fields['collaborators'].queryset = self.fields['collaborators'\n ].queryset.order_by('full_name')\n self.fields['collaborators'].help_text = (\n 'Refresh page to show new collaborators. Hold down “Control”, or “Command” on a Mac, to select more than one'\n )\n self.fields['collaborators'].widget.attrs = {'size': 10}\n collaborator_add_url = reverse('management:collaborator-add')\n self.fields['collaborators'].label = (\n f'Collaborators <div class=\"float-right\"><a target=\"_blank\" href=\"{collaborator_add_url}\"><i class=\"fas fa-user-plus\"></i> Add collaborator <i class=\"fas fa-external-link-alt\"></i></a></div>'\n )\n self.helper.layout = Layout(Div(Div('status', css_class='col-6'),\n css_class='row'), Div(Div('collaborators', css_class='col-6'),\n css_class='row'), FormActions(Submit('save', 'Save'),\n cancel_button(reverse('management:datapackage-detail', kwargs={\n 'uuid': self.instance.uuid}))))\n\n\n class Meta:\n model = Datapackage\n fields = ['status', 'collaborators']\n widgets = {'status': RadioSelect}\n", "step-3": "<mask token>\n\n\nclass PersonModelForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = Person\n fields = ['full_name']\n\n\nclass DatapackageModelForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.fields['status'].queryset = self.fields['status'\n ].queryset.order_by('name')\n self.fields['collaborators'].queryset = self.fields['collaborators'\n ].queryset.order_by('full_name')\n self.fields['collaborators'].help_text = (\n 'Refresh page to show new collaborators. 
Hold down “Control”, or “Command” on a Mac, to select more than one'\n )\n self.fields['collaborators'].widget.attrs = {'size': 10}\n collaborator_add_url = reverse('management:collaborator-add')\n self.fields['collaborators'].label = (\n f'Collaborators <div class=\"float-right\"><a target=\"_blank\" href=\"{collaborator_add_url}\"><i class=\"fas fa-user-plus\"></i> Add collaborator <i class=\"fas fa-external-link-alt\"></i></a></div>'\n )\n self.helper.layout = Layout(Div(Div('status', css_class='col-6'),\n css_class='row'), Div(Div('collaborators', css_class='col-6'),\n css_class='row'), FormActions(Submit('save', 'Save'),\n cancel_button(reverse('management:datapackage-detail', kwargs={\n 'uuid': self.instance.uuid}))))\n\n\n class Meta:\n model = Datapackage\n fields = ['status', 'collaborators']\n widgets = {'status': RadioSelect}\n", "step-4": "from crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, Submit\nfrom django import forms\nfrom django.forms import RadioSelect\nfrom django.urls import reverse\nfrom core.models import Person, Datapackage\nfrom core.utils import cancel_button\n\n\nclass PersonModelForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n if self.instance.pk:\n cancel_url = reverse('management:collaborator-detail', kwargs={\n 'pk': self.instance.pk})\n else:\n cancel_url = reverse('management:collaborator-list')\n self.helper.layout = Layout(Div(Div('full_name', css_class='col-6'),\n css_class='row'), FormActions(Submit('save', 'Save'),\n cancel_button(cancel_url)))\n\n\n class Meta:\n model = Person\n fields = ['full_name']\n\n\nclass DatapackageModelForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.fields['status'].queryset = self.fields['status'\n ].queryset.order_by('name')\n self.fields['collaborators'].queryset = self.fields['collaborators'\n ].queryset.order_by('full_name')\n self.fields['collaborators'].help_text = (\n 'Refresh page to show new collaborators. 
Hold down “Control”, or “Command” on a Mac, to select more than one'\n )\n self.fields['collaborators'].widget.attrs = {'size': 10}\n collaborator_add_url = reverse('management:collaborator-add')\n self.fields['collaborators'].label = (\n f'Collaborators <div class=\"float-right\"><a target=\"_blank\" href=\"{collaborator_add_url}\"><i class=\"fas fa-user-plus\"></i> Add collaborator <i class=\"fas fa-external-link-alt\"></i></a></div>'\n )\n self.helper.layout = Layout(Div(Div('status', css_class='col-6'),\n css_class='row'), Div(Div('collaborators', css_class='col-6'),\n css_class='row'), FormActions(Submit('save', 'Save'),\n cancel_button(reverse('management:datapackage-detail', kwargs={\n 'uuid': self.instance.uuid}))))\n\n\n class Meta:\n model = Datapackage\n fields = ['status', 'collaborators']\n widgets = {'status': RadioSelect}\n", "step-5": "from crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Div, Submit\nfrom django import forms\nfrom django.forms import RadioSelect\nfrom django.urls import reverse\n\nfrom core.models import Person, Datapackage\nfrom core.utils import cancel_button\n\n\nclass PersonModelForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n\n if self.instance.pk:\n cancel_url = reverse('management:collaborator-detail', kwargs={'pk': self.instance.pk})\n else:\n cancel_url = reverse('management:collaborator-list')\n\n self.helper.layout = Layout(\n Div(\n Div('full_name', css_class='col-6'),\n css_class='row'\n ),\n FormActions(\n Submit('save', 'Save'),\n cancel_button(cancel_url)\n )\n )\n\n class Meta:\n model = Person\n fields = ['full_name']\n\n\nclass DatapackageModelForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n\n self.fields['status'].queryset = self.fields['status'].queryset.order_by('name')\n self.fields['collaborators'].queryset = self.fields['collaborators'].queryset.order_by('full_name')\n self.fields['collaborators'].help_text = 'Refresh page to show new collaborators. Hold down “Control”, or “Command” on a Mac, to select more than one'\n self.fields['collaborators'].widget.attrs = {'size': 10}\n\n collaborator_add_url = reverse('management:collaborator-add')\n\n self.fields['collaborators'].label = f'Collaborators <div class=\"float-right\"><a target=\"_blank\" href=\"{collaborator_add_url}\"><i class=\"fas fa-user-plus\"></i> Add collaborator <i class=\"fas fa-external-link-alt\"></i></a></div>'\n\n self.helper.layout = Layout(\n Div(\n Div('status', css_class='col-6'),\n css_class='row'\n ),\n Div(\n Div('collaborators', css_class='col-6'),\n css_class='row'\n ),\n FormActions(\n Submit('save', 'Save'),\n cancel_button(reverse('management:datapackage-detail', kwargs={'uuid': self.instance.uuid})),\n )\n )\n\n class Meta:\n model = Datapackage\n fields = ['status', 'collaborators']\n widgets = {'status': RadioSelect}\n", "step-ids": [ 0, 2, 3, 5, 6 ] }
[ 0, 2, 3, 5, 6 ]
import smtplib import requests import datetime import json import time from datetime import date from urllib.request import Request,urlopen today = date.today().strftime("%d-%m-%y") count = 0 pincodes = ["784164","781017","784161","787001"] date = 0 temp = str(14) + "-05-21" while True: for i in range(0,8): temp = str(23+i) + "-05-21" for pincode in pincodes: req = Request( "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode=" + pincode + "&date=" + temp, headers={'User-Agent': 'Mozilla/5.0'}) webpage = urlopen(req).read() data = json.loads(webpage) for center in data["centers"]: for session in center["sessions"]: print("\t", center["name"]) print("\t", center["address"]) print("\t Price: ", center["fee_type"]) print("\t", session["vaccine"]) print("\t Age limit:", session["min_age_limit"]) print("\t Available Capacity: ", session["available_capacity"]) print("////////////////////////////////////////////////////") if int(session["available_capacity"]) > 0: server = smtplib.SMTP_SSL("smtp.gmail.com", 465) server.login("[email protected]", "password") if pincode == "784164": server.sendmail("[email protected]", "[email protected]", "Vaccine available , Kindly check your cowin app") elif pincode == "781017": server.sendmail("[email protected]", "[email protected]", "Vaccine available , Kindly check your cowin app") server.sendmail("[email protected]", "[email protected]", "Vaccine available , Kindly check your cowin app") else: server.sendmail("[email protected]", "[email protected]", "Vaccine available , Kindly check your cowin app") server.quit() time.sleep(20)
normal
{ "blob_id": "7c60ae58b26ae63ba7c78a28b72192373cc05a86", "index": 1211, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile True:\n for i in range(0, 8):\n temp = str(23 + i) + '-05-21'\n for pincode in pincodes:\n req = Request(\n 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode='\n + pincode + '&date=' + temp, headers={'User-Agent':\n 'Mozilla/5.0'})\n webpage = urlopen(req).read()\n data = json.loads(webpage)\n for center in data['centers']:\n for session in center['sessions']:\n print('\\t', center['name'])\n print('\\t', center['address'])\n print('\\t Price: ', center['fee_type'])\n print('\\t', session['vaccine'])\n print('\\t Age limit:', session['min_age_limit'])\n print('\\t Available Capacity: ', session[\n 'available_capacity'])\n print(\n '////////////////////////////////////////////////////')\n if int(session['available_capacity']) > 0:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.login('[email protected]',\n 'password')\n if pincode == '784164':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n elif pincode == '781017':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n else:\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.quit()\n time.sleep(20)\n", "step-3": "<mask token>\ntoday = date.today().strftime('%d-%m-%y')\ncount = 0\npincodes = ['784164', '781017', '784161', '787001']\ndate = 0\ntemp = str(14) + '-05-21'\nwhile True:\n for i in range(0, 8):\n temp = str(23 + i) + '-05-21'\n for pincode in pincodes:\n req = Request(\n 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode='\n + pincode + '&date=' + temp, headers={'User-Agent':\n 'Mozilla/5.0'})\n webpage = urlopen(req).read()\n data = json.loads(webpage)\n for center in data['centers']:\n for session in center['sessions']:\n print('\\t', center['name'])\n print('\\t', center['address'])\n print('\\t Price: ', center['fee_type'])\n print('\\t', session['vaccine'])\n print('\\t Age limit:', session['min_age_limit'])\n print('\\t Available Capacity: ', session[\n 'available_capacity'])\n print(\n '////////////////////////////////////////////////////')\n if int(session['available_capacity']) > 0:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.login('[email protected]',\n 'password')\n if pincode == '784164':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n elif pincode == '781017':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n else:\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.quit()\n time.sleep(20)\n", "step-4": "import smtplib\nimport requests\nimport datetime\nimport json\nimport time\nfrom datetime import date\nfrom urllib.request import Request, urlopen\ntoday = date.today().strftime('%d-%m-%y')\ncount = 0\npincodes = ['784164', '781017', '784161', '787001']\ndate = 0\ntemp = str(14) + '-05-21'\nwhile True:\n for i in range(0, 8):\n temp = str(23 + 
i) + '-05-21'\n for pincode in pincodes:\n req = Request(\n 'https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode='\n + pincode + '&date=' + temp, headers={'User-Agent':\n 'Mozilla/5.0'})\n webpage = urlopen(req).read()\n data = json.loads(webpage)\n for center in data['centers']:\n for session in center['sessions']:\n print('\\t', center['name'])\n print('\\t', center['address'])\n print('\\t Price: ', center['fee_type'])\n print('\\t', session['vaccine'])\n print('\\t Age limit:', session['min_age_limit'])\n print('\\t Available Capacity: ', session[\n 'available_capacity'])\n print(\n '////////////////////////////////////////////////////')\n if int(session['available_capacity']) > 0:\n server = smtplib.SMTP_SSL('smtp.gmail.com', 465)\n server.login('[email protected]',\n 'password')\n if pincode == '784164':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n elif pincode == '781017':\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n else:\n server.sendmail('[email protected]',\n '[email protected]',\n 'Vaccine available , Kindly check your cowin app'\n )\n server.quit()\n time.sleep(20)\n", "step-5": "import smtplib\r\nimport requests\r\nimport datetime\r\nimport json\r\nimport time\r\nfrom datetime import date\r\nfrom urllib.request import Request,urlopen\r\n\r\ntoday = date.today().strftime(\"%d-%m-%y\")\r\ncount = 0\r\n\r\npincodes = [\"784164\",\"781017\",\"784161\",\"787001\"]\r\n\r\ndate = 0\r\ntemp = str(14) + \"-05-21\"\r\n\r\n\r\nwhile True:\r\n\r\n for i in range(0,8):\r\n temp = str(23+i) + \"-05-21\"\r\n for pincode in pincodes:\r\n req = Request(\r\n \"https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByPin?pincode=\" + pincode + \"&date=\" + temp,\r\n headers={'User-Agent': 'Mozilla/5.0'})\r\n webpage = urlopen(req).read()\r\n data = json.loads(webpage)\r\n for center in data[\"centers\"]:\r\n for session in center[\"sessions\"]:\r\n print(\"\\t\", center[\"name\"])\r\n print(\"\\t\", center[\"address\"])\r\n print(\"\\t Price: \", center[\"fee_type\"])\r\n print(\"\\t\", session[\"vaccine\"])\r\n print(\"\\t Age limit:\", session[\"min_age_limit\"])\r\n print(\"\\t Available Capacity: \", session[\"available_capacity\"])\r\n print(\"////////////////////////////////////////////////////\")\r\n if int(session[\"available_capacity\"]) > 0:\r\n server = smtplib.SMTP_SSL(\"smtp.gmail.com\", 465)\r\n server.login(\"[email protected]\", \"password\")\r\n if pincode == \"784164\":\r\n server.sendmail(\"[email protected]\", \"[email protected]\",\r\n \"Vaccine available , Kindly check your cowin app\")\r\n elif pincode == \"781017\":\r\n server.sendmail(\"[email protected]\", \"[email protected]\",\r\n \"Vaccine available , Kindly check your cowin app\")\r\n server.sendmail(\"[email protected]\", \"[email protected]\",\r\n \"Vaccine available , Kindly check your cowin app\")\r\n else:\r\n server.sendmail(\"[email protected]\", \"[email protected]\",\r\n \"Vaccine available , Kindly check your cowin app\")\r\n server.quit()\r\n time.sleep(20)\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> class Net(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(Net, self).__init__() self.h1 = nn.Linear(input_size, hidden_size) self.h2 = nn.Linear(hidden_size, hidden_size_1) self.h3 = nn.Linear(hidden_size_1, hidden_size_2) self.h4 = nn.Linear(hidden_size_2, hidden_size_3) self.o = nn.Linear(hidden_size_3, num_classes) def forward(self, x): x = torch.sigmoid(self.h1(x)) x = torch.sigmoid(self.h2(x)) x = torch.sigmoid(self.h3(x)) x = torch.sigmoid(self.h4(x)) x = torch.sigmoid(self.o(x)) return x <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for file in glob.glob('*.jpg'): images.append(file) <|reserved_special_token_0|> for i in range(train_num + test_num): tags = labels.iloc[i]['tags'] if i < train_num: train_images.append(imageio.imread(images[i], as_gray=True).flatten()) train_labels.append(int('cloudy' not in tags and 'haze' not in tags)) else: test_images.append(imageio.imread(images[i], as_gray=True).flatten()) test_labels.append(int('cloudy' not in tags and 'haze' not in tags)) class Net(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(Net, self).__init__() self.h1 = nn.Linear(input_size, hidden_size) self.h2 = nn.Linear(hidden_size, hidden_size_1) self.h3 = nn.Linear(hidden_size_1, hidden_size_2) self.h4 = nn.Linear(hidden_size_2, hidden_size_3) self.o = nn.Linear(hidden_size_3, num_classes) def forward(self, x): x = torch.sigmoid(self.h1(x)) x = torch.sigmoid(self.h2(x)) x = torch.sigmoid(self.h3(x)) x = torch.sigmoid(self.h4(x)) x = torch.sigmoid(self.o(x)) return x <|reserved_special_token_0|> for epoch in range(num_epochs): for i, image in enumerate(train_images): image = torch.Tensor(train_images[i]).reshape(1, 65536) label = torch.Tensor([int(train_labels[i])]) outputs = model(image) outputs = outputs.squeeze(0) loss = criterion(outputs, label) optimizer.zero_grad() loss.backward() optimizer.step() if (i + 1) % 100 == 0: print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, i + 1, total_step, loss.item())) with torch.no_grad(): correct = 0 total = 0 for i, image in enumerate(test_images): image = torch.Tensor(test_images[i]).reshape(1, 65536) label = torch.Tensor([int(test_labels[i])]) outputs = model(image) outputs = outputs.squeeze(0) outputs = 1 if torch.sum(outputs) >= 0.5 else 0 if outputs == torch.sum(label): correct += 1 elif outputs == 0: print('#############') print(i, outputs, torch.sum(label)) print('Accuracy of the network on the {} test images: {} %'.format(len( test_images), 100 * correct / len(test_images))) torch.save(model.state_dict(), 'model.ckpt') <|reserved_special_token_1|> <|reserved_special_token_0|> fileDir = os.getcwd() input_size = 65536 hidden_size = 20 hidden_size_1 = 15 hidden_size_2 = 10 hidden_size_3 = 5 num_classes = 1 learning_rate = 0.001 num_epochs = 5 train_num = 1000 test_num = 148 images = [] for file in glob.glob('*.jpg'): images.append(file) images = sorted(images, key=lambda filename: int(filename[6:-4])) train_images = [] test_images = [] train_labels = [] test_labels = [] labels = pd.read_csv('./train_v2.csv') for i in range(train_num + test_num): tags = labels.iloc[i]['tags'] if i < train_num: train_images.append(imageio.imread(images[i], as_gray=True).flatten()) train_labels.append(int('cloudy' not in tags and 'haze' not in tags)) else: test_images.append(imageio.imread(images[i], as_gray=True).flatten()) test_labels.append(int('cloudy' not in tags and 'haze' not in tags)) class 
Net(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(Net, self).__init__() self.h1 = nn.Linear(input_size, hidden_size) self.h2 = nn.Linear(hidden_size, hidden_size_1) self.h3 = nn.Linear(hidden_size_1, hidden_size_2) self.h4 = nn.Linear(hidden_size_2, hidden_size_3) self.o = nn.Linear(hidden_size_3, num_classes) def forward(self, x): x = torch.sigmoid(self.h1(x)) x = torch.sigmoid(self.h2(x)) x = torch.sigmoid(self.h3(x)) x = torch.sigmoid(self.h4(x)) x = torch.sigmoid(self.o(x)) return x model = Net(input_size, hidden_size, num_classes) criterion = nn.SoftMarginLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) total_step = len(train_images) for epoch in range(num_epochs): for i, image in enumerate(train_images): image = torch.Tensor(train_images[i]).reshape(1, 65536) label = torch.Tensor([int(train_labels[i])]) outputs = model(image) outputs = outputs.squeeze(0) loss = criterion(outputs, label) optimizer.zero_grad() loss.backward() optimizer.step() if (i + 1) % 100 == 0: print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, i + 1, total_step, loss.item())) with torch.no_grad(): correct = 0 total = 0 for i, image in enumerate(test_images): image = torch.Tensor(test_images[i]).reshape(1, 65536) label = torch.Tensor([int(test_labels[i])]) outputs = model(image) outputs = outputs.squeeze(0) outputs = 1 if torch.sum(outputs) >= 0.5 else 0 if outputs == torch.sum(label): correct += 1 elif outputs == 0: print('#############') print(i, outputs, torch.sum(label)) print('Accuracy of the network on the {} test images: {} %'.format(len( test_images), 100 * correct / len(test_images))) torch.save(model.state_dict(), 'model.ckpt') <|reserved_special_token_1|> import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import cv2 import imageio import pandas as pd import glob, os import numpy as np fileDir = os.getcwd() input_size = 65536 hidden_size = 20 hidden_size_1 = 15 hidden_size_2 = 10 hidden_size_3 = 5 num_classes = 1 learning_rate = 0.001 num_epochs = 5 train_num = 1000 test_num = 148 images = [] for file in glob.glob('*.jpg'): images.append(file) images = sorted(images, key=lambda filename: int(filename[6:-4])) train_images = [] test_images = [] train_labels = [] test_labels = [] labels = pd.read_csv('./train_v2.csv') for i in range(train_num + test_num): tags = labels.iloc[i]['tags'] if i < train_num: train_images.append(imageio.imread(images[i], as_gray=True).flatten()) train_labels.append(int('cloudy' not in tags and 'haze' not in tags)) else: test_images.append(imageio.imread(images[i], as_gray=True).flatten()) test_labels.append(int('cloudy' not in tags and 'haze' not in tags)) class Net(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(Net, self).__init__() self.h1 = nn.Linear(input_size, hidden_size) self.h2 = nn.Linear(hidden_size, hidden_size_1) self.h3 = nn.Linear(hidden_size_1, hidden_size_2) self.h4 = nn.Linear(hidden_size_2, hidden_size_3) self.o = nn.Linear(hidden_size_3, num_classes) def forward(self, x): x = torch.sigmoid(self.h1(x)) x = torch.sigmoid(self.h2(x)) x = torch.sigmoid(self.h3(x)) x = torch.sigmoid(self.h4(x)) x = torch.sigmoid(self.o(x)) return x model = Net(input_size, hidden_size, num_classes) criterion = nn.SoftMarginLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) total_step = len(train_images) for epoch in range(num_epochs): for i, image in enumerate(train_images): image = 
torch.Tensor(train_images[i]).reshape(1, 65536) label = torch.Tensor([int(train_labels[i])]) outputs = model(image) outputs = outputs.squeeze(0) loss = criterion(outputs, label) optimizer.zero_grad() loss.backward() optimizer.step() if (i + 1) % 100 == 0: print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, i + 1, total_step, loss.item())) with torch.no_grad(): correct = 0 total = 0 for i, image in enumerate(test_images): image = torch.Tensor(test_images[i]).reshape(1, 65536) label = torch.Tensor([int(test_labels[i])]) outputs = model(image) outputs = outputs.squeeze(0) outputs = 1 if torch.sum(outputs) >= 0.5 else 0 if outputs == torch.sum(label): correct += 1 elif outputs == 0: print('#############') print(i, outputs, torch.sum(label)) print('Accuracy of the network on the {} test images: {} %'.format(len( test_images), 100 * correct / len(test_images))) torch.save(model.state_dict(), 'model.ckpt') <|reserved_special_token_1|> import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import cv2 import imageio import pandas as pd import glob, os import numpy as np fileDir = os.getcwd() # os.chdir("./train-jpg") # there are 40480 training examples # we will allocate 39000 for training # and the remaining 1480 will be for validation input_size = 65536 # 256^2 hidden_size = 20 hidden_size_1 = 15 hidden_size_2 = 10 hidden_size_3 = 5 num_classes = 1 learning_rate = 0.001 num_epochs = 5 train_num = 1000 test_num = 148 # train_num = 39000 # test_num = 1480 # %% Load data--for clouds and non-clouds images = [] for file in glob.glob("*.jpg"): images.append(file) images = sorted(images, key=lambda filename: int(filename[6: -4])) # string splicing so that the images are in order train_images = [] test_images = [] train_labels = [] test_labels = [] labels = pd.read_csv("./train_v2.csv") # labels are whether or not image is any sort of cloudy or haze for i in range(train_num + test_num): tags = labels.iloc[i]["tags"] if i < train_num: train_images.append(imageio.imread(images[i], as_gray=True).flatten()) train_labels.append(int("cloudy" not in tags and "haze" not in tags)) # train_labels.append(int("water" not in tags)) else: test_images.append(imageio.imread(images[i], as_gray=True).flatten()) test_labels.append(int("cloudy" not in tags and "haze" not in tags)) # test_labels.append(int("water" not in tags)) class Net(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(Net, self).__init__() # parameters # weights # self.h1 = nn.Sigmoid() # input_size, hidden_size # self.o = nn.Sigmoid() # hidden_size, num_classes self.h1 = nn.Linear(input_size, hidden_size) self.h2 = nn.Linear(hidden_size, hidden_size_1) self.h3 = nn.Linear(hidden_size_1, hidden_size_2) self.h4 = nn.Linear(hidden_size_2, hidden_size_3) self.o = nn.Linear(hidden_size_3, num_classes) def forward(self, x): x = torch.sigmoid(self.h1(x)) # print("doing x: {}".format(x.shape)) x = torch.sigmoid(self.h2(x)) x = torch.sigmoid(self.h3(x)) x = torch.sigmoid(self.h4(x)) x = torch.sigmoid(self.o(x)) return x # %% model = Net(input_size, hidden_size, num_classes) # no device configuration here criterion = nn.SoftMarginLoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) # model = TheModelClass(*args, **kwargs) # model.load_state_dict(torch.load("model.ckpt")) # model.eval() # optimizer = TheOptimizerClass(*args, **kwargs) # checkpoint = torch.load('./model.ckpt') # model.load_state_dict(checkpoint['model_state_dict']) # 
optimizer.load_state_dict(checkpoint['optimizer_state_dict']) # epoch = checkpoint['epoch'] # loss = checkpoint['loss'] total_step = len(train_images) for epoch in range(num_epochs): for i, image in enumerate(train_images): image = torch.Tensor(train_images[i]).reshape(1, 65536) label = torch.Tensor([int(train_labels[i])]) # label = label.long() # label = label.reshape(1,1) # label = label.squeeze() # Forward pass outputs = model(image) outputs = outputs.squeeze(0) # outputs.reshape(1,) loss = criterion(outputs, label) # Backward and optimize optimizer.zero_grad() loss.backward() optimizer.step() if (i+1) % 100 == 0: print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' .format(epoch+1, num_epochs, i+1, total_step, loss.item())) # %% with torch.no_grad(): correct = 0 total = 0 for i, image in enumerate(test_images): image = torch.Tensor(test_images[i]).reshape(1, 65536) label = torch.Tensor([int(test_labels[i])]) outputs = model(image) outputs = outputs.squeeze(0) outputs = 1 if torch.sum(outputs) >= 0.5 else 0 if outputs == torch.sum(label): correct += 1 elif outputs == 0: print("#############") print(i,outputs, torch.sum(label)) # _, predicted = torch.max(outputs.data, 1) # correct += (predicted == labels).sum().item() print('Accuracy of the network on the {} test images: {} %'.format(len(test_images), 100 * correct / len(test_images))) # %% torch.save(model.state_dict(), 'model.ckpt') # %%
flexible
{ "blob_id": "a4deb67d277538e61c32381da0fe4886016dae33", "index": 85, "step-1": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\n<mask token>\n", "step-2": "<mask token>\nfor file in glob.glob('*.jpg'):\n images.append(file)\n<mask token>\nfor i in range(train_num + test_num):\n tags = labels.iloc[i]['tags']\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\n<mask token>\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images):\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n loss = criterion(outputs, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0:\n print('#############')\n print(i, outputs, torch.sum(label))\n print('Accuracy of the network on the {} test images: {} %'.format(len(\n test_images), 100 * correct / len(test_images)))\ntorch.save(model.state_dict(), 'model.ckpt')\n", "step-3": "<mask token>\nfileDir = os.getcwd()\ninput_size = 65536\nhidden_size = 20\nhidden_size_1 = 15\nhidden_size_2 = 10\nhidden_size_3 = 5\nnum_classes = 1\nlearning_rate = 0.001\nnum_epochs = 5\ntrain_num = 1000\ntest_num = 148\nimages = []\nfor file in glob.glob('*.jpg'):\n images.append(file)\nimages = sorted(images, key=lambda filename: int(filename[6:-4]))\ntrain_images = []\ntest_images = []\ntrain_labels = []\ntest_labels = []\nlabels = pd.read_csv('./train_v2.csv')\nfor i in range(train_num + test_num):\n tags = labels.iloc[i]['tags']\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n 
else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = torch.sigmoid(self.o(x))\n return x\n\n\nmodel = Net(input_size, hidden_size, num_classes)\ncriterion = nn.SoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\ntotal_step = len(train_images)\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images):\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n loss = criterion(outputs, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0:\n print('#############')\n print(i, outputs, torch.sum(label))\n print('Accuracy of the network on the {} test images: {} %'.format(len(\n test_images), 100 * correct / len(test_images)))\ntorch.save(model.state_dict(), 'model.ckpt')\n", "step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport cv2\nimport imageio\nimport pandas as pd\nimport glob, os\nimport numpy as np\nfileDir = os.getcwd()\ninput_size = 65536\nhidden_size = 20\nhidden_size_1 = 15\nhidden_size_2 = 10\nhidden_size_3 = 5\nnum_classes = 1\nlearning_rate = 0.001\nnum_epochs = 5\ntrain_num = 1000\ntest_num = 148\nimages = []\nfor file in glob.glob('*.jpg'):\n images.append(file)\nimages = sorted(images, key=lambda filename: int(filename[6:-4]))\ntrain_images = []\ntest_images = []\ntrain_labels = []\ntest_labels = []\nlabels = pd.read_csv('./train_v2.csv')\nfor i in range(train_num + test_num):\n tags = labels.iloc[i]['tags']\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int('cloudy' not in tags and 'haze' not in tags))\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.h1 = nn.Linear(input_size, hidden_size)\n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes)\n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x = 
torch.sigmoid(self.o(x))\n return x\n\n\nmodel = Net(input_size, hidden_size, num_classes)\ncriterion = nn.SoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\ntotal_step = len(train_images)\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images):\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n loss = criterion(outputs, label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0:\n print('#############')\n print(i, outputs, torch.sum(label))\n print('Accuracy of the network on the {} test images: {} %'.format(len(\n test_images), 100 * correct / len(test_images)))\ntorch.save(model.state_dict(), 'model.ckpt')\n", "step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport cv2\nimport imageio\nimport pandas as pd\nimport glob, os\nimport numpy as np\n\nfileDir = os.getcwd()\n# os.chdir(\"./train-jpg\")\n\n# there are 40480 training examples\n# we will allocate 39000 for training\n# and the remaining 1480 will be for validation\n\ninput_size = 65536 # 256^2\nhidden_size = 20\nhidden_size_1 = 15\nhidden_size_2 = 10\nhidden_size_3 = 5\nnum_classes = 1\nlearning_rate = 0.001\nnum_epochs = 5\n\ntrain_num = 1000\ntest_num = 148\n\n# train_num = 39000\n# test_num = 1480\n\n# %% Load data--for clouds and non-clouds\nimages = []\n\nfor file in glob.glob(\"*.jpg\"):\n images.append(file)\nimages = sorted(images, key=lambda filename: int(filename[6: -4])) # string splicing so that the images are in order\n\ntrain_images = []\ntest_images = []\n\ntrain_labels = []\ntest_labels = []\nlabels = pd.read_csv(\"./train_v2.csv\") # labels are whether or not image is any sort of cloudy or haze\n\nfor i in range(train_num + test_num):\n tags = labels.iloc[i][\"tags\"]\n if i < train_num:\n train_images.append(imageio.imread(images[i], as_gray=True).flatten())\n train_labels.append(int(\"cloudy\" not in tags and \"haze\" not in tags))\n # train_labels.append(int(\"water\" not in tags))\n else:\n test_images.append(imageio.imread(images[i], as_gray=True).flatten())\n test_labels.append(int(\"cloudy\" not in tags and \"haze\" not in tags))\n # test_labels.append(int(\"water\" not in tags))\n \nclass Net(nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n \n # parameters\n \n # weights\n # self.h1 = nn.Sigmoid() # input_size, hidden_size\n # self.o = nn.Sigmoid() # hidden_size, num_classes\n\n self.h1 = nn.Linear(input_size, hidden_size) \n self.h2 = nn.Linear(hidden_size, hidden_size_1)\n self.h3 = nn.Linear(hidden_size_1, hidden_size_2)\n self.h4 = nn.Linear(hidden_size_2, hidden_size_3)\n self.o = nn.Linear(hidden_size_3, num_classes) \n\n def forward(self, x):\n x = torch.sigmoid(self.h1(x))\n # print(\"doing x: {}\".format(x.shape))\n x = torch.sigmoid(self.h2(x))\n x = torch.sigmoid(self.h3(x))\n x = torch.sigmoid(self.h4(x))\n x 
= torch.sigmoid(self.o(x))\n return x\n\n# %%\n\nmodel = Net(input_size, hidden_size, num_classes) # no device configuration here\ncriterion = nn.SoftMarginLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) \n# model = TheModelClass(*args, **kwargs)\n# model.load_state_dict(torch.load(\"model.ckpt\"))\n# model.eval()\n# optimizer = TheOptimizerClass(*args, **kwargs)\n\n# checkpoint = torch.load('./model.ckpt')\n# model.load_state_dict(checkpoint['model_state_dict'])\n# optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n# epoch = checkpoint['epoch']\n# loss = checkpoint['loss']\n\n\ntotal_step = len(train_images)\nfor epoch in range(num_epochs):\n for i, image in enumerate(train_images): \n\n image = torch.Tensor(train_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(train_labels[i])])\n # label = label.long()\n # label = label.reshape(1,1)\n # label = label.squeeze()\n \n # Forward pass\n outputs = model(image)\n outputs = outputs.squeeze(0)\n # outputs.reshape(1,)\n loss = criterion(outputs, label)\n \n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' \n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n\n\n# %%\n\nwith torch.no_grad():\n correct = 0\n total = 0\n for i, image in enumerate(test_images):\n image = torch.Tensor(test_images[i]).reshape(1, 65536)\n label = torch.Tensor([int(test_labels[i])])\n outputs = model(image)\n outputs = outputs.squeeze(0)\n outputs = 1 if torch.sum(outputs) >= 0.5 else 0\n if outputs == torch.sum(label):\n correct += 1\n elif outputs == 0: \n print(\"#############\")\n print(i,outputs, torch.sum(label))\n # _, predicted = torch.max(outputs.data, 1)\n # correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the {} test images: {} %'.format(len(test_images), 100 * correct / len(test_images)))\n\n\n\n# %%\n\ntorch.save(model.state_dict(), 'model.ckpt')\n\n# %%\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
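The training script in the record above saves its weights with torch.save(model.state_dict(), 'model.ckpt') and only hints at reloading them in commented-out code. A minimal sketch of reloading that checkpoint for inference, assuming the same Net class and hyperparameters (input_size=65536, hidden_size=20, num_classes=1) and the test_images list are defined exactly as in the script:

import torch

# Assumes Net, input_size, hidden_size, num_classes and test_images exist as in the script above.
model = Net(input_size, hidden_size, num_classes)
model.load_state_dict(torch.load('model.ckpt'))
model.eval()

with torch.no_grad():
    sample = torch.Tensor(test_images[0]).reshape(1, 65536)
    prob = model(sample).item()           # sigmoid output in [0, 1]
    prediction = 1 if prob >= 0.5 else 0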
import boto3
ec2 = boto3.resource('ec2')
response = client.allocate_address(Domain='standard')
print(response)
normal
{ "blob_id": "6424fccb7990b0a1722d5d787e7eb5acb4ff1a74", "index": 1863, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(response)\n", "step-3": "<mask token>\nec2 = boto3.resource('ec2')\nresponse = client.allocate_address(Domain='standard')\nprint(response)\n", "step-4": "import boto3\nec2 = boto3.resource('ec2')\nresponse = client.allocate_address(Domain='standard')\nprint(response)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
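Note that the snippet above calls client.allocate_address() without ever creating client (only an ec2 resource is built), so it fails with a NameError as written. A hedged fix, assuming the intent was to allocate an Elastic IP through the EC2 client API:

import boto3

client = boto3.client('ec2')                             # the missing client object
response = client.allocate_address(Domain='standard')    # or Domain='vpc'
print(response)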
import asyncio import logging import random from aiogram.dispatcher import FSMContext from aiogram.types import ContentTypes, Message, CallbackQuery from aiogram.utils.exceptions import BotBlocked import keyboards from data.config import ADMINS, ADMIN_CHAT_ID from keyboards.inline.activate_menu import active_menu_callback from loader import dp, db, storage from utils import text from utils.db_api import redis_commands from utils.jobs import cur_bot_info from utils.misc import rate_limit @dp.message_handler(commands="upload", user_id=ADMINS, state="*") async def upload_profile(command_msg: Message, state: FSMContext): profile_msg = command_msg.reply_to_message admin = command_msg.from_user param = command_msg.get_args() if not profile_msg: await command_msg.answer("Чтобы загрузить анкету сделай на неё REPLY") return elif param != "g" and param != "b": await command_msg.answer("Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>") return other_bot = profile_msg.forward_from if not other_bot or other_bot.id != 1234060895: await profile_msg.reply("Загружать анкеты можно только из нашего БотаX :)") return elif (not profile_msg.photo and not profile_msg.video) or not profile_msg.caption: await profile_msg.reply("Загружать нужно именно анкету, а не части анкеты") return profile_data = text.get_parse_data(profile_msg.caption) if profile_msg.photo: media_id = profile_msg.photo[-1].file_id with_video = False else: media_id = profile_msg.video.file_id with_video = True profile_data.update( id=random.randint(1, 100000), username="f", media_id=media_id, with_video=with_video, sex=1 if param == "g" else 2 ) await db.add_user(**profile_data) await profile_msg.reply("Пользователь {}-{} успешно добавлен ✅" "".format(profile_data["user_nick"], profile_data["id"])) logging.info(f"Admin @{admin.username}-{admin.id} successfully " f"added fake {profile_data['user_nick']}-{profile_data['id']} ") @dp.message_handler(commands="get_msg_info", user_id=ADMINS, state="*") async def get_msg_info(command_msg: Message, state: FSMContext): msg = command_msg.reply_to_message await command_msg.delete() if not msg: await command_msg.answer("Нужно делать реплай на сообщение.") return state = await state.get_state() await msg.reply(f"Эхо в состоянии <code>{state}</code>.\n" f"\nСодержание сообщения:\n" f"\n<code>{msg}</code>\n" f"\ncontent_type = {msg.content_type}\n" f"\nentities={msg.entities}") @dp.message_handler(commands="ban_user", user_id=ADMINS, state="*") async def ban_user(command_msg: Message, state: FSMContext): ban_user_id = command_msg.get_args() admin = command_msg.from_user await command_msg.delete() if not ban_user_id or not ban_user_id.isdecimal(): await command_msg.answer(f"Формат команды: /ban_user user_id") return ban_user_id = int(ban_user_id) is_banned = await db.ban_user(ban_user_id) if not is_banned: await command_msg.answer(f"Пользователя с таким <user_id> не существует") return await redis_commands.ban_user(ban_user_id) await command_msg.answer("Пользователь({}) успешно забанен 😎".format(ban_user_id)) logging.info(f"Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}") @dp.message_handler(commands="unban_user", user_id=ADMINS, state="*") async def unban_user(command_msg: Message, state: FSMContext): unban_user_id = command_msg.get_args() admin = command_msg.from_user await command_msg.delete() if not unban_user_id or not unban_user_id.isdecimal(): await command_msg.answer(f"Формат команды: /unban_user user_id") return unban_user_id = int(unban_user_id) is_unbanned = 
await db.unban_user(unban_user_id) if not is_unbanned: await command_msg.answer(f"Пользователя с таким <user_id> не существует") return await redis_commands.unban_user(unban_user_id) await command_msg.answer("Пользователь({}) успешно разбанен 👻".format(unban_user_id)) logging.info(f"Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}") @dp.message_handler(commands="clean_old_likes", user_id=ADMINS, state="*") async def clean_old_likes(command_msg: Message, state: FSMContext): admin = command_msg.from_user await command_msg.delete() count = await db.clean_old_likes(interval=24) await command_msg.answer("Было успешно удалено {} старых лайков(за {} hours)".format(count, 24)) logging.info(f"Admin @{admin.username}-{admin.id} delete old likes(count={count})") @dp.message_handler(commands="say_to_all_now_go", user_id=ADMINS, state="*") async def say_to_all(command_msg: Message, state: FSMContext): admin = command_msg.from_user msg = command_msg.reply_to_message await command_msg.delete() if not msg: await command_msg.answer("Чтобы воспользоваться этой командой сделай REPLY") return active_user_ids = await db.get_all_users(active=True) # [375766905, 997319478] delete_bot_count = 0 for user_id in active_user_ids: try: await dp.bot.copy_message( chat_id=user_id, from_chat_id=command_msg.chat.id, message_id=msg.message_id ) await asyncio.sleep(0.05) except BotBlocked as exc: await db.update_user(user_id, active=False) await redis_commands.clear_user(user_id) await redis_commands.clear_search_ids(user_id) delete_bot_count += 1 await msg.reply("Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})" "".format(len(active_user_ids) - delete_bot_count, delete_bot_count)) logging.info(f"Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})") @dp.message_handler(commands="show_state_statistic", user_id=ADMINS, state="*") async def show_state_statistic(command_msg: Message, state: FSMContext): admin = command_msg.from_user statistic = dict() await command_msg.delete() states_list = await storage.get_states_list() for states_item in states_list: chat_id, user_id = states_item state_text = await storage.get_state(chat=chat_id, user=user_id, default="Deactivate bot") try: statistic[state_text] += 1 except KeyError: statistic.update({state_text: 1}) out_text = "<b>Статичктика по пользователям:</b>\n\n" for state_text, count_users in statistic.items(): out_text += f"В состоянии {state_text} — {count_users} пользователей\n\n" await command_msg.answer(out_text) logging.info(f"For Admin @{admin.username}-{admin.id} show state statistic") @rate_limit(3) @dp.message_handler(commands="show_info", user_id=ADMINS, state="*") async def show_info(command_msg: Message, state: FSMContext): admin = command_msg.from_user await command_msg.delete() await cur_bot_info(for_chat_id=command_msg.chat.id) logging.info(f"For admin @{admin.username}-{admin.id} SHOW INFO(command)") @dp.callback_query_handler(active_menu_callback.filter(), chat_id=ADMIN_CHAT_ID, state="*") async def change_active(call: CallbackQuery, state: FSMContext, callback_data: dict): active = not bool(int(callback_data["active"])) user_id = int(callback_data["user_id"]) admin = call.from_user profile_msg = call.message if active: await db.unban_user(user_id) await redis_commands.unban_user(user_id) else: await db.ban_user(user_id) await redis_commands.ban_user(user_id) await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(user_id=user_id, active=active)) await call.answer() logging.info(f"Admin 
@{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}")
normal
{ "blob_id": "302accfd5001a27c7bbe6081856d43dbec704168", "index": 339, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]_handler(commands='upload', user_id=ADMINS, state='*')\nasync def upload_profile(command_msg: Message, state: FSMContext):\n profile_msg = command_msg.reply_to_message\n admin = command_msg.from_user\n param = command_msg.get_args()\n if not profile_msg:\n await command_msg.answer('Чтобы загрузить анкету сделай на неё REPLY')\n return\n elif param != 'g' and param != 'b':\n await command_msg.answer(\n 'Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>'\n )\n return\n other_bot = profile_msg.forward_from\n if not other_bot or other_bot.id != 1234060895:\n await profile_msg.reply(\n 'Загружать анкеты можно только из нашего БотаX :)')\n return\n elif not profile_msg.photo and not profile_msg.video or not profile_msg.caption:\n await profile_msg.reply(\n 'Загружать нужно именно анкету, а не части анкеты')\n return\n profile_data = text.get_parse_data(profile_msg.caption)\n if profile_msg.photo:\n media_id = profile_msg.photo[-1].file_id\n with_video = False\n else:\n media_id = profile_msg.video.file_id\n with_video = True\n profile_data.update(id=random.randint(1, 100000), username='f',\n media_id=media_id, with_video=with_video, sex=1 if param == 'g' else 2)\n await db.add_user(**profile_data)\n await profile_msg.reply('Пользователь {}-{} успешно добавлен ✅'.format(\n profile_data['user_nick'], profile_data['id']))\n logging.info(\n f\"Admin @{admin.username}-{admin.id} successfully added fake {profile_data['user_nick']}-{profile_data['id']} \"\n )\n\n\[email protected]_handler(commands='get_msg_info', user_id=ADMINS, state='*')\nasync def get_msg_info(command_msg: Message, state: FSMContext):\n msg = command_msg.reply_to_message\n await command_msg.delete()\n if not msg:\n await command_msg.answer('Нужно делать реплай на сообщение.')\n return\n state = await state.get_state()\n await msg.reply(\n f\"\"\"Эхо в состоянии <code>{state}</code>.\n\nСодержание сообщения:\n\n<code>{msg}</code>\n\ncontent_type = {msg.content_type}\n\nentities={msg.entities}\"\"\"\n )\n\n\[email protected]_handler(commands='ban_user', user_id=ADMINS, state='*')\nasync def ban_user(command_msg: Message, state: FSMContext):\n ban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n await command_msg.delete()\n if not ban_user_id or not ban_user_id.isdecimal():\n await command_msg.answer(f'Формат команды: /ban_user user_id')\n return\n ban_user_id = int(ban_user_id)\n is_banned = await db.ban_user(ban_user_id)\n if not is_banned:\n await command_msg.answer(\n f'Пользователя с таким <user_id> не существует')\n return\n await redis_commands.ban_user(ban_user_id)\n await command_msg.answer('Пользователь({}) успешно забанен 😎'.format(\n ban_user_id))\n logging.info(f'Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}')\n\n\[email protected]_handler(commands='unban_user', user_id=ADMINS, state='*')\nasync def unban_user(command_msg: Message, state: FSMContext):\n unban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n await command_msg.delete()\n if not unban_user_id or not unban_user_id.isdecimal():\n await command_msg.answer(f'Формат команды: /unban_user user_id')\n return\n unban_user_id = int(unban_user_id)\n is_unbanned = await db.unban_user(unban_user_id)\n if not is_unbanned:\n await command_msg.answer(\n f'Пользователя с таким <user_id> не существует')\n return\n await redis_commands.unban_user(unban_user_id)\n 
await command_msg.answer('Пользователь({}) успешно разбанен 👻'.format(\n unban_user_id))\n logging.info(\n f'Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}')\n\n\[email protected]_handler(commands='clean_old_likes', user_id=ADMINS, state='*')\nasync def clean_old_likes(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n await command_msg.delete()\n count = await db.clean_old_likes(interval=24)\n await command_msg.answer(\n 'Было успешно удалено {} старых лайков(за {} hours)'.format(count, 24))\n logging.info(\n f'Admin @{admin.username}-{admin.id} delete old likes(count={count})')\n\n\[email protected]_handler(commands='say_to_all_now_go', user_id=ADMINS, state='*')\nasync def say_to_all(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n msg = command_msg.reply_to_message\n await command_msg.delete()\n if not msg:\n await command_msg.answer(\n 'Чтобы воспользоваться этой командой сделай REPLY')\n return\n active_user_ids = await db.get_all_users(active=True)\n delete_bot_count = 0\n for user_id in active_user_ids:\n try:\n await dp.bot.copy_message(chat_id=user_id, from_chat_id=\n command_msg.chat.id, message_id=msg.message_id)\n await asyncio.sleep(0.05)\n except BotBlocked as exc:\n await db.update_user(user_id, active=False)\n await redis_commands.clear_user(user_id)\n await redis_commands.clear_search_ids(user_id)\n delete_bot_count += 1\n await msg.reply(\n 'Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})'\n .format(len(active_user_ids) - delete_bot_count, delete_bot_count))\n logging.info(\n f'Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})'\n )\n\n\[email protected]_handler(commands='show_state_statistic', user_id=ADMINS, state='*')\nasync def show_state_statistic(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n statistic = dict()\n await command_msg.delete()\n states_list = await storage.get_states_list()\n for states_item in states_list:\n chat_id, user_id = states_item\n state_text = await storage.get_state(chat=chat_id, user=user_id,\n default='Deactivate bot')\n try:\n statistic[state_text] += 1\n except KeyError:\n statistic.update({state_text: 1})\n out_text = '<b>Статичктика по пользователям:</b>\\n\\n'\n for state_text, count_users in statistic.items():\n out_text += (\n f'В состоянии {state_text} — {count_users} пользователей\\n\\n')\n await command_msg.answer(out_text)\n logging.info(f'For Admin @{admin.username}-{admin.id} show state statistic'\n )\n\n\n@rate_limit(3)\[email protected]_handler(commands='show_info', user_id=ADMINS, state='*')\nasync def show_info(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n await command_msg.delete()\n await cur_bot_info(for_chat_id=command_msg.chat.id)\n logging.info(f'For admin @{admin.username}-{admin.id} SHOW INFO(command)')\n\n\[email protected]_query_handler(active_menu_callback.filter(), chat_id=\n ADMIN_CHAT_ID, state='*')\nasync def change_active(call: CallbackQuery, state: FSMContext,\n callback_data: dict):\n active = not bool(int(callback_data['active']))\n user_id = int(callback_data['user_id'])\n admin = call.from_user\n profile_msg = call.message\n if active:\n await db.unban_user(user_id)\n await redis_commands.unban_user(user_id)\n else:\n await db.ban_user(user_id)\n await redis_commands.ban_user(user_id)\n await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(\n user_id=user_id, active=active))\n await call.answer()\n logging.info(\n 
f'Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}'\n )\n", "step-3": "import asyncio\nimport logging\nimport random\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import ContentTypes, Message, CallbackQuery\nfrom aiogram.utils.exceptions import BotBlocked\nimport keyboards\nfrom data.config import ADMINS, ADMIN_CHAT_ID\nfrom keyboards.inline.activate_menu import active_menu_callback\nfrom loader import dp, db, storage\nfrom utils import text\nfrom utils.db_api import redis_commands\nfrom utils.jobs import cur_bot_info\nfrom utils.misc import rate_limit\n\n\[email protected]_handler(commands='upload', user_id=ADMINS, state='*')\nasync def upload_profile(command_msg: Message, state: FSMContext):\n profile_msg = command_msg.reply_to_message\n admin = command_msg.from_user\n param = command_msg.get_args()\n if not profile_msg:\n await command_msg.answer('Чтобы загрузить анкету сделай на неё REPLY')\n return\n elif param != 'g' and param != 'b':\n await command_msg.answer(\n 'Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>'\n )\n return\n other_bot = profile_msg.forward_from\n if not other_bot or other_bot.id != 1234060895:\n await profile_msg.reply(\n 'Загружать анкеты можно только из нашего БотаX :)')\n return\n elif not profile_msg.photo and not profile_msg.video or not profile_msg.caption:\n await profile_msg.reply(\n 'Загружать нужно именно анкету, а не части анкеты')\n return\n profile_data = text.get_parse_data(profile_msg.caption)\n if profile_msg.photo:\n media_id = profile_msg.photo[-1].file_id\n with_video = False\n else:\n media_id = profile_msg.video.file_id\n with_video = True\n profile_data.update(id=random.randint(1, 100000), username='f',\n media_id=media_id, with_video=with_video, sex=1 if param == 'g' else 2)\n await db.add_user(**profile_data)\n await profile_msg.reply('Пользователь {}-{} успешно добавлен ✅'.format(\n profile_data['user_nick'], profile_data['id']))\n logging.info(\n f\"Admin @{admin.username}-{admin.id} successfully added fake {profile_data['user_nick']}-{profile_data['id']} \"\n )\n\n\[email protected]_handler(commands='get_msg_info', user_id=ADMINS, state='*')\nasync def get_msg_info(command_msg: Message, state: FSMContext):\n msg = command_msg.reply_to_message\n await command_msg.delete()\n if not msg:\n await command_msg.answer('Нужно делать реплай на сообщение.')\n return\n state = await state.get_state()\n await msg.reply(\n f\"\"\"Эхо в состоянии <code>{state}</code>.\n\nСодержание сообщения:\n\n<code>{msg}</code>\n\ncontent_type = {msg.content_type}\n\nentities={msg.entities}\"\"\"\n )\n\n\[email protected]_handler(commands='ban_user', user_id=ADMINS, state='*')\nasync def ban_user(command_msg: Message, state: FSMContext):\n ban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n await command_msg.delete()\n if not ban_user_id or not ban_user_id.isdecimal():\n await command_msg.answer(f'Формат команды: /ban_user user_id')\n return\n ban_user_id = int(ban_user_id)\n is_banned = await db.ban_user(ban_user_id)\n if not is_banned:\n await command_msg.answer(\n f'Пользователя с таким <user_id> не существует')\n return\n await redis_commands.ban_user(ban_user_id)\n await command_msg.answer('Пользователь({}) успешно забанен 😎'.format(\n ban_user_id))\n logging.info(f'Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}')\n\n\[email protected]_handler(commands='unban_user', user_id=ADMINS, state='*')\nasync def unban_user(command_msg: Message, state: 
FSMContext):\n unban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n await command_msg.delete()\n if not unban_user_id or not unban_user_id.isdecimal():\n await command_msg.answer(f'Формат команды: /unban_user user_id')\n return\n unban_user_id = int(unban_user_id)\n is_unbanned = await db.unban_user(unban_user_id)\n if not is_unbanned:\n await command_msg.answer(\n f'Пользователя с таким <user_id> не существует')\n return\n await redis_commands.unban_user(unban_user_id)\n await command_msg.answer('Пользователь({}) успешно разбанен 👻'.format(\n unban_user_id))\n logging.info(\n f'Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}')\n\n\[email protected]_handler(commands='clean_old_likes', user_id=ADMINS, state='*')\nasync def clean_old_likes(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n await command_msg.delete()\n count = await db.clean_old_likes(interval=24)\n await command_msg.answer(\n 'Было успешно удалено {} старых лайков(за {} hours)'.format(count, 24))\n logging.info(\n f'Admin @{admin.username}-{admin.id} delete old likes(count={count})')\n\n\[email protected]_handler(commands='say_to_all_now_go', user_id=ADMINS, state='*')\nasync def say_to_all(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n msg = command_msg.reply_to_message\n await command_msg.delete()\n if not msg:\n await command_msg.answer(\n 'Чтобы воспользоваться этой командой сделай REPLY')\n return\n active_user_ids = await db.get_all_users(active=True)\n delete_bot_count = 0\n for user_id in active_user_ids:\n try:\n await dp.bot.copy_message(chat_id=user_id, from_chat_id=\n command_msg.chat.id, message_id=msg.message_id)\n await asyncio.sleep(0.05)\n except BotBlocked as exc:\n await db.update_user(user_id, active=False)\n await redis_commands.clear_user(user_id)\n await redis_commands.clear_search_ids(user_id)\n delete_bot_count += 1\n await msg.reply(\n 'Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})'\n .format(len(active_user_ids) - delete_bot_count, delete_bot_count))\n logging.info(\n f'Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})'\n )\n\n\[email protected]_handler(commands='show_state_statistic', user_id=ADMINS, state='*')\nasync def show_state_statistic(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n statistic = dict()\n await command_msg.delete()\n states_list = await storage.get_states_list()\n for states_item in states_list:\n chat_id, user_id = states_item\n state_text = await storage.get_state(chat=chat_id, user=user_id,\n default='Deactivate bot')\n try:\n statistic[state_text] += 1\n except KeyError:\n statistic.update({state_text: 1})\n out_text = '<b>Статичктика по пользователям:</b>\\n\\n'\n for state_text, count_users in statistic.items():\n out_text += (\n f'В состоянии {state_text} — {count_users} пользователей\\n\\n')\n await command_msg.answer(out_text)\n logging.info(f'For Admin @{admin.username}-{admin.id} show state statistic'\n )\n\n\n@rate_limit(3)\[email protected]_handler(commands='show_info', user_id=ADMINS, state='*')\nasync def show_info(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n await command_msg.delete()\n await cur_bot_info(for_chat_id=command_msg.chat.id)\n logging.info(f'For admin @{admin.username}-{admin.id} SHOW INFO(command)')\n\n\[email protected]_query_handler(active_menu_callback.filter(), chat_id=\n ADMIN_CHAT_ID, state='*')\nasync def change_active(call: CallbackQuery, state: 
FSMContext,\n callback_data: dict):\n active = not bool(int(callback_data['active']))\n user_id = int(callback_data['user_id'])\n admin = call.from_user\n profile_msg = call.message\n if active:\n await db.unban_user(user_id)\n await redis_commands.unban_user(user_id)\n else:\n await db.ban_user(user_id)\n await redis_commands.ban_user(user_id)\n await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(\n user_id=user_id, active=active))\n await call.answer()\n logging.info(\n f'Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}'\n )\n", "step-4": "import asyncio\nimport logging\nimport random\n\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import ContentTypes, Message, CallbackQuery\nfrom aiogram.utils.exceptions import BotBlocked\n\nimport keyboards\nfrom data.config import ADMINS, ADMIN_CHAT_ID\nfrom keyboards.inline.activate_menu import active_menu_callback\nfrom loader import dp, db, storage\nfrom utils import text\nfrom utils.db_api import redis_commands\nfrom utils.jobs import cur_bot_info\nfrom utils.misc import rate_limit\n\n\[email protected]_handler(commands=\"upload\", user_id=ADMINS, state=\"*\")\nasync def upload_profile(command_msg: Message, state: FSMContext):\n profile_msg = command_msg.reply_to_message\n admin = command_msg.from_user\n param = command_msg.get_args()\n\n if not profile_msg:\n await command_msg.answer(\"Чтобы загрузить анкету сделай на неё REPLY\")\n return\n elif param != \"g\" and param != \"b\":\n await command_msg.answer(\"Чтобы воспользоваться командой /upload нужно добавить параметры <b>b | g</b>\")\n return\n\n other_bot = profile_msg.forward_from\n if not other_bot or other_bot.id != 1234060895:\n await profile_msg.reply(\"Загружать анкеты можно только из нашего БотаX :)\")\n return\n elif (not profile_msg.photo and not profile_msg.video) or not profile_msg.caption:\n await profile_msg.reply(\"Загружать нужно именно анкету, а не части анкеты\")\n return\n\n profile_data = text.get_parse_data(profile_msg.caption)\n if profile_msg.photo:\n media_id = profile_msg.photo[-1].file_id\n with_video = False\n else:\n media_id = profile_msg.video.file_id\n with_video = True\n\n profile_data.update(\n id=random.randint(1, 100000),\n username=\"f\",\n media_id=media_id,\n with_video=with_video,\n sex=1 if param == \"g\" else 2\n )\n\n await db.add_user(**profile_data)\n await profile_msg.reply(\"Пользователь {}-{} успешно добавлен ✅\"\n \"\".format(profile_data[\"user_nick\"], profile_data[\"id\"]))\n logging.info(f\"Admin @{admin.username}-{admin.id} successfully \"\n f\"added fake {profile_data['user_nick']}-{profile_data['id']} \")\n\n\[email protected]_handler(commands=\"get_msg_info\", user_id=ADMINS, state=\"*\")\nasync def get_msg_info(command_msg: Message, state: FSMContext):\n msg = command_msg.reply_to_message\n\n await command_msg.delete()\n\n if not msg:\n await command_msg.answer(\"Нужно делать реплай на сообщение.\")\n return\n\n state = await state.get_state()\n await msg.reply(f\"Эхо в состоянии <code>{state}</code>.\\n\"\n f\"\\nСодержание сообщения:\\n\"\n f\"\\n<code>{msg}</code>\\n\"\n f\"\\ncontent_type = {msg.content_type}\\n\"\n f\"\\nentities={msg.entities}\")\n\n\[email protected]_handler(commands=\"ban_user\", user_id=ADMINS, state=\"*\")\nasync def ban_user(command_msg: Message, state: FSMContext):\n ban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n\n await command_msg.delete()\n\n if not ban_user_id or not ban_user_id.isdecimal():\n await 
command_msg.answer(f\"Формат команды: /ban_user user_id\")\n return\n ban_user_id = int(ban_user_id)\n\n is_banned = await db.ban_user(ban_user_id)\n if not is_banned:\n await command_msg.answer(f\"Пользователя с таким <user_id> не существует\")\n return\n\n await redis_commands.ban_user(ban_user_id)\n\n await command_msg.answer(\"Пользователь({}) успешно забанен 😎\".format(ban_user_id))\n\n logging.info(f\"Admin @{admin.username}-{admin.id} BAN USER-{ban_user_id}\")\n\n\[email protected]_handler(commands=\"unban_user\", user_id=ADMINS, state=\"*\")\nasync def unban_user(command_msg: Message, state: FSMContext):\n unban_user_id = command_msg.get_args()\n admin = command_msg.from_user\n\n await command_msg.delete()\n\n if not unban_user_id or not unban_user_id.isdecimal():\n await command_msg.answer(f\"Формат команды: /unban_user user_id\")\n return\n unban_user_id = int(unban_user_id)\n\n is_unbanned = await db.unban_user(unban_user_id)\n if not is_unbanned:\n await command_msg.answer(f\"Пользователя с таким <user_id> не существует\")\n return\n\n await redis_commands.unban_user(unban_user_id)\n\n await command_msg.answer(\"Пользователь({}) успешно разбанен 👻\".format(unban_user_id))\n\n logging.info(f\"Admin @{admin.username}-{admin.id} UNBAN USER-{unban_user_id}\")\n\n\[email protected]_handler(commands=\"clean_old_likes\", user_id=ADMINS, state=\"*\")\nasync def clean_old_likes(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n\n await command_msg.delete()\n\n count = await db.clean_old_likes(interval=24)\n\n await command_msg.answer(\"Было успешно удалено {} старых лайков(за {} hours)\".format(count, 24))\n\n logging.info(f\"Admin @{admin.username}-{admin.id} delete old likes(count={count})\")\n\n\[email protected]_handler(commands=\"say_to_all_now_go\", user_id=ADMINS, state=\"*\")\nasync def say_to_all(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n msg = command_msg.reply_to_message\n\n await command_msg.delete()\n\n if not msg:\n await command_msg.answer(\"Чтобы воспользоваться этой командой сделай REPLY\")\n return\n\n active_user_ids = await db.get_all_users(active=True) # [375766905, 997319478]\n delete_bot_count = 0\n\n for user_id in active_user_ids:\n try:\n await dp.bot.copy_message(\n chat_id=user_id,\n from_chat_id=command_msg.chat.id,\n message_id=msg.message_id\n )\n await asyncio.sleep(0.05)\n except BotBlocked as exc:\n await db.update_user(user_id, active=False)\n await redis_commands.clear_user(user_id)\n await redis_commands.clear_search_ids(user_id)\n delete_bot_count += 1\n\n await msg.reply(\"Сообщение успешно отправлено: оставили бот({}), заблокировали бот({})\"\n \"\".format(len(active_user_ids) - delete_bot_count, delete_bot_count))\n\n logging.info(f\"Admin @{admin.username}-{admin.id} SAY TO ALL MSG(id={msg.message_id})\")\n\n\[email protected]_handler(commands=\"show_state_statistic\", user_id=ADMINS, state=\"*\")\nasync def show_state_statistic(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n statistic = dict()\n\n await command_msg.delete()\n\n states_list = await storage.get_states_list()\n for states_item in states_list:\n chat_id, user_id = states_item\n state_text = await storage.get_state(chat=chat_id, user=user_id, default=\"Deactivate bot\")\n try:\n statistic[state_text] += 1\n except KeyError:\n statistic.update({state_text: 1})\n\n out_text = \"<b>Статичктика по пользователям:</b>\\n\\n\"\n for state_text, count_users in statistic.items():\n out_text += f\"В 
состоянии {state_text} — {count_users} пользователей\\n\\n\"\n\n await command_msg.answer(out_text)\n\n logging.info(f\"For Admin @{admin.username}-{admin.id} show state statistic\")\n\n\n@rate_limit(3)\[email protected]_handler(commands=\"show_info\", user_id=ADMINS, state=\"*\")\nasync def show_info(command_msg: Message, state: FSMContext):\n admin = command_msg.from_user\n\n await command_msg.delete()\n\n await cur_bot_info(for_chat_id=command_msg.chat.id)\n\n logging.info(f\"For admin @{admin.username}-{admin.id} SHOW INFO(command)\")\n\n\[email protected]_query_handler(active_menu_callback.filter(), chat_id=ADMIN_CHAT_ID, state=\"*\")\nasync def change_active(call: CallbackQuery, state: FSMContext, callback_data: dict):\n active = not bool(int(callback_data[\"active\"]))\n user_id = int(callback_data[\"user_id\"])\n admin = call.from_user\n profile_msg = call.message\n\n if active:\n await db.unban_user(user_id)\n await redis_commands.unban_user(user_id)\n else:\n await db.ban_user(user_id)\n await redis_commands.ban_user(user_id)\n\n await profile_msg.edit_reply_markup(keyboards.inline.get_activate_menu(user_id=user_id, active=active))\n await call.answer()\n\n logging.info(f\"Admin @{admin.username}-{admin.id} CHANGE ACTIVE FOR USER-{user_id} TO {active}\")\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
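The handlers in the record above all follow the same aiogram 2.x pattern: a Dispatcher-level decorator restricted to admin IDs and matching any FSM state. A stripped-down sketch of that registration pattern, with a placeholder token and admin list that are not taken from the record:

from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage

ADMINS = [123456789]                      # placeholder admin IDs
bot = Bot(token='BOT_TOKEN')              # placeholder token
dp = Dispatcher(bot, storage=MemoryStorage())


@dp.message_handler(commands='ping', user_id=ADMINS, state='*')
async def ping(message: types.Message):
    await message.answer('pong')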
from rest_framework import viewsets, mixins
from .models import Comment, Post
from .serializer import CommentSerializer, PostSerializer, AllCommentSerializer


class PostViewSet(viewsets.ModelViewSet):
    serializer_class = PostSerializer
    queryset = Post.objects.all()


class CommentViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins.RetrieveModelMixin):
    queryset = Comment.objects.all()

    def get_serializer_class(self):
        if self.action == 'retrieve':
            if self.get_object().level < 3:
                return CommentSerializer
        return AllCommentSerializer
normal
{ "blob_id": "9bc13c608c079cbf23ed04f29edd1fd836214cde", "index": 282, "step-1": "<mask token>\n\n\nclass CommentViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins\n .RetrieveModelMixin):\n queryset = Comment.objects.all()\n\n def get_serializer_class(self):\n if self.action == 'retrieve':\n if self.get_object().level < 3:\n return CommentSerializer\n return AllCommentSerializer\n", "step-2": "<mask token>\n\n\nclass PostViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n\n\nclass CommentViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins\n .RetrieveModelMixin):\n queryset = Comment.objects.all()\n\n def get_serializer_class(self):\n if self.action == 'retrieve':\n if self.get_object().level < 3:\n return CommentSerializer\n return AllCommentSerializer\n", "step-3": "<mask token>\n\n\nclass PostViewSet(viewsets.ModelViewSet):\n serializer_class = PostSerializer\n queryset = Post.objects.all()\n\n\nclass CommentViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins\n .RetrieveModelMixin):\n queryset = Comment.objects.all()\n\n def get_serializer_class(self):\n if self.action == 'retrieve':\n if self.get_object().level < 3:\n return CommentSerializer\n return AllCommentSerializer\n", "step-4": "from rest_framework import viewsets, mixins\nfrom .models import Comment, Post\nfrom .serializer import CommentSerializer, PostSerializer, AllCommentSerializer\n\n\nclass PostViewSet(viewsets.ModelViewSet):\n serializer_class = PostSerializer\n queryset = Post.objects.all()\n\n\nclass CommentViewSet(viewsets.GenericViewSet, mixins.ListModelMixin, mixins\n .RetrieveModelMixin):\n queryset = Comment.objects.all()\n\n def get_serializer_class(self):\n if self.action == 'retrieve':\n if self.get_object().level < 3:\n return CommentSerializer\n return AllCommentSerializer\n", "step-5": null, "step-ids": [ 3, 4, 5, 6 ] }
[ 3, 4, 5, 6 ]
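For context, viewsets like the two above are typically exposed through a router; a small sketch of hypothetical URL wiring, where the module path and route prefixes are assumptions rather than part of the record:

from rest_framework.routers import DefaultRouter
from .views import PostViewSet, CommentViewSet   # assumed module name

router = DefaultRouter()
router.register('posts', PostViewSet)
router.register('comments', CommentViewSet)
urlpatterns = router.urls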
# Uses python3
import sys
from operator import attrgetter
from collections import namedtuple

Segment = namedtuple('Segment', 'start end')


def optimal_points(segments):
    segments = sorted(segments, key=attrgetter('end'), reverse=True)
    points = []

    #write your code here
    while len(segments) > 0:
        segement = segments.pop()
        point = segement.end
        while len(segments) > 0 and point >= segments[-1].start:
            segments.pop()
        if point not in points:
            points.append(point)

    return points


if __name__ == '__main__':
    input = sys.stdin.read()
    #input = input()
    n, *data = map(int, input.split())
    segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[1::2])))
    points = optimal_points(segments)
    print(len(points))
    print(*points)
normal
{ "blob_id": "c007dc2416d3f7c883c44dea5471927ea6f816d6", "index": 3973, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef optimal_points(segments):\n segments = sorted(segments, key=attrgetter('end'), reverse=True)\n points = []\n while len(segments) > 0:\n segement = segments.pop()\n point = segement.end\n while len(segments) > 0 and point >= segments[-1].start:\n segments.pop()\n if point not in points:\n points.append(point)\n return points\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *data = map(int, input.split())\n segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[\n 1::2])))\n points = optimal_points(segments)\n print(len(points))\n print(*points)\n", "step-3": "<mask token>\nSegment = namedtuple('Segment', 'start end')\n\n\ndef optimal_points(segments):\n segments = sorted(segments, key=attrgetter('end'), reverse=True)\n points = []\n while len(segments) > 0:\n segement = segments.pop()\n point = segement.end\n while len(segments) > 0 and point >= segments[-1].start:\n segments.pop()\n if point not in points:\n points.append(point)\n return points\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *data = map(int, input.split())\n segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[\n 1::2])))\n points = optimal_points(segments)\n print(len(points))\n print(*points)\n", "step-4": "import sys\nfrom operator import attrgetter\nfrom collections import namedtuple\nSegment = namedtuple('Segment', 'start end')\n\n\ndef optimal_points(segments):\n segments = sorted(segments, key=attrgetter('end'), reverse=True)\n points = []\n while len(segments) > 0:\n segement = segments.pop()\n point = segement.end\n while len(segments) > 0 and point >= segments[-1].start:\n segments.pop()\n if point not in points:\n points.append(point)\n return points\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n, *data = map(int, input.split())\n segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[\n 1::2])))\n points = optimal_points(segments)\n print(len(points))\n print(*points)\n", "step-5": "# Uses python3\nimport sys\nfrom operator import attrgetter\nfrom collections import namedtuple\n\nSegment = namedtuple('Segment', 'start end')\n\n\ndef optimal_points(segments):\n segments = sorted(segments, key=attrgetter('end'), reverse=True)\n points = []\n\n #write your code here\n while len(segments) > 0:\n segement = segments.pop()\n point = segement.end\n while len(segments) > 0 and point >= segments[-1].start:\n segments.pop()\n if point not in points:\n points.append(point)\n\n return points\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n #input = input()\n n, *data = map(int, input.split())\n segments = list(map(lambda x: Segment(x[0], x[1]), zip(data[::2], data[1::2])))\n points = optimal_points(segments)\n print(len(points))\n print(*points)\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
from flask import Flask, send_file
import StringIO

app = Flask(__name__)

@app.route('/')
def index():
    strIO = StringIO.StringIO()
    strIO.write('Hello from Dan Jacob and Stephane Wirtel !')
    strIO.seek(0)
    return send_file(strIO,
                     attachment_filename="testing.txt",
                     as_attachment=True)

app.run(debug=True)
normal
{ "blob_id": "45335fa5d4773bdd0ef3e6c340fe06e84169be5e", "index": 8708, "step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n strIO = StringIO.StringIO()\n strIO.write('Hello from Dan Jacob and Stephane Wirtel !')\n strIO.seek(0)\n return send_file(strIO, attachment_filename='testing.txt',\n as_attachment=True)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n strIO = StringIO.StringIO()\n strIO.write('Hello from Dan Jacob and Stephane Wirtel !')\n strIO.seek(0)\n return send_file(strIO, attachment_filename='testing.txt',\n as_attachment=True)\n\n\napp.run(debug=True)\n", "step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n strIO = StringIO.StringIO()\n strIO.write('Hello from Dan Jacob and Stephane Wirtel !')\n strIO.seek(0)\n return send_file(strIO, attachment_filename='testing.txt',\n as_attachment=True)\n\n\napp.run(debug=True)\n", "step-4": "from flask import Flask, send_file\nimport StringIO\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n strIO = StringIO.StringIO()\n strIO.write('Hello from Dan Jacob and Stephane Wirtel !')\n strIO.seek(0)\n return send_file(strIO, attachment_filename='testing.txt',\n as_attachment=True)\n\n\napp.run(debug=True)\n", "step-5": "\nfrom flask import Flask, send_file\nimport StringIO\n\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n strIO = StringIO.StringIO()\n strIO.write('Hello from Dan Jacob and Stephane Wirtel !')\n strIO.seek(0)\n return send_file(strIO,\n attachment_filename=\"testing.txt\",\n as_attachment=True)\n \napp.run(debug=True)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django.contrib.auth.models import User
from django.db import models


class Chat(models.Model):
    category = models.CharField(unique=True, max_length=100)

    def __str__(self):
        return self.category


class ChatMessage(models.Model):
    context = models.CharField(max_length=1000)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    chat = models.ForeignKey(Chat, on_delete=models.CASCADE)
    timestamp = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.context
normal
{ "blob_id": "61179dc734069017adaabd53804ed0102d9416e3", "index": 8865, "step-1": "<mask token>\n\n\nclass ChatMessage(models.Model):\n context = models.CharField(max_length=1000)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n chat = models.ForeignKey(Chat, on_delete=models.CASCADE)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.context\n", "step-2": "<mask token>\n\n\nclass Chat(models.Model):\n <mask token>\n <mask token>\n\n\nclass ChatMessage(models.Model):\n context = models.CharField(max_length=1000)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n chat = models.ForeignKey(Chat, on_delete=models.CASCADE)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.context\n", "step-3": "<mask token>\n\n\nclass Chat(models.Model):\n <mask token>\n\n def __str__(self):\n return self.category\n\n\nclass ChatMessage(models.Model):\n context = models.CharField(max_length=1000)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n chat = models.ForeignKey(Chat, on_delete=models.CASCADE)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.context\n", "step-4": "<mask token>\n\n\nclass Chat(models.Model):\n category = models.CharField(unique=True, max_length=100)\n\n def __str__(self):\n return self.category\n\n\nclass ChatMessage(models.Model):\n context = models.CharField(max_length=1000)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n chat = models.ForeignKey(Chat, on_delete=models.CASCADE)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.context\n", "step-5": "from django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Chat(models.Model):\n category = models.CharField(unique=True, max_length=100)\n\n def __str__(self):\n return self.category\n\n\nclass ChatMessage(models.Model):\n context = models.CharField(max_length=1000)\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n chat = models.ForeignKey(Chat, on_delete=models.CASCADE)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.context\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
<|reserved_special_token_0|>


class ChatMembersFilterAdministrators(Object):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>

    def __init__(self, **kwargs):
        pass

    @staticmethod
    def read(q: dict, *args) ->'ChatMembersFilterAdministrators':
        return ChatMembersFilterAdministrators()
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class ChatMembersFilterAdministrators(Object):
    <|reserved_special_token_0|>
    ID = 'chatMembersFilterAdministrators'

    def __init__(self, **kwargs):
        pass

    @staticmethod
    def read(q: dict, *args) ->'ChatMembersFilterAdministrators':
        return ChatMembersFilterAdministrators()
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class ChatMembersFilterAdministrators(Object):
    """
    Returns the owner and administrators

    Attributes:
        ID (:obj:`str`): ``ChatMembersFilterAdministrators``

    No parameters required.

    Returns:
        ChatMembersFilter

    Raises:
        :class:`telegram.Error`
    """
    ID = 'chatMembersFilterAdministrators'

    def __init__(self, **kwargs):
        pass

    @staticmethod
    def read(q: dict, *args) ->'ChatMembersFilterAdministrators':
        return ChatMembersFilterAdministrators()
<|reserved_special_token_1|>
from ..utils import Object


class ChatMembersFilterAdministrators(Object):
    """
    Returns the owner and administrators

    Attributes:
        ID (:obj:`str`): ``ChatMembersFilterAdministrators``

    No parameters required.

    Returns:
        ChatMembersFilter

    Raises:
        :class:`telegram.Error`
    """
    ID = 'chatMembersFilterAdministrators'

    def __init__(self, **kwargs):
        pass

    @staticmethod
    def read(q: dict, *args) ->'ChatMembersFilterAdministrators':
        return ChatMembersFilterAdministrators()
<|reserved_special_token_1|>
from ..utils import Object


class ChatMembersFilterAdministrators(Object):
    """
    Returns the owner and administrators

    Attributes:
        ID (:obj:`str`): ``ChatMembersFilterAdministrators``

    No parameters required.

    Returns:
        ChatMembersFilter

    Raises:
        :class:`telegram.Error`
    """
    ID = "chatMembersFilterAdministrators"

    def __init__(self, **kwargs):

        pass

    @staticmethod
    def read(q: dict, *args) -> "ChatMembersFilterAdministrators":

        return ChatMembersFilterAdministrators()
flexible
{ "blob_id": "6dfd59bbab74a3a657d2200d62964578c296ee54", "index": 5713, "step-1": "<mask token>\n\n\nclass ChatMembersFilterAdministrators(Object):\n <mask token>\n <mask token>\n\n def __init__(self, **kwargs):\n pass\n\n @staticmethod\n def read(q: dict, *args) ->'ChatMembersFilterAdministrators':\n return ChatMembersFilterAdministrators()\n", "step-2": "<mask token>\n\n\nclass ChatMembersFilterAdministrators(Object):\n <mask token>\n ID = 'chatMembersFilterAdministrators'\n\n def __init__(self, **kwargs):\n pass\n\n @staticmethod\n def read(q: dict, *args) ->'ChatMembersFilterAdministrators':\n return ChatMembersFilterAdministrators()\n", "step-3": "<mask token>\n\n\nclass ChatMembersFilterAdministrators(Object):\n \"\"\"\n Returns the owner and administrators\n\n Attributes:\n ID (:obj:`str`): ``ChatMembersFilterAdministrators``\n\n No parameters required.\n\n Returns:\n ChatMembersFilter\n\n Raises:\n :class:`telegram.Error`\n \"\"\"\n ID = 'chatMembersFilterAdministrators'\n\n def __init__(self, **kwargs):\n pass\n\n @staticmethod\n def read(q: dict, *args) ->'ChatMembersFilterAdministrators':\n return ChatMembersFilterAdministrators()\n", "step-4": "from ..utils import Object\n\n\nclass ChatMembersFilterAdministrators(Object):\n \"\"\"\n Returns the owner and administrators\n\n Attributes:\n ID (:obj:`str`): ``ChatMembersFilterAdministrators``\n\n No parameters required.\n\n Returns:\n ChatMembersFilter\n\n Raises:\n :class:`telegram.Error`\n \"\"\"\n ID = 'chatMembersFilterAdministrators'\n\n def __init__(self, **kwargs):\n pass\n\n @staticmethod\n def read(q: dict, *args) ->'ChatMembersFilterAdministrators':\n return ChatMembersFilterAdministrators()\n", "step-5": "\n\nfrom ..utils import Object\n\n\nclass ChatMembersFilterAdministrators(Object):\n \"\"\"\n Returns the owner and administrators\n\n Attributes:\n ID (:obj:`str`): ``ChatMembersFilterAdministrators``\n\n No parameters required.\n\n Returns:\n ChatMembersFilter\n\n Raises:\n :class:`telegram.Error`\n \"\"\"\n ID = \"chatMembersFilterAdministrators\"\n\n def __init__(self, **kwargs):\n \n pass\n\n @staticmethod\n def read(q: dict, *args) -> \"ChatMembersFilterAdministrators\":\n \n return ChatMembersFilterAdministrators()\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
''' Aaditya Upadhyay oooo$$$$$$$$$$$ oo$$$$$$$$$$$$$$$$$$$$$$$o oo$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o o$ $$ o$ o $ oo o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o $$ $$ $o$ oo $ $ "$ o$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$o $$o$o$ "$$$$$o$ o$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$o $$$$$$$$ $$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$$$$$$$$$$$$$ $$$$$$$$$$$$$$$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$$$$$$$ """$$$ "$$$""""$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "$$$ $$$ o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ "$$o o$$" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$o $$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$" "$$$$$ooooo$$$o o$$ $$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ o$$$$$$$$$$$$$$$$$ $$$$$$$$"$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$$"""""""" """" $$$$ "$$$$$$$$$$$$$$$$$$$$$$$$$$$$" o$$$ "$$o """$$$$$$$$$$$$$$$$$$"$$" $$$ $$o "$$""$$$$$$"""" o$$$ $$$o o$$$" "$$$o o$$$$$o"$$$o o$$$$ "$$$$oo ""$$$o$$$$o o$$$$"" ""$$$$ "$$o$$$$$$$$$""" ""$$$$$$oo $$$$$$$$$$ """"$$$$$$$$$$$ $$$$$$$$$$$$ $$$$$$$$$$" "$$$"""" ''' from sys import stdin, stdout from collections import * from math import gcd, floor, ceil def st(): return list(stdin.readline().strip()) def li(): return list(map(int, stdin.readline().split())) def mp(): return map(int, stdin.readline().split()) def inp(): return int(stdin.readline()) def pr(n): return stdout.write(str(n)+"\n") mod = 1000000007 INF = float('inf') def solve(): def check(n): temp = n while temp: x = temp % 10 temp //= 10 if x != 0: if n % x != 0: return False return True n = inp() while True: if check(n): pr(n) return n += 1 for _ in range(inp()): solve()
normal
{ "blob_id": "9cd1cb84c457db64019fa542efcf6500aa8d6d42", "index": 9275, "step-1": "<mask token>\n\n\ndef li():\n return list(map(int, stdin.readline().split()))\n\n\ndef mp():\n return map(int, stdin.readline().split())\n\n\n<mask token>\n\n\ndef pr(n):\n return stdout.write(str(n) + '\\n')\n\n\n<mask token>\n\n\ndef solve():\n\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef st():\n return list(stdin.readline().strip())\n\n\ndef li():\n return list(map(int, stdin.readline().split()))\n\n\ndef mp():\n return map(int, stdin.readline().split())\n\n\ndef inp():\n return int(stdin.readline())\n\n\ndef pr(n):\n return stdout.write(str(n) + '\\n')\n\n\n<mask token>\n\n\ndef solve():\n\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\nfor _ in range(inp()):\n solve()\n", "step-3": "<mask token>\n\n\ndef st():\n return list(stdin.readline().strip())\n\n\ndef li():\n return list(map(int, stdin.readline().split()))\n\n\ndef mp():\n return map(int, stdin.readline().split())\n\n\ndef inp():\n return int(stdin.readline())\n\n\ndef pr(n):\n return stdout.write(str(n) + '\\n')\n\n\nmod = 1000000007\nINF = float('inf')\n\n\ndef solve():\n\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\nfor _ in range(inp()):\n solve()\n", "step-4": "<mask token>\nfrom sys import stdin, stdout\nfrom collections import *\nfrom math import gcd, floor, ceil\n\n\ndef st():\n return list(stdin.readline().strip())\n\n\ndef li():\n return list(map(int, stdin.readline().split()))\n\n\ndef mp():\n return map(int, stdin.readline().split())\n\n\ndef inp():\n return int(stdin.readline())\n\n\ndef pr(n):\n return stdout.write(str(n) + '\\n')\n\n\nmod = 1000000007\nINF = float('inf')\n\n\ndef solve():\n\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\nfor _ in range(inp()):\n solve()\n", "step-5": "'''\nAaditya Upadhyay\n\n oooo$$$$$$$$$$$\n \n oo$$$$$$$$$$$$$$$$$$$$$$$o\n oo$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o o$ $$ o$\n o $ oo o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$o $$ $$ $o$\noo $ $ \"$ o$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$o $$o$o$\n\"$$$$$o$ o$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$o $$$$$$$$\n $$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$ $$$$$$$$$$$$$$$$$$$$$$$\n $$$$$$$$$$$$$$$$$$$$$$$ $$$$$$$$$$$$$ $$$$$$$$$$$$$$ \"\"\"$$$\n \"$$$\"\"\"\"$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ \"$$$\n $$$ o$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ \"$$o\n o$$\" $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$o\n $$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\" \"$$$$$ooooo$$$o\n o$$\n $$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ o$$$$$$$$$$$$$$$$$\n $$$$$$$$\"$$$$ $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ $$$$\"\"\"\"\"\"\"\"\n \"\"\"\" $$$$ \"$$$$$$$$$$$$$$$$$$$$$$$$$$$$\" o$$$\n \"$$o \"\"\"$$$$$$$$$$$$$$$$$$\"$$\" $$$\n $$o \"$$\"\"$$$$$$\"\"\"\" o$$$\n $$$o o$$$\"\n \"$$$o o$$$$$o\"$$$o o$$$$\n \"$$$$oo \"\"$$$o$$$$o o$$$$\"\"\n \"\"$$$$\n \"$$o$$$$$$$$$\"\"\"\n \"\"$$$$$$oo $$$$$$$$$$\n 
\"\"\"\"$$$$$$$$$$$\n $$$$$$$$$$$$\n $$$$$$$$$$\"\n \"$$$\"\"\"\"\n\n'''\n\nfrom sys import stdin, stdout\nfrom collections import *\nfrom math import gcd, floor, ceil\ndef st(): return list(stdin.readline().strip())\n\n\ndef li(): return list(map(int, stdin.readline().split()))\ndef mp(): return map(int, stdin.readline().split())\ndef inp(): return int(stdin.readline())\ndef pr(n): return stdout.write(str(n)+\"\\n\")\n\n\nmod = 1000000007\nINF = float('inf')\n\n\ndef solve():\n def check(n):\n temp = n\n while temp:\n x = temp % 10\n temp //= 10\n if x != 0:\n if n % x != 0:\n return False\n return True\n\n n = inp()\n while True:\n if check(n):\n pr(n)\n return\n n += 1\n\n\nfor _ in range(inp()):\n solve()\n", "step-ids": [ 4, 7, 8, 9, 10 ] }
[ 4, 7, 8, 9, 10 ]
<|reserved_special_token_0|>


def tree(l):
    return max([(i + j + 2) for i, j in enumerate(l)])


<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def tree(l):
    return max([(i + j + 2) for i, j in enumerate(l)])


<|reserved_special_token_0|>
print(tree(t))
<|reserved_special_token_1|>
<|reserved_special_token_0|>


def tree(l):
    return max([(i + j + 2) for i, j in enumerate(l)])


N = int(sys.stdin.readline())
t = sorted(list(map(int, sys.stdin.readline().split())), reverse=True)
print(tree(t))
<|reserved_special_token_1|>
import sys


def tree(l):
    return max([(i + j + 2) for i, j in enumerate(l)])


N = int(sys.stdin.readline())
t = sorted(list(map(int, sys.stdin.readline().split())), reverse=True)
print(tree(t))
<|reserved_special_token_1|>
# boj, 9237 : 이장님 초대, python3
# 그리디 알고리즘
import sys

def tree(l):
    return max([i+j+2 for i,j in enumerate(l)])


N = int(sys.stdin.readline())
t = sorted(list(map(int, sys.stdin.readline().split())), reverse = True)

print(tree(t))
flexible
{ "blob_id": "e79cdd32977eb357c3f6709887b671c50eb1fa45", "index": 7071, "step-1": "<mask token>\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\n<mask token>\nprint(tree(t))\n", "step-3": "<mask token>\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\nN = int(sys.stdin.readline())\nt = sorted(list(map(int, sys.stdin.readline().split())), reverse=True)\nprint(tree(t))\n", "step-4": "import sys\n\n\ndef tree(l):\n return max([(i + j + 2) for i, j in enumerate(l)])\n\n\nN = int(sys.stdin.readline())\nt = sorted(list(map(int, sys.stdin.readline().split())), reverse=True)\nprint(tree(t))\n", "step-5": "# boj, 9237 : 이장님 초대, python3\n# 그리디 알고리즘\nimport sys\n\ndef tree(l):\n return max([i+j+2 for i,j in enumerate(l)])\n\n\nN = int(sys.stdin.readline())\nt = sorted(list(map(int, sys.stdin.readline().split())), reverse = True)\n\nprint(tree(t))", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project:  GDAL/OGR Test Suite
# Purpose:  TopJSON driver test suite.
# Author:   Even Rouault
#
###############################################################################
# Copyright (c) 2020, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################

import ogrtest
import pytest

from osgeo import ogr

###############################################################################
# Test TopoJSON


def test_ogr_toposjon_objects_is_array():

    ds = ogr.Open("data/topojson/topojson1.topojson")
    lyr = ds.GetLayer(0)
    assert lyr.GetName() == "a_layer"
    feat = lyr.GetNextFeature()
    ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")

    lyr = ds.GetLayer(1)
    assert lyr.GetName() == "TopoJSON"
    assert lyr.GetLayerDefn().GetFieldCount() == 2
    assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == "id"
    assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == "name"
    expected_results = [
        ("foo", None, "POINT EMPTY"),
        (None, None, "POINT EMPTY"),
        (None, None, "POINT EMPTY"),
        (None, None, "POINT (100 1010)"),
        (None, None, "LINESTRING EMPTY"),
        (None, None, "LINESTRING EMPTY"),
        (None, None, "LINESTRING EMPTY"),
        (None, None, "LINESTRING EMPTY"),
        (None, None, "LINESTRING EMPTY"),
        (None, None, "LINESTRING EMPTY"),
        (None, None, "LINESTRING EMPTY"),
        (None, None, "LINESTRING EMPTY"),
        (None, "0", "LINESTRING EMPTY"),
        (None, "foo", "LINESTRING EMPTY"),
        ("1", None, "LINESTRING (100 1000,110 1000,110 1100)"),
        ("2", None, "LINESTRING (110 1100,110 1000,100 1000)"),
        (None, None, "POLYGON EMPTY"),
        (None, None, "POLYGON EMPTY"),
        (None, None, "POLYGON EMPTY"),
        (None, None, "POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))"),
        (None, None, "POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))"),
        (None, None, "MULTIPOINT EMPTY"),
        (None, None, "MULTIPOINT EMPTY"),
        (None, None, "MULTIPOINT EMPTY"),
        (None, None, "MULTIPOINT EMPTY"),
        (None, None, "MULTIPOINT (100 1010,101 1020)"),
        (None, None, "MULTIPOLYGON EMPTY"),
        (None, None, "MULTIPOLYGON EMPTY"),
        (None, None, "MULTIPOLYGON EMPTY"),
        (None, None, "MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))"),
        (None, None, "MULTILINESTRING EMPTY"),
        (None, None, "MULTILINESTRING EMPTY"),
        (None, None, "MULTILINESTRING ((100 1000,110 1000,110 1100))"),
        (None, None, "MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))"),
        (None, None, "MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))"),
    ]
    assert lyr.GetFeatureCount() == len(expected_results)
    for i, exp_result in enumerate(expected_results):
        feat = lyr.GetNextFeature()
        if (
            feat.GetField("id") != exp_result[0]
            or feat.GetField("name") != exp_result[1]
            or feat.GetGeometryRef().ExportToWkt() != exp_result[2]
        ):
            feat.DumpReadable()
            print(exp_result)
            print(feat.GetField("name"))
            pytest.fail("failure at feat index %d" % i)
    ds = None


def test_ogr_toposjon_objects_is_dict():

    ds = ogr.Open("data/topojson/topojson2.topojson")
    lyr = ds.GetLayer(0)
    assert lyr.GetName() == "a_layer"
    assert lyr.GetLayerDefn().GetFieldCount() == 2
    assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == "id"
    assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == "name"
    feat = lyr.GetNextFeature()
    assert feat["id"] == "foo"
    assert feat["name"] == "line"
    ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")

    lyr = ds.GetLayer(1)
    assert lyr.GetName() == "TopoJSON"
    feat = lyr.GetNextFeature()
    ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")

    ds = None


def test_ogr_toposjon_no_transform():

    ds = ogr.Open("data/topojson/topojson3.topojson")
    lyr = ds.GetLayer(0)
    assert lyr.GetName() == "a_layer"
    feat = lyr.GetNextFeature()
    ogrtest.check_feature_geometry(feat, "LINESTRING (0 0,10 0,0 10,10 0,0 0)")

    lyr = ds.GetLayer(1)
    assert lyr.GetName() == "TopoJSON"
    feat = lyr.GetNextFeature()
    ogrtest.check_feature_geometry(feat, "LINESTRING (0 0,10 0,0 10,10 0,0 0)")
    ds = None
normal
{ "blob_id": "270dba92af583e37c35ed5365f764adfdc2f947d", "index": 2112, "step-1": "<mask token>\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n ds = ogr.Open('data/topojson/topojson3.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n ds = None\n", "step-3": "<mask token>\n\n\ndef test_ogr_toposjon_objects_is_array():\n ds = ogr.Open('data/topojson/topojson1.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n expected_results = [('foo', None, 'POINT EMPTY'), (None, None,\n 'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,\n 'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',\n 'LINESTRING EMPTY'), ('1', None,\n 'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,\n 'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,\n 'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,\n 'POLYGON EMPTY'), (None, None,\n 'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n ), (None, None,\n 'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'\n ), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 
'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),\n (None, None, 'MULTIPOLYGON EMPTY'), (None, None,\n 'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,\n None,\n 'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'\n ), (None, None, 'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),\n (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n )]\n assert lyr.GetFeatureCount() == len(expected_results)\n for i, exp_result in enumerate(expected_results):\n feat = lyr.GetNextFeature()\n if feat.GetField('id') != exp_result[0] or feat.GetField('name'\n ) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(\n ) != exp_result[2]:\n feat.DumpReadable()\n print(exp_result)\n print(feat.GetField('name'))\n pytest.fail('failure at feat index %d' % i)\n ds = None\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n ds = ogr.Open('data/topojson/topojson3.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n ds = None\n", "step-4": "import ogrtest\nimport pytest\nfrom osgeo import ogr\n\n\ndef test_ogr_toposjon_objects_is_array():\n ds = ogr.Open('data/topojson/topojson1.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n expected_results = [('foo', None, 'POINT EMPTY'), (None, None,\n 'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,\n 'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',\n 'LINESTRING EMPTY'), ('1', None,\n 'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,\n 'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,\n 'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,\n 'POLYGON 
EMPTY'), (None, None,\n 'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n ), (None, None,\n 'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'\n ), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),\n (None, None, 'MULTIPOLYGON EMPTY'), (None, None,\n 'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,\n None,\n 'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'\n ), (None, None, 'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),\n (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n )]\n assert lyr.GetFeatureCount() == len(expected_results)\n for i, exp_result in enumerate(expected_results):\n feat = lyr.GetNextFeature()\n if feat.GetField('id') != exp_result[0] or feat.GetField('name'\n ) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(\n ) != exp_result[2]:\n feat.DumpReadable()\n print(exp_result)\n print(feat.GetField('name'))\n pytest.fail('failure at feat index %d' % i)\n ds = None\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n ds = ogr.Open('data/topojson/topojson3.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n ds = None\n", "step-5": "#!/usr/bin/env pytest\n# -*- coding: utf-8 -*-\n###############################################################################\n# $Id$\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: TopJSON driver test suite.\n# Author: Even Rouault\n#\n###############################################################################\n# Copyright (c) 2020, Even Rouault <even dot rouault at spatialys.com>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be 
included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\nimport ogrtest\nimport pytest\n\nfrom osgeo import ogr\n\n###############################################################################\n# Test TopoJSON\n\n\ndef test_ogr_toposjon_objects_is_array():\n\n ds = ogr.Open(\"data/topojson/topojson1.topojson\")\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == \"a_layer\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (100 1000,110 1000,110 1100)\")\n\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == \"TopoJSON\"\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == \"id\"\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == \"name\"\n expected_results = [\n (\"foo\", None, \"POINT EMPTY\"),\n (None, None, \"POINT EMPTY\"),\n (None, None, \"POINT EMPTY\"),\n (None, None, \"POINT (100 1010)\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, \"0\", \"LINESTRING EMPTY\"),\n (None, \"foo\", \"LINESTRING EMPTY\"),\n (\"1\", None, \"LINESTRING (100 1000,110 1000,110 1100)\"),\n (\"2\", None, \"LINESTRING (110 1100,110 1000,100 1000)\"),\n (None, None, \"POLYGON EMPTY\"),\n (None, None, \"POLYGON EMPTY\"),\n (None, None, \"POLYGON EMPTY\"),\n (\n None,\n None,\n \"POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))\",\n ),\n (\n None,\n None,\n \"POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))\",\n ),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT (100 1010,101 1020)\"),\n (None, None, \"MULTIPOLYGON EMPTY\"),\n (None, None, \"MULTIPOLYGON EMPTY\"),\n (None, None, \"MULTIPOLYGON EMPTY\"),\n (\n None,\n None,\n \"MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))\",\n ),\n (None, None, \"MULTILINESTRING EMPTY\"),\n (None, None, \"MULTILINESTRING EMPTY\"),\n (None, None, \"MULTILINESTRING ((100 1000,110 1000,110 1100))\"),\n (\n None,\n None,\n \"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))\",\n ),\n (\n None,\n None,\n \"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))\",\n ),\n ]\n assert lyr.GetFeatureCount() == len(expected_results)\n for i, exp_result in enumerate(expected_results):\n feat = lyr.GetNextFeature()\n if (\n feat.GetField(\"id\") != exp_result[0]\n or feat.GetField(\"name\") != exp_result[1]\n or feat.GetGeometryRef().ExportToWkt() != exp_result[2]\n ):\n feat.DumpReadable()\n 
print(exp_result)\n print(feat.GetField(\"name\"))\n pytest.fail(\"failure at feat index %d\" % i)\n ds = None\n\n\ndef test_ogr_toposjon_objects_is_dict():\n\n ds = ogr.Open(\"data/topojson/topojson2.topojson\")\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == \"a_layer\"\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == \"id\"\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == \"name\"\n feat = lyr.GetNextFeature()\n assert feat[\"id\"] == \"foo\"\n assert feat[\"name\"] == \"line\"\n ogrtest.check_feature_geometry(feat, \"LINESTRING (100 1000,110 1000,110 1100)\")\n\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == \"TopoJSON\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (100 1000,110 1000,110 1100)\")\n\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n\n ds = ogr.Open(\"data/topojson/topojson3.topojson\")\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == \"a_layer\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (0 0,10 0,0 10,10 0,0 0)\")\n\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == \"TopoJSON\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (0 0,10 0,0 10,10 0,0 0)\")\n ds = None\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django.shortcuts import render, HttpResponse
from django.views.generic import TemplateView
from .models import Person, Stock_history
from django.http import Http404, HttpResponseRedirect
from .forms import NameForm, UploadFileForm

from .back import handle_uploaded_file, read_file

class IndexView(TemplateView):
    def get(self, request):
        price_history = Stock_history.objects.all()
        context = {
            'entry': price_history
        }
        return render(request, 'budget/index.html', context)

class DetailView(TemplateView):
    def get(self, request, person_id):
        try:
            persons = Person.objects.all()
            person = Person.objects.get(id=person_id)
        except Person.DoesNotExist:
            raise Http404("Person does not exist")

        context = {
            'persons': persons,
            'person': person,
            'first_name': person.first_name,
            'last_name': person.last_name,
            'income': person.income,
        }
        return render(request, 'budget/detail.html', context)


class PersonView(TemplateView):
    def get(self, request):
        persons = Person.objects.all()

        context = {
            'persons': persons,
        }
        return render(request, 'budget/person.html', context)

class AddView(TemplateView):
    template = 'budget/add.html'

    def get(self, request):
        form = NameForm
        context = {'form': form}
        return render(request, self.template, context)

    def post(self, request):
        form = NameForm(request.POST)
        if form.is_valid():
            text = form.cleaned_data
            form = NameForm()
            p = Person(first_name=text['first_name'], last_name=text['last_name'], income = text['income'])
            p.save()
        context = {
            'form': form,
            'text': text,
        }

        return render(request, self.template, context)


class UploadView(TemplateView):
    template_name = 'budget/upload.html'

    def get(self, request):
        form = UploadFileForm()
        return render(request, self.template_name, {'form': form})

    def post(self, request):
        if request.method == 'POST':
            form = UploadFileForm(request.POST, request.FILES)
            if form.is_valid():
                handle_uploaded_file(request.FILES['file'])
                read_file(request.FILES['file'])
                return HttpResponseRedirect('/upload')
            #else:
            #    form = UploadFileForm()
        return render(request, self.template_name, {'form': form})
normal
{ "blob_id": "2d65ffa3fc8a5360702337d749884903b2cb0423", "index": 2353, "step-1": "<mask token>\n\n\nclass PersonView(TemplateView):\n\n def get(self, request):\n persons = Person.objects.all()\n context = {'persons': persons}\n return render(request, 'budget/person.html', context)\n\n\nclass AddView(TemplateView):\n template = 'budget/add.html'\n\n def get(self, request):\n form = NameForm\n context = {'form': form}\n return render(request, self.template, context)\n\n def post(self, request):\n form = NameForm(request.POST)\n if form.is_valid():\n text = form.cleaned_data\n form = NameForm()\n p = Person(first_name=text['first_name'], last_name=text[\n 'last_name'], income=text['income'])\n p.save()\n context = {'form': form, 'text': text}\n return render(request, self.template, context)\n\n\nclass UploadView(TemplateView):\n template_name = 'budget/upload.html'\n\n def get(self, request):\n form = UploadFileForm()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n handle_uploaded_file(request.FILES['file'])\n read_file(request.FILES['file'])\n return HttpResponseRedirect('/upload')\n return render(request, self.template_name, {'form': form})\n", "step-2": "<mask token>\n\n\nclass DetailView(TemplateView):\n <mask token>\n\n\nclass PersonView(TemplateView):\n\n def get(self, request):\n persons = Person.objects.all()\n context = {'persons': persons}\n return render(request, 'budget/person.html', context)\n\n\nclass AddView(TemplateView):\n template = 'budget/add.html'\n\n def get(self, request):\n form = NameForm\n context = {'form': form}\n return render(request, self.template, context)\n\n def post(self, request):\n form = NameForm(request.POST)\n if form.is_valid():\n text = form.cleaned_data\n form = NameForm()\n p = Person(first_name=text['first_name'], last_name=text[\n 'last_name'], income=text['income'])\n p.save()\n context = {'form': form, 'text': text}\n return render(request, self.template, context)\n\n\nclass UploadView(TemplateView):\n template_name = 'budget/upload.html'\n\n def get(self, request):\n form = UploadFileForm()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n handle_uploaded_file(request.FILES['file'])\n read_file(request.FILES['file'])\n return HttpResponseRedirect('/upload')\n return render(request, self.template_name, {'form': form})\n", "step-3": "<mask token>\n\n\nclass IndexView(TemplateView):\n <mask token>\n\n\nclass DetailView(TemplateView):\n\n def get(self, request, person_id):\n try:\n persons = Person.objects.all()\n person = Person.objects.get(id=person_id)\n except Person.DoesNotExist:\n raise Http404('Person does not exist')\n context = {'persons': persons, 'person': person, 'first_name':\n person.first_name, 'last_name': person.last_name, 'income':\n person.income}\n return render(request, 'budget/detail.html', context)\n\n\nclass PersonView(TemplateView):\n\n def get(self, request):\n persons = Person.objects.all()\n context = {'persons': persons}\n return render(request, 'budget/person.html', context)\n\n\nclass AddView(TemplateView):\n template = 'budget/add.html'\n\n def get(self, request):\n form = NameForm\n context = {'form': form}\n return render(request, self.template, context)\n\n def post(self, request):\n form = NameForm(request.POST)\n if 
form.is_valid():\n text = form.cleaned_data\n form = NameForm()\n p = Person(first_name=text['first_name'], last_name=text[\n 'last_name'], income=text['income'])\n p.save()\n context = {'form': form, 'text': text}\n return render(request, self.template, context)\n\n\nclass UploadView(TemplateView):\n template_name = 'budget/upload.html'\n\n def get(self, request):\n form = UploadFileForm()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n handle_uploaded_file(request.FILES['file'])\n read_file(request.FILES['file'])\n return HttpResponseRedirect('/upload')\n return render(request, self.template_name, {'form': form})\n", "step-4": "<mask token>\n\n\nclass IndexView(TemplateView):\n\n def get(self, request):\n price_history = Stock_history.objects.all()\n context = {'entry': price_history}\n return render(request, 'budget/index.html', context)\n\n\nclass DetailView(TemplateView):\n\n def get(self, request, person_id):\n try:\n persons = Person.objects.all()\n person = Person.objects.get(id=person_id)\n except Person.DoesNotExist:\n raise Http404('Person does not exist')\n context = {'persons': persons, 'person': person, 'first_name':\n person.first_name, 'last_name': person.last_name, 'income':\n person.income}\n return render(request, 'budget/detail.html', context)\n\n\nclass PersonView(TemplateView):\n\n def get(self, request):\n persons = Person.objects.all()\n context = {'persons': persons}\n return render(request, 'budget/person.html', context)\n\n\nclass AddView(TemplateView):\n template = 'budget/add.html'\n\n def get(self, request):\n form = NameForm\n context = {'form': form}\n return render(request, self.template, context)\n\n def post(self, request):\n form = NameForm(request.POST)\n if form.is_valid():\n text = form.cleaned_data\n form = NameForm()\n p = Person(first_name=text['first_name'], last_name=text[\n 'last_name'], income=text['income'])\n p.save()\n context = {'form': form, 'text': text}\n return render(request, self.template, context)\n\n\nclass UploadView(TemplateView):\n template_name = 'budget/upload.html'\n\n def get(self, request):\n form = UploadFileForm()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n handle_uploaded_file(request.FILES['file'])\n read_file(request.FILES['file'])\n return HttpResponseRedirect('/upload')\n return render(request, self.template_name, {'form': form})\n", "step-5": "from django.shortcuts import render, HttpResponse\nfrom django.views.generic import TemplateView\nfrom .models import Person, Stock_history\nfrom django.http import Http404, HttpResponseRedirect\nfrom .forms import NameForm, UploadFileForm\n\nfrom .back import handle_uploaded_file, read_file\n\nclass IndexView(TemplateView):\n def get(self, request):\n price_history = Stock_history.objects.all()\n context = {\n 'entry': price_history\n }\n return render(request, 'budget/index.html', context)\n\nclass DetailView(TemplateView):\n def get(self, request, person_id):\n try:\n persons = Person.objects.all()\n person = Person.objects.get(id=person_id)\n except Person.DoesNotExist:\n raise Http404(\"Person does not exist\")\n \n context = {\n 'persons': persons,\n 'person': person,\n 'first_name': person.first_name, \n 'last_name': person.last_name,\n 'income': person.income,\n\n }\n 
return render(request, 'budget/detail.html', context)\n\n\nclass PersonView(TemplateView):\n def get(self, request):\n persons = Person.objects.all()\n\n context = {\n 'persons': persons,\n }\n return render(request, 'budget/person.html', context)\n\nclass AddView(TemplateView):\n template = 'budget/add.html'\n\n def get(self, request):\n form = NameForm\n context = {'form': form}\n return render(request, self.template, context)\n\n\n def post(self, request):\n form = NameForm(request.POST)\n if form.is_valid():\n text = form.cleaned_data\n form = NameForm()\n p = Person(first_name=text['first_name'], last_name=text['last_name'], income = text['income'])\n p.save()\n context = {\n 'form': form,\n 'text': text,\n }\n\n return render(request, self.template, context)\n\n\nclass UploadView(TemplateView):\n template_name = 'budget/upload.html'\n\n def get(self, request):\n form = UploadFileForm()\n return render(request, self.template_name, {'form': form})\n\n def post(self, request):\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n handle_uploaded_file(request.FILES['file'])\n read_file(request.FILES['file'])\n return HttpResponseRedirect('/upload')\n #else:\n # form = UploadFileForm()\n return render(request, self.template_name, {'form': form})\n", "step-ids": [ 10, 11, 13, 14, 16 ] }
[ 10, 11, 13, 14, 16 ]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Migration(migrations.Migration):
    <|reserved_special_token_0|>
    <|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>


class Migration(migrations.Migration):
    dependencies = [('user_details', '0003_auto_20180226_1816')]
    operations = [migrations.AlterField(model_name='token', name=
        'expiry_date', field=models.DateTimeField(default=datetime.datetime
        (2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc))), migrations.
        AlterField(model_name='token', name='user_id', field=models.
        ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
        'user_details.User'))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc


class Migration(migrations.Migration):
    dependencies = [('user_details', '0003_auto_20180226_1816')]
    operations = [migrations.AlterField(model_name='token', name=
        'expiry_date', field=models.DateTimeField(default=datetime.datetime
        (2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc))), migrations.
        AlterField(model_name='token', name='user_id', field=models.
        ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
        'user_details.User'))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-02-26 13:14
from __future__ import unicode_literals

import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('user_details', '0003_auto_20180226_1816'),
    ]

    operations = [
        migrations.AlterField(
            model_name='token',
            name='expiry_date',
            field=models.DateTimeField(default=datetime.datetime(2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc)),
        ),
        migrations.AlterField(
            model_name='token',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_details.User'),
        ),
    ]
flexible
{ "blob_id": "c6170678b523a105312d8ce316853859657d3c94", "index": 2235, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user_details', '0003_auto_20180226_1816')]\n operations = [migrations.AlterField(model_name='token', name=\n 'expiry_date', field=models.DateTimeField(default=datetime.datetime\n (2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc))), migrations.\n AlterField(model_name='token', name='user_id', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'user_details.User'))]\n", "step-4": "from __future__ import unicode_literals\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n dependencies = [('user_details', '0003_auto_20180226_1816')]\n operations = [migrations.AlterField(model_name='token', name=\n 'expiry_date', field=models.DateTimeField(default=datetime.datetime\n (2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc))), migrations.\n AlterField(model_name='token', name='user_id', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'user_details.User'))]\n", "step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.10 on 2018-02-26 13:14\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\nimport django.db.models.deletion\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('user_details', '0003_auto_20180226_1816'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='token',\n name='expiry_date',\n field=models.DateTimeField(default=datetime.datetime(2018, 2, 28, 13, 14, 15, 831612, tzinfo=utc)),\n ),\n migrations.AlterField(\n model_name='token',\n name='user_id',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user_details.User'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]