repo_name (string, 5-114 chars) | repo_url (string, 24-133 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | directory_id (string, 40 chars) | branch_name (string, 209 classes) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 9.83k-683M, nullable) | star_events_count (int64, 0-22.6k) | fork_events_count (int64, 0-4.15k) | gha_license_id (string, 17 classes) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (string, 115 classes) | files (list, 1-13.2k items) | num_files (int64, 1-13.2k)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
DinarKH/WebServerFlask | https://github.com/DinarKH/WebServerFlask | 32742d5f5ce70976c56dc0c51c46f0ed8253c545 | 2f34b87b026ebd9f303718f44a42efb9cde68947 | c0a194f804f20d59e580ed4a8e9ec76c12a9010d | refs/heads/master | 2023-03-26T11:13:06.787827 | 2019-07-26T11:31:15 | 2019-07-26T11:31:15 | 198,180,104 | 0 | 0 | null | 2019-07-22T08:27:12 | 2019-07-26T11:31:17 | 2021-03-20T01:19:16 | HTML | [
{
"alpha_fraction": 0.7833333611488342,
"alphanum_fraction": 0.79313725233078,
"avg_line_length": 47.57143020629883,
"blob_id": "086ca570f5322ca77faebae783603415ceadb549",
"content_id": "d8546d0fad34fa9f8a92f283a2bedf0ae85712a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1650,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 21,
"path": "/README.md",
"repo_name": "DinarKH/WebServerFlask",
"src_encoding": "UTF-8",
"text": "# WebServerFlask<br/>\n1. Твое имя и координаты для обратной связи<br/>\nДинар Халимов<br/>\n2. Кратко опиши архитектуру системы и принятые при проектировании\nрешения<br/>\nВ рамках разработки системы был реализован веб-сервис, работающий под управдением фреймворка Flask.<br/>\nВ качестве проектных решений можно отметить, что из-за того что в Redis позволяет удалять только ключи с течениме времени, \nбыли использованы Sorted Set и ZREMRANGEBYSCORE, чтобы имитировать механизм таймаута с помощью проверки значений<br/>\n3. Использованные внешние библиотеки и причины их выбора<br/>\nБыли использованы:<br/>\nSQLAlchemy для осущесвления работы с СУБД<br/>\nRedis для работы с сервером Redis<br/>\nBcrypt для обеспечения шифрования паролей пользователей системы<br/>\n4. Инструкцию по запуску системы на любом компьютере<br/>\ndocker-compose up<br/>\npip install -r requirements.txt<br/>\npython app.py<br/>\n5. Сколько ты заложил времени до старта работы<br/>\n50 часов<br/>\n6. И сколько ты реально потратил на это времени<br/>\n40 часов<br/>\n"
},
{
"alpha_fraction": 0.5384615659713745,
"alphanum_fraction": 0.5646852850914001,
"avg_line_length": 34.75,
"blob_id": "a7d5323559ae3fb4e7015053829f8f56bfe3b457",
"content_id": "c94d9fba2dc907f7c0f32ebeca20355e1ce97cf2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1144,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 32,
"path": "/WebServerFlask/__init__.py",
"repo_name": "DinarKH/WebServerFlask",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bcrypt import Bcrypt\nfrom flask_login import LoginManager\nimport redis\n\napp = Flask(__name__)\n\nPOSTGRES_URL = \"192.168.99.100:5432\"\nPOSTGRES_USER = \"postgresuser\"\nPOSTGRES_PW = \"123456\"\nPOSTGRES_DB = \"servdb\"\nREDIS_HOST = '127.0.0.1'\nREDIS_PORT = 6379\nREDIS_SET = 'post_set'\nREDIS_POST_TTL = 300 # 5 minutes in seconds\n\napp.config['SECRET_KEY'] = 'b1af4eff3b8bde7a0982fcbc9905fb82'\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2' \\\n '://{user}:{pw}@{url}/{db}'.format(user=POSTGRES_USER,\n pw=POSTGRES_PW,\n url=POSTGRES_URL,\n db=POSTGRES_DB)\ndb = SQLAlchemy(app)\nbcrypt = Bcrypt(app)\nlogin_manager = LoginManager(app)\nlogin_manager.login_view = 'login'\nlogin_manager.login_message_category = 'info'\nr_client = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)\n\nfrom WebServerFlask import routes\n"
},
{
"alpha_fraction": 0.6479761004447937,
"alphanum_fraction": 0.6493032574653625,
"avg_line_length": 35.31325149536133,
"blob_id": "f94d9fe58646397d9f17900e8c89495f3a032c99",
"content_id": "0e82dc41628c3abaf4178647fa7cafb16b20283d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3014,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 83,
"path": "/WebServerFlask/routes.py",
"repo_name": "DinarKH/WebServerFlask",
"src_encoding": "UTF-8",
"text": "from flask import render_template, url_for, redirect, flash, request\nfrom .forms import RegistationForm, LoginForm, PostForm\nfrom .models import User\nfrom WebServerFlask import app, bcrypt, db, r_client, REDIS_SET, REDIS_POST_TTL\nfrom flask_login import login_user, current_user, logout_user, login_required\nimport datetime, time\n\n\[email protected]('/')\ndef home():\n return render_template('home.html')\n\n\[email protected]('/posts/', methods=['GET', 'POST'])\n@login_required\ndef posts_page():\n '''\n Show posts from redis cache and delete\n '''\n dt = datetime.datetime.now()\n curr_time = time.mktime(dt.timetuple())\n r_client.zremrangebyscore(REDIS_SET, min='-inf', max=curr_time) # Delete old posts\n if request.method == 'POST':\n r_client.zrem(REDIS_SET, request.values.get('post_name')) # Delete redis post by name\n return redirect(url_for('posts_page'))\n redis_posts = r_client.zrange(REDIS_SET, 0, -1) # Get redis posts\n return render_template('post.html', redis_posts=redis_posts)\n\n\[email protected]('/register/', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('posts_page'))\n form = RegistationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data,\n password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash('Account create', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', form=form)\n\n\[email protected]('/login/', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('posts_page'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n flash('You log in system', 'success')\n login_user(user, remember=True)\n next_page = request.args.get('next')\n return redirect(next_page) if next_page else redirect(url_for('posts_page'))\n else:\n flash('Invalid data', 'danger')\n return render_template('login.html', form=form)\n\n\[email protected]('/logout/')\ndef logout():\n logout_user()\n return redirect(url_for('posts_page'))\n\n\[email protected]('/post/new/', methods=['GET', 'POST'])\n@login_required\ndef post_new():\n '''\n Create new post for reids\n '''\n form = PostForm()\n if form.validate_on_submit():\n dt = datetime.datetime.now()\n curr_time = time.mktime(dt.timetuple())\n # Create post with time in value = current time + 5 minutes\n r_client.zadd(REDIS_SET, {form.content.data: curr_time + REDIS_POST_TTL})\n flash('Post was created', 'success')\n return redirect(url_for('posts_page'))\n return render_template('new_post.html', form=form)\n"
}
] | 3 |
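
The WebServerFlask README above describes imitating per-entry expiry in Redis: since Redis can only expire whole keys, each post is stored in a Sorted Set with its expiry timestamp as the score, and `ZREMRANGEBYSCORE` purges everything whose score has passed. A minimal standalone sketch of that pattern, assuming a local Redis server and the redis-py client (the key name and TTL below are illustrative, not taken from the repo):

```python
import time
import redis

r = redis.Redis(host="127.0.0.1", port=6379)  # assumes a local Redis server
POST_SET = "post_set"  # illustrative key name
POST_TTL = 300         # expire entries after 5 minutes

def add_post(text):
    # Score each member with its expiry time: now + TTL.
    r.zadd(POST_SET, {text: time.time() + POST_TTL})

def live_posts():
    # Purge members whose expiry score is already in the past, then
    # return whatever remains; this imitates a per-member timeout.
    r.zremrangebyscore(POST_SET, min="-inf", max=time.time())
    return r.zrange(POST_SET, 0, -1)
```
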
PowersYang/LearningBigdata | https://github.com/PowersYang/LearningBigdata | fdf601deeb18106787db33736f4b4f7dbb787c6a | 779046e5d26b5a87d794801d3ef8d8b176b4290f | 2f8bc81236e59ac116d08f03c7d39fa6848047b5 | refs/heads/master | 2022-02-26T00:02:16.085027 | 2019-12-29T15:21:49 | 2019-12-29T15:21:49 | 224,469,659 | 0 | 0 | null | 2019-11-27T16:13:16 | 2019-12-29T15:22:16 | 2021-12-14T21:36:33 | Java | [
{
"alpha_fraction": 0.6302765607833862,
"alphanum_fraction": 0.6404657959938049,
"avg_line_length": 23.571428298950195,
"blob_id": "a5258a9dc1818909141dc3f13479a3f869e014f4",
"content_id": "7676763fa897513953d4549954af3dbadfa363fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 687,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 28,
"path": "/Spark/sparksql/demo.py",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom pyspark import SparkConf\nfrom pyspark.sql import SparkSession\n\nos.environ['PYSPARK_PYTHON'] = '/usr/local/bin/python3'\n\nif __name__ == '__main__':\n spark_conf = SparkConf().setAppName('SparkSql Demo').setMaster('local[*]')\n\n spark = SparkSession.builder.config(conf=spark_conf).getOrCreate()\n\n df = spark.read.json('../in/user.json')\n\n df.show()\n\n df.select('name').show()\n\n df.select(df['name'], df['age'] + 1).show()\n\n # df.createGlobalTempView('students')\n #\n # spark.sql('select name, age from global_temp.students').show()\n\n # rdd = spark.sparkContext.parallelize([1, 2, 3, 4, 5])\n # rdd.foreach(lambda x: print(x))\n\n spark.stop()"
},
{
"alpha_fraction": 0.6656293272972107,
"alphanum_fraction": 0.6687413454055786,
"avg_line_length": 26.81730842590332,
"blob_id": "8c03001d1eba9269b22bb295ff27ab255ab0f265",
"content_id": "8bdbe5001349adfcbd6ee2dc9c213efd4420fcdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 3020,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 104,
"path": "/HBase/src/main/java/com/ysir308/mr/FruitDriver.java",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "package com.ysir308.mr;\n\n\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.hadoop.hbase.client.Put;\nimport org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;\nimport org.apache.hadoop.hbase.mapreduce.TableReducer;\nimport org.apache.hadoop.hbase.util.Bytes;\nimport org.apache.hadoop.io.LongWritable;\nimport org.apache.hadoop.io.NullWritable;\nimport org.apache.hadoop.io.Text;\nimport org.apache.hadoop.mapreduce.Job;\nimport org.apache.hadoop.mapreduce.Mapper;\nimport org.apache.hadoop.mapreduce.lib.input.FileInputFormat;\nimport org.apache.hadoop.util.Tool;\nimport org.apache.hadoop.util.ToolRunner;\n\nimport java.io.IOException;\n\n/**\n * 读取本地文件,上传至HBase\n */\npublic class FruitDriver implements Tool {\n\n // 定义Configuration\n Configuration conf = new Configuration();\n\n public int run(String[] args) throws Exception {\n\n // 获取Job方法\n Job job = Job.getInstance(conf);\n\n // 设置驱动类路径\n job.setJarByClass(FruitDriver.class);\n\n // 设置Mapper以及Mapper输出的KV类型\n job.setMapperClass(FruitMapper.class);\n job.setMapOutputKeyClass(LongWritable.class);\n job.setMapOutputValueClass(Text.class);\n\n // 设置Reducer\n TableMapReduceUtil.initTableReducerJob(\"fruit\", FruitReducer.class, job);\n\n // 设置最终输出数据的KV类型\n // 不用写\n\n\n // 设置输入输出参数\n FileInputFormat.setInputPaths(job, new Path(args[0]));\n\n // 提交任务\n boolean result = job.waitForCompletion(true);\n\n return result ? 0 : 1;\n }\n\n public void setConf(Configuration configuration) {\n conf = configuration;\n }\n\n public Configuration getConf() {\n return conf;\n }\n\n public static void main(String[] args) {\n\n\n try {\n Configuration conf = new Configuration();\n int run = ToolRunner.run(conf, new FruitDriver(), args);\n\n System.exit(run);\n } catch (Exception e) {\n e.printStackTrace();\n }\n }\n}\n\n\nclass FruitMapper extends Mapper<LongWritable, Text, LongWritable, Text> {\n @Override\n protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {\n\n context.write(key, value);\n }\n}\n\nclass FruitReducer extends TableReducer<LongWritable, Text, NullWritable> {\n @Override\n protected void reduce(LongWritable key, Iterable<Text> values, Context context) throws IOException, InterruptedException {\n\n for (Text value : values) {\n String[] fields = value.toString().split(\"\\t\");\n\n // 构建put对象\n Put put = new Put(Bytes.toBytes(fields[0]));\n put.addColumn(Bytes.toBytes(\"info\"), Bytes.toBytes(\"name\"), Bytes.toBytes(fields[1]));\n put.addColumn(Bytes.toBytes(\"info\"), Bytes.toBytes(\"color\"), Bytes.toBytes(fields[2]));\n\n context.write(NullWritable.get(), put);\n }\n }\n}"
},
{
"alpha_fraction": 0.554872453212738,
"alphanum_fraction": 0.5576391220092773,
"avg_line_length": 24.0230770111084,
"blob_id": "1ddcfdde09bebcc56cd9c73d55f3511d1d1461e0",
"content_id": "641c7dcce1a504f9787c4f4a627748abfe83b156",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 6972,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 260,
"path": "/HBase/src/main/java/com/ysir308/test/TestAPI.java",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "package com.ysir308.test;\n\n\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.hbase.*;\nimport org.apache.hadoop.hbase.client.*;\nimport org.apache.hadoop.hbase.util.Bytes;\n\nimport java.io.IOException;\n\n/**\n * DDL\n * 1、创建命名空间\n * 2、表的增删\n * <p>\n * <p>\n * DML\n * 数据的增删改查\n */\npublic class TestAPI {\n\n private static Connection conn = null;\n private static Admin admin = null;\n\n static {\n try {\n\n Configuration conf = HBaseConfiguration.create();\n conf.set(\"hbase.zookeeper.quorum\", \"hadoop102,hadoop103,hadoop104\");\n\n conn = ConnectionFactory.createConnection(conf);\n admin = conn.getAdmin();\n\n } catch (IOException e) {\n e.printStackTrace();\n }\n }\n\n /**\n * 判断表是否存在\n *\n * @param tableName\n * @return\n * @throws IOException\n */\n public static boolean isTableExist(String tableName) throws IOException {\n\n boolean exists = admin.tableExists(TableName.valueOf(tableName));\n\n return exists;\n }\n\n /**\n * 创建表\n *\n * @param tableName 表名称\n * @param cfs 列族\n */\n public static void createTable(String tableName, String... cfs) throws IOException {\n\n // 判断是否存在列族信息\n if (cfs.length <= 0) {\n System.out.println(\"请设置列族信息!\");\n return;\n }\n\n // 判断表是否存在\n if (isTableExist(tableName)) {\n System.out.println(tableName + \" 表已存在!\");\n return;\n }\n\n // 表描述器\n HTableDescriptor hTableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));\n\n // 循环添加列族信息\n for (String cf : cfs) {\n // 列族描述器\n HColumnDescriptor hColumnDescriptor = new HColumnDescriptor(cf);\n\n // 添加列族\n hTableDescriptor.addFamily(hColumnDescriptor);\n }\n\n\n // 创建表\n admin.createTable(hTableDescriptor);\n }\n\n /**\n * 删除表\n *\n * @param tableName\n */\n public static void dropTable(String tableName) throws IOException {\n\n if (!isTableExist(tableName)) {\n System.out.println(tableName + \" 表不存在!\");\n }\n\n admin.disableTable(TableName.valueOf(tableName));\n admin.deleteTable(TableName.valueOf(tableName));\n }\n\n /**\n * 创建命名空间\n *\n * @param namespace\n */\n public static void createNamespace(String namespace) {\n\n NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create(namespace).build();\n\n try {\n\n admin.createNamespace(namespaceDescriptor);\n\n } catch (NamespaceExistException e) {\n // API没有提供专门的方法来判断namespace是否已经存在\n // 但是可以通过捕获异常来解决\n\n System.out.println(namespace + \" 命名空间已存在\");\n } catch (IOException e) {\n e.printStackTrace();\n }\n }\n\n /**\n * 插入数据\n *\n * @param tableName\n * @param rowKey\n * @param cf 列族\n * @param cn 列\n * @param value 值\n */\n public static void putData(String tableName, String rowKey, String cf, String cn, String value) throws IOException {\n Table table = conn.getTable(TableName.valueOf(tableName));\n\n Put put = new Put(Bytes.toBytes(rowKey));\n put.addColumn(Bytes.toBytes(cf), Bytes.toBytes(cn), Bytes.toBytes(value));\n\n // 添加多个列,如果需要添加多个rowKey的话就需要构造多个Put\n put.addColumn(Bytes.toBytes(cf), Bytes.toBytes(cn), Bytes.toBytes(value));\n\n table.put(put);\n\n table.close();\n }\n\n /**\n * 获取数据\n *\n * @param tableName\n * @param rowKey\n * @param cf\n * @param cn\n */\n public static void getData(String tableName, String rowKey, String cf, String cn) throws IOException {\n Table table = conn.getTable(TableName.valueOf(tableName));\n\n Get get = new Get(Bytes.toBytes(rowKey));\n get.addColumn(Bytes.toBytes(cf), Bytes.toBytes(cn));\n get.setMaxVersions(5);\n\n // 查询结果集\n Result result = table.get(get);\n\n if (!result.isEmpty()) {\n for (Cell cell : result.rawCells()) {\n 
System.out.println(\"-------------------------\");\n System.out.println(\"列族: \" + Bytes.toString(CellUtil.cloneFamily(cell)));\n System.out.println(\"列: \" + Bytes.toString(CellUtil.cloneQualifier(cell)));\n System.out.println(\"值: \" + Bytes.toString(CellUtil.cloneValue(cell)));\n }\n }\n\n table.close();\n }\n\n /**\n * 扫描全表\n *\n * @param tableName\n * @throws IOException\n */\n public static void scanTable(String tableName) throws IOException {\n\n Table table = conn.getTable(TableName.valueOf(tableName));\n\n Scan scan = new Scan();\n\n ResultScanner scanner = table.getScanner(scan);\n\n for (Result result : scanner) {\n for (Cell cell : result.rawCells()) {\n System.out.println(\"-------------------------\");\n System.out.println(\"RowKey: \" + Bytes.toString(CellUtil.cloneRow(cell)));\n System.out.println(\"列族: \" + Bytes.toString(CellUtil.cloneFamily(cell)));\n System.out.println(\"列: \" + Bytes.toString(CellUtil.cloneQualifier(cell)));\n System.out.println(\"值: \" + Bytes.toString(CellUtil.cloneValue(cell)));\n }\n }\n\n table.close();\n }\n\n\n /**\n * 删除数据\n *\n * @param tableName\n * @param rowKey\n * @param cf\n * @param cn\n * @throws IOException\n */\n public static void deleteData(String tableName, String rowKey, String cf, String cn) throws IOException {\n\n Table table = conn.getTable(TableName.valueOf(tableName));\n\n Delete delete = new Delete(Bytes.toBytes(rowKey));\n // 在生产环境中尽量使用addColumns()方法,addColumn()方法慎用\n // 不然会出现一些很诡异的现象\n delete.addColumns(Bytes.toBytes(cf), Bytes.toBytes(cn));\n\n table.delete(delete);\n\n table.close();\n }\n\n public static void close() {\n\n if (admin != null) {\n try {\n admin.close();\n } catch (IOException e) {\n e.printStackTrace();\n }\n }\n\n if (conn != null) {\n try {\n conn.close();\n } catch (IOException e) {\n e.printStackTrace();\n }\n }\n\n }\n\n public static void main(String[] args) throws IOException {\n\n System.out.println(isTableExist(\"student\"));\n\n createTable(\"student\", \"info1\", \"info2\");\n\n close();\n }\n}\n"
},
{
"alpha_fraction": 0.6514936089515686,
"alphanum_fraction": 0.6642958521842957,
"avg_line_length": 21.677419662475586,
"blob_id": "c367ab17a37a4b309215439ff5b9c85ec6c6bef8",
"content_id": "eccf201dba94f3ad65bc8150f6e73fb09f2fadd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 703,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 31,
"path": "/Spark/spark_streaming/demo2.py",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\n\nos.environ['PYSPARK_PYTHON'] = '/usr/local/bin/python3'\n\n\ndef updateFunc(new_values, last_sum):\n if last_sum is None:\n last_sum = 0\n return sum(new_values) + (last_sum or 0)\n\n\nif __name__ == '__main__':\n sc = SparkContext('local[*]', 'WordCount')\n ssc = StreamingContext(sc, 5)\n ssc.checkpoint('checkpoint')\n\n lines = ssc.socketTextStream('localhost', 9999)\n\n words = lines.flatMap(lambda line: line.split(' '))\n\n pairs = words.map(lambda word: (word, 1))\n\n word_counts = pairs.updateStateByKey(updateFunc)\n\n word_counts.pprint()\n\n ssc.start()\n ssc.awaitTermination()\n"
},
{
"alpha_fraction": 0.7128602862358093,
"alphanum_fraction": 0.7161862254142761,
"avg_line_length": 21.549999237060547,
"blob_id": "1c2acb43232251c4b3742cdb9c6ca11cb4326210",
"content_id": "536118d2ac9a75638ab271a5a1ea5b68faeb2903",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 926,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 40,
"path": "/Kafka/src/main/java/com/ysir308/interceptor/TimeInterceptor.java",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "package com.ysir308.interceptor;\n\nimport org.apache.kafka.clients.producer.ProducerInterceptor;\nimport org.apache.kafka.clients.producer.ProducerRecord;\nimport org.apache.kafka.clients.producer.RecordMetadata;\n\nimport java.util.Map;\n\n/**\n * 在传入的数据中添加时间戳\n */\npublic class TimeInterceptor implements ProducerInterceptor {\n\n\n @Override\n public void configure(Map<String, ?> map) {\n\n }\n\n @Override\n public ProducerRecord onSend(ProducerRecord producerRecord) {\n long timeStamp = System.currentTimeMillis();\n\n String value = producerRecord.value().toString();\n\n ProducerRecord newRecord = new ProducerRecord(producerRecord.topic(), producerRecord.partition(), timeStamp + \",\" + value);\n return newRecord;\n }\n\n @Override\n public void onAcknowledgement(RecordMetadata recordMetadata, Exception e) {\n\n }\n\n @Override\n public void close() {\n\n }\n\n}\n"
},
{
"alpha_fraction": 0.6722689270973206,
"alphanum_fraction": 0.6776164770126343,
"avg_line_length": 29.78823471069336,
"blob_id": "61ee4590f63c4e6e28580f5e2c4fc1caafdc484a",
"content_id": "ae74a82f48790ba50a7fbc23f68fddf11f26f891",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2680,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 85,
"path": "/Hadoop/src/main/java/com/ysir308/flowcount/FlowCount.java",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "package com.ysir308.flowcount;\n\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.hadoop.io.LongWritable;\nimport org.apache.hadoop.io.Text;\nimport org.apache.hadoop.mapreduce.Job;\nimport org.apache.hadoop.mapreduce.Mapper;\nimport org.apache.hadoop.mapreduce.Reducer;\nimport org.apache.hadoop.mapreduce.lib.input.FileInputFormat;\nimport org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;\n\nimport java.io.IOException;\n\npublic class FlowCount {\n public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {\n Configuration conf = new Configuration();\n Job job = Job.getInstance(conf);\n\n job.setJarByClass(FlowCount.class);\n\n job.setMapperClass(FlowMapper.class);\n job.setReducerClass(FlowReduce.class);\n\n job.setMapOutputKeyClass(Text.class);\n job.setMapOutputValueClass(FlowBean.class);\n\n job.setOutputKeyClass(Text.class);\n job.setOutputValueClass(FlowBean.class);\n\n // 设置自定义分区规则\n job.setPartitionerClass(ProvincePartitioner.class);\n // 自定义分区规则必须设置NumReduceTasks,默认为1\n job.setNumReduceTasks(5);\n\n FileInputFormat.setInputPaths(job, new Path(args[0]));\n FileOutputFormat.setOutputPath(job, new Path(args[1]));\n\n boolean result = job.waitForCompletion(true);\n\n System.exit(result ? 0 : 1);\n }\n}\n\n\nclass FlowMapper extends Mapper<LongWritable, Text, Text, FlowBean> {\n\n Text k = new Text();\n FlowBean v = new FlowBean();\n\n @Override\n protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {\n String line = value.toString();\n String[] fields = line.split(\"\\t\");\n\n k.set(fields[1]); // 手机号\n\n long upFlow = Long.parseLong(fields[fields.length - 3]); // 上行流量\n long downFlow = Long.parseLong(fields[fields.length - 2]);\n\n v.setUpFlow(upFlow);\n v.setDownFlow(downFlow);\n// v.setSumFlow(upFlow, downFlow);\n\n context.write(k, v);\n }\n}\n\nclass FlowReduce extends Reducer<Text, FlowBean, Text, FlowBean> {\n @Override\n protected void reduce(Text key, Iterable<FlowBean> values, Context context) throws IOException, InterruptedException {\n long sum_upFlow = 0;\n long sum_downFlow = 0;\n\n for (FlowBean flowBean : values) {\n sum_upFlow += flowBean.getUpFlow();\n sum_downFlow += flowBean.getDownFlow();\n }\n\n FlowBean v = new FlowBean();\n v.set(sum_upFlow, sum_downFlow);\n\n context.write(key, v);\n }\n}\n\n"
},
{
"alpha_fraction": 0.7233009934425354,
"alphanum_fraction": 0.7346278429031372,
"avg_line_length": 22.80769157409668,
"blob_id": "a1ead01aa11c9e494d9331acbcbcc175d6091536",
"content_id": "c4dd175daee284a4bd3b4306649d928cdc3c147b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 628,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 26,
"path": "/Spark/spark_streaming/demo.py",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\n\nos.environ['PYSPARK_PYTHON'] = '/usr/local/bin/python3'\n\nsc = SparkContext('local[*]', 'WordCount')\nssc = StreamingContext(sc, 1)\n\nlines = ssc.socketTextStream('localhost', 9999)\n\n# 更换数据源\n# lines = ssc.textFileStream(\"../in/streaming\")\n\nwords = lines.flatMap(lambda line: line.split(' '))\n\npairs = words.map(lambda word: (word, 3))\n\nword_counts = pairs.reduceByKey(lambda x, y: x + y)\n\n# Print the first ten elements of each RDD generated in this DStream to the console\nword_counts.pprint()\n\nssc.start()\nssc.awaitTermination()"
},
{
"alpha_fraction": 0.6564383506774902,
"alphanum_fraction": 0.6772602796554565,
"avg_line_length": 29.93220329284668,
"blob_id": "25929ed06788206bd6973228e9f2df16c0218098",
"content_id": "ac03c4e9c232780a05ea7720fff3f8052ebdf402",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1961,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 59,
"path": "/Kafka/src/main/java/com/ysir308/producer/IntercetorProducer.java",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "package com.ysir308.producer;\n\nimport org.apache.kafka.clients.producer.KafkaProducer;\nimport org.apache.kafka.clients.producer.ProducerConfig;\nimport org.apache.kafka.clients.producer.ProducerRecord;\n\nimport java.util.ArrayList;\nimport java.util.Properties;\n\npublic class IntercetorProducer {\n public static void main(String[] args) {\n\n\n // 创建Kafka生产者的配置信息\n Properties properties = new Properties();\n\n // 指定连接的集群\n properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, \"hadoop102:9092\");\n\n // 指定ACK应答级别\n properties.put(ProducerConfig.ACKS_CONFIG, \"all\");\n\n // 重试次数\n properties.put(ProducerConfig.RETRIES_CONFIG, 1);\n\n // 批次大小 16k\n properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);\n\n // 等待时间\n properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);\n\n // RecordAccumulator缓冲区大小 32M\n properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);\n\n properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, \"org.apache.kafka.common.serialization.StringSerializer\");\n properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, \"org.apache.kafka.common.serialization.StringSerializer\");\n\n // 添加拦截器,注意添加顺序\n ArrayList<String> interceptors = new ArrayList<>();\n interceptors.add(\"com.ysir308.interceptor.TimeInterceptor\");\n interceptors.add(\"com.ysir308.interceptor.CounterInterceptor\");\n properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, interceptors);\n\n\n // 创建生产者对象\n KafkaProducer<String, String> producer = new KafkaProducer<>(properties);\n\n for (int i = 0; i < 10; i++) {\n\n // 发送数据\n producer.send(new ProducerRecord<String, String>(\"first\", \"ysir \" + i));\n }\n\n\n // 关闭资源\n producer.close();\n\n }\n}\n"
},
{
"alpha_fraction": 0.7248533964157104,
"alphanum_fraction": 0.7280108332633972,
"avg_line_length": 30.239437103271484,
"blob_id": "37766f3987f3b104bbf14470e443c7b241a0d1ac",
"content_id": "4613418cfb05710276cc616779dee79144e3756d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2217,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 71,
"path": "/Hadoop/src/main/java/com/ysir308/kvtextinputformat/KeyValueText.java",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "package com.ysir308.kvtextinputformat;\n\nimport org.apache.hadoop.conf.Configuration;\nimport org.apache.hadoop.fs.Path;\nimport org.apache.hadoop.io.LongWritable;\nimport org.apache.hadoop.io.Text;\nimport org.apache.hadoop.mapreduce.Job;\nimport org.apache.hadoop.mapreduce.Mapper;\nimport org.apache.hadoop.mapreduce.Reducer;\nimport org.apache.hadoop.mapreduce.lib.input.FileInputFormat;\nimport org.apache.hadoop.mapreduce.lib.input.KeyValueLineRecordReader;\nimport org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;\nimport org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;\n\nimport java.io.IOException;\n\n\npublic class KeyValueText {\n public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {\n Configuration conf = new Configuration();\n conf.set(KeyValueLineRecordReader.KEY_VALUE_SEPERATOR, \" \");\n\n Job job = Job.getInstance(conf);\n\n job.setJarByClass(KeyValueText.class);\n job.setMapperClass(KeyValueMapper.class);\n job.setReducerClass(KeyValueReducer.class);\n\n job.setMapOutputKeyClass(Text.class);\n job.setMapOutputValueClass(LongWritable.class);\n\n job.setOutputKeyClass(Text.class);\n job.setOutputValueClass(LongWritable.class);\n\n FileInputFormat.setInputPaths(job, new Path(args[0]));\n job.setInputFormatClass(KeyValueTextInputFormat.class);\n\n FileOutputFormat.setOutputPath(job, new Path(args[1]));\n\n job.waitForCompletion(true);\n }\n}\n\nclass KeyValueMapper extends Mapper<Text, Text, Text, LongWritable> {\n\n LongWritable v = new LongWritable(1);\n\n @Override\n protected void map(Text key, Text value, Context context) throws IOException, InterruptedException {\n\n context.write(key, v);\n\n }\n}\n\n\nclass KeyValueReducer extends Reducer<Text, LongWritable, Text, LongWritable> {\n LongWritable v = new LongWritable();\n\n @Override\n protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {\n long sum = 0l;\n for (LongWritable value : values) {\n sum += value.get();\n }\n\n v.set(sum);\n\n context.write(key, v);\n }\n}"
},
{
"alpha_fraction": 0.5567010045051575,
"alphanum_fraction": 0.5956472158432007,
"avg_line_length": 24.676469802856445,
"blob_id": "c6ef47415aa971939703d55d44d73c3c85559d03",
"content_id": "9d53777519972b5d99a5bda6c6d0f3b9005e48a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 995,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 34,
"path": "/Hadoop/src/main/java/com/ysir308/flowcount/ProvincePartitioner.java",
"repo_name": "PowersYang/LearningBigdata",
"src_encoding": "UTF-8",
"text": "package com.ysir308.flowcount;\n\nimport org.apache.hadoop.io.Text;\nimport org.apache.hadoop.mapreduce.Partitioner;\n\npublic class ProvincePartitioner extends Partitioner<Text, FlowBean> {\n\n /**\n * 按照手机号前缀分区\n * 将136、137、138、139开头的手机号分别统计到不同的文件中\n */\n @Override\n public int getPartition(Text key, FlowBean value, int numPartitions) {\n // key和value分别是map阶段的输出\n // key是手机号,value是流量信息(FlowBean对象)\n\n // 获取手机号前三位\n String prePhoneNum = key.toString().substring(0, 3);\n\n int partition = 4;\n\n if (\"136\".equals(prePhoneNum)) {\n partition = 0;\n } else if (\"137\".equals(prePhoneNum)) {\n partition = 1;\n } else if (\"138\".equals(prePhoneNum)) {\n partition = 2;\n } else if (\"139\".equals(prePhoneNum)) {\n partition = 3;\n }\n\n return partition;\n }\n}\n"
}
] | 10 |
EiTamOnya/fasta-manipulation-scripts | https://github.com/EiTamOnya/fasta-manipulation-scripts | 5599502e42f79e219f49a71516f183dd5e130362 | 78b9beac60dff6fac5669dbee1a65c6f9ff57492 | 0f63312bf547e2b62d95d8381b7c1739950e2036 | refs/heads/master | 2023-03-08T01:18:53.891986 | 2021-02-19T08:35:03 | 2021-02-19T08:35:03 | 338,785,524 | 0 | 0 | null | 2021-02-14T11:04:06 | 2021-02-14T11:21:33 | 2021-02-19T08:35:03 | Python | [
{
"alpha_fraction": 0.6218140721321106,
"alphanum_fraction": 0.6274362802505493,
"avg_line_length": 30.75,
"blob_id": "301063a67fd39dc840641efda06cdb99a05bdd8a",
"content_id": "12997b186c3749318091ecffcac21e78cc052b04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2668,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 84,
"path": "/replace_header.py",
"repo_name": "EiTamOnya/fasta-manipulation-scripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport argparse\nimport glob\nimport os\nimport sys\nimport re\nfrom tqdm import tqdm\n\n\n# get the flags and parse them\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dir\", help=\"Directory in which the files are located\",\n action=\"store\", default=None)\nparser.add_argument(\"-o\", \"--output\", \n help=\"Directory in which the new files are created\",\n action=\"store\", default='output_script_results')\nparser.add_argument(\"-d\", \n help=\"Enter a delimiter, ex: '-d -'\",\n action=\"store\", default='_')\nparser.add_argument(\"-r\", \n help=\"Enter a range, ex: '-r 1-4'\",\n action=\"store\", default='2-4')\nargs = parser.parse_args()\n\n# directory will be overwritten later\ndirectory = ''\noutput_folder = args.output\ndelimeter = args.d\nmy_range = args.r\nsub_str = re.compile(r'X{1,}')\n\n# use to exit when there's an exception or the end of the script\ndef exit_message(number):\n input('\\nPress any key to exit!')\n sys.exit(number)\n\ndef replace_x(matchobj):\n return 'N' * len(matchobj.group(0))\n\n# check if the user has input a directory\nif args.dir is None:\n directory = input('Please input the file directory: ')\nelse:\n directory = args.dir\n\n# check if the input directory is valid\ntry:\n os.chdir(directory)\nexcept Exception as ex:\n print('Not a valid directory\\n', ex)\n exit_message(1)\n\n# check if the output folder already exists and/or can be created\nif os.path.isdir(output_folder) is False:\n try:\n os.mkdir(output_folder)\n except Exception as ex:\n print('Cannot create output directory!\\n', ex)\n exit_message(1)\n\n# get all files\nfiles = glob.glob('*.fa')\nlines = []\n\n# get the start and end of the range\nranges = my_range.split('-')\n\n# iterate the files and also use tqdm for the progress bar\nfor file in tqdm(files, desc=\"Processing...\", colour='#cc4722'):\n # split the file in an array and join only the required parts\n new_name = delimeter.join(file.split(delimeter)[int(ranges[0]):int(ranges[1])]).replace('.fa', '')\n try:\n # read the file contents and replace the first line\n with open(file, 'r') as rfile:\n lines = rfile.readlines()\n lines[0] = '>' + new_name + '\\n'\n rfile.close()\n read_data = re.sub(sub_str, repl=replace_x, string=''.join(lines))\n # write the amended contents to a new file\n with open(f'{output_folder}/{new_name}.fa', 'w') as wfile:\n wfile.write(read_data)\n wfile.close()\n except Exception as ex:\n print(f'\\nCannot process file - {file}\\n', ex)\n\n"
},
{
"alpha_fraction": 0.3076923191547394,
"alphanum_fraction": 0.6153846383094788,
"avg_line_length": 12,
"blob_id": "b7ae0d6ae1f26e840cda9974b174d1126a4abb4a",
"content_id": "d15f9c52a758c725b3605e78d480000936a45ca1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 13,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 1,
"path": "/requirements.txt",
"repo_name": "EiTamOnya/fasta-manipulation-scripts",
"src_encoding": "UTF-8",
"text": "tqdm==4.56.2\n"
},
{
"alpha_fraction": 0.5805425643920898,
"alphanum_fraction": 0.5840972661972046,
"avg_line_length": 28.207651138305664,
"blob_id": "be4bfffff03366f8c647ede275d5d9b7254d1849",
"content_id": "947abc0a1024e49fa4dcd43f20ccb19fbe0dbdd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5345,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 183,
"path": "/replace_n.py",
"repo_name": "EiTamOnya/fasta-manipulation-scripts",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport argparse\nimport glob\nimport os\nimport sys\nimport re\nfrom tqdm import tqdm\n\n# set as global var so the num_contig() can use it\ncount = 0\n\ndef main():\n global count\n replacement = None\n occurances = dict()\n nfile, output, nflag, directory, stat = get_cli_args()\n\n # check if the user has input the -n flag\n if nflag:\n replacement = return_n\n else:\n replacement = num_contig\n\n # check if the user has input the -d flag\n if directory:\n change_dir(directory)\n create_output(output)\n files = glob.glob('*.fasta')\n print(f'Processing {len(files)} files: ', end='')\n for i, file in enumerate(tqdm(files, desc=\"Processing...\", colour='#cc4722')):\n # make sure the counter is 0 before each file\n count = 0\n # get the number of occurances in file and put it in a dict\n occurance = str(\n replace_file_contents(file, replacement, output))\n occurances[file] = occurance\n else:\n create_output(output)\n occurance = str(replace_file_contents(nfile, replacement, output))\n occurances[nfile] = occurance\n\n # check if the user has requested to see the stats\n if stat:\n print('\\n\\nStats:\\n', end='')\n print('File | Occurances ')\n for occ in occurances:\n print(occ + ' | ' + occurances[occ])\n else:\n print('\\nDone!')\n\ndef get_cli_args():\n \"\"\"Get the cli args, parse and\n return them as vars.\n\n Returns:\n nfile (str): path to the file\n output (str): path to output directory\n nflag (bool): flag for replacement text\n directory (str): path to file directory\n stat (bool): flag for stats\n \"\"\"\n # get the flags and parse them\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", help=\"Provide only one file\",\n action=\"store\", default=None)\n parser.add_argument(\"-o\", \"--output\", \n help=\"A directory which will store the new file/s\",\n action=\"store\", default='output_script_results')\n parser.add_argument(\"-n\", \n help=\"Use 10k x N as the replacement text\",\n action=\"store_true\", default=None)\n parser.add_argument(\"-d\", \"--dir\", \n help=\"Provide a directory of files\",\n action=\"store\", default=None)\n parser.add_argument(\"-s\", \"--stat\", \n help=\"Show number of changes per file\",\n action=\"store_true\", default=None)\n args = parser.parse_args()\n\n # store the values from the flags in vars\n nfile = args.file\n output = args.output\n nflag = args.n\n directory = args.dir\n stat = args.stat\n\n return nfile, output, nflag, directory, stat\n\n\ndef num_contig(matchobj):\n \"\"\"Get the match object and use a global counter\n to increment the replacement text.\n\n Args:\n matchobj (object): match from a regex function\n\n Returns:\n str: the replacement text\n \"\"\"\n global count\n count += 1\n return f'\\n>Contig_new_{count}\\n'\n\n\ndef return_n(matchobj):\n return 'N' * 10000\n\n\ndef replace_file_contents(my_file, replacement, output):\n \"\"\"Load the file contents, replace the text based on pattern\n and write the new contents to a new file.\n\n Args:\n my_file (str): path to a file\n replacement (func): the replacement text\n output (str): the output folder\n\n Returns:\n int: number of occurances of the pattern\n \"\"\"\n # this will catch groups of two or more Ns\n sub_str = re.compile(r'N{2,}')\n\n read_data = ''\n\n # change the new file name, remove any directories if only one file\n new_file = my_file.split('/')[-1].replace('fasta', 'fa')\n \n # try to open the file and load all the data in memory\n try:\n with open(my_file, 'r') as rf:\n read_data = rf.read()\n 
rf.close()\n except Exception as ex:\n print('Couldn\\'t open file\\n', ex)\n sys.exit()\n\n # iterate the data and replace the substring with the new string\n read_data = re.subn(sub_str, repl=replacement, string=read_data)\n \n # write to a file from memory\n try:\n with open(f'{output}/{new_file}', 'w') as wf:\n wf.write(read_data[0])\n wf.close()\n except Exception as ex:\n print('Couldn\\'t write to file\\n', ex)\n sys.exit()\n \n # return the number of changes per file\n return read_data[1]\n\ndef create_output(output):\n \"\"\"Create the output folder\n check if it already exists first.\n\n Args:\n output (str): path to the output folder\n \"\"\"\n # check if the output folder already exists and/or can be created\n if os.path.isdir(output) is False:\n try:\n os.mkdir(output)\n except Exception as ex:\n print('Cannot create output directory!\\n', ex)\n sys.exit()\n\n\ndef change_dir(directory):\n \"\"\"Navigate to the provided directory\n check if it's a valid directory.\n\n Args:\n directory (str): path to the directory\n \"\"\"\n try:\n os.chdir(directory)\n except Exception as ex:\n print('Not a valid directory\\n', ex)\n sys.exit()\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.7184466123580933,
"alphanum_fraction": 0.7297734618186951,
"avg_line_length": 23.719999313354492,
"blob_id": "02ec37dbca2ee99255031ff89f5de23e8c66208a",
"content_id": "e42530497c4d25e7e443305ee9dc59c3c7600e67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 618,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 25,
"path": "/README.md",
"repo_name": "EiTamOnya/fasta-manipulation-scripts",
"src_encoding": "UTF-8",
"text": "## Fasta manipulation scripts\n\n### Requirements\nPython 3.8 is required to run the scripts\n\nPlease run the below:\n\n```\npython -m pip install -r requirements.txt\n```\n\n### Instructions\nRun the below in order to get more information about a script\n\n```\n ./repacle_header.py --help\n ./replace_n.py --help\n```\n\n### Usage\n`replace_header.py` will replace the header in a .fa file based on the file name\nand the selected delimiter and range, it will also change any Xs to Ns inside the file\n\n`replace_n.py` will repace all occurances of two or more Ns in a .fasta file with\na new header or a 10 000 Ns based on the user input\n"
}
] | 4 |
Vlada04/bst | https://github.com/Vlada04/bst | 8550f87cd0b38c2a43203c084be8df4bbb48e7ed | bda30f5e5bf55426c996b49f018aed5e9919e169 | 59942218d114fc6084d3f811e935646c6d80db36 | refs/heads/main | 2023-04-22T23:39:11.344602 | 2021-05-12T07:10:35 | 2021-05-12T07:10:35 | 366,443,985 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4994778037071228,
"alphanum_fraction": 0.5080940127372742,
"avg_line_length": 29.00261116027832,
"blob_id": "fb116cf1f0410d3c215b31c8e9c5734b2d557269",
"content_id": "23322778a53f8440cf34eb6257d2beec8b166c97",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11490,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 383,
"path": "/linkedbst.py",
"repo_name": "Vlada04/bst",
"src_encoding": "UTF-8",
"text": "'''\nModule represents binary search tree\n'''\nimport random\nimport time\nfrom abstractcollection import AbstractCollection\nfrom bstnode import BSTNode\nfrom linkedstack import LinkedStack\nfrom math import log\n\nclass LinkedBST(AbstractCollection):\n \"\"\"An link-based binary search tree implementation.\"\"\"\n\n def __init__(self, sourceCollection=None):\n \"\"\"Sets the initial state of self, which includes the\n contents of sourceCollection, if it's present.\"\"\"\n self._root = None\n AbstractCollection.__init__(self, sourceCollection)\n\n def __str__(self):\n \"\"\"Returns a string representation with the tree rotated\n 90 degrees counterclockwise.\"\"\"\n\n def recurse(node, level):\n string = \"\"\n if node != None:\n string += recurse(node.right, level + 1)\n string += \"| \" * level\n string += str(node.data) + \"\\n\"\n string += recurse(node.left, level + 1)\n return string\n\n return recurse(self._root, 0)\n\n def __iter__(self):\n \"\"\"Supports a preorder traversal on a view of self.\"\"\"\n if not self.isEmpty():\n stack = LinkedStack()\n stack.push(self._root)\n while not stack.isEmpty():\n node = stack.pop()\n yield node.data\n if node.right != None:\n stack.push(node.right)\n if node.left != None:\n stack.push(node.left)\n\n def preorder(self):\n \"\"\"Supports a preorder traversal on a view of self.\"\"\"\n return None\n\n def inorder(self):\n \"\"\"Supports an inorder traversal on a view of self.\"\"\"\n lyst = list()\n\n def recurse(node):\n if node != None:\n recurse(node.left)\n lyst.append(node.data)\n recurse(node.right)\n\n recurse(self._root)\n return iter(lyst)\n\n def postorder(self):\n \"\"\"Supports a postorder traversal on a view of self.\"\"\"\n return None\n\n def levelorder(self):\n \"\"\"Supports a levelorder traversal on a view of self.\"\"\"\n return None\n\n def __contains__(self, item):\n \"\"\"Returns True if target is found or False otherwise.\"\"\"\n return self.find(item) != None\n\n def find(self, item):\n \"\"\"If item matches an item in self, returns the\n matched item, or None otherwise.\"\"\"\n\n def recurse(node):\n if node is None:\n return None\n elif item == node.data:\n return node.data\n elif item < node.data:\n return recurse(node.left)\n else:\n return recurse(node.right)\n\n return recurse(self._root)\n\n def clear(self):\n \"\"\"Makes self become empty.\"\"\"\n self._root = None\n self._size = 0\n\n def add(self, item):\n \"\"\"Adds item to the tree.\"\"\"\n\n # Helper function to search for item's position\n def recurse(node):\n # New item is less, go left until spot is found\n if item < node.data:\n if node.left == None:\n node.left = BSTNode(item)\n else:\n recurse(node.left)\n # New item is greater or equal,\n # go right until spot is found\n elif node.right == None:\n node.right = BSTNode(item)\n else:\n recurse(node.right)\n # End of recurse\n\n # Tree is empty, so new item goes at the root\n if self.isEmpty():\n self._root = BSTNode(item)\n # Otherwise, search for the item's spot\n else:\n recurse(self._root)\n self._size += 1\n\n def remove(self, item):\n \"\"\"Precondition: item is in self.\n Raises: KeyError if item is not in self.\n postcondition: item is removed from self.\"\"\"\n if not item in self:\n raise KeyError(\"Item not in tree.\"\"\")\n\n # Helper function to adjust placement of an item\n def lift_max_to_top(top):\n # Replace top's datum with the maximum datum in the left subtree\n # Pre: top has a left child\n # Post: the maximum node in top's left subtree\n # has been removed\n # Post: 
top.data = maximum value in top's left subtree\n parent = top\n current_node = top.left\n while not current_node.right == None:\n parent = current_node\n current_node = current_node.right\n top.data = current_node.data\n if parent == top:\n top.left = current_node.left\n else:\n parent.right = current_node.left\n\n # Begin main part of the method\n if self.isEmpty(): return None\n\n # Attempt to locate the node containing the item\n item_removed = None\n pre_root = BSTNode(None)\n pre_root.left = self._root\n parent = pre_root\n direction = 'L'\n current_node = self._root\n while not current_node == None:\n if current_node.data == item:\n item_removed = current_node.data\n break\n parent = current_node\n if current_node.data > item:\n direction = 'L'\n current_node = current_node.left\n else:\n direction = 'R'\n current_node = current_node.right\n\n # Return None if the item is absent\n if item_removed == None: return None\n\n # The item is present, so remove its node\n\n # Case 1: The node has a left and a right child\n # Replace the node's value with the maximum value in the\n # left subtree\n # Delete the maximium node in the left subtree\n if not current_node.left == None \\\n and not current_node.right == None:\n lift_max_to_top(current_node)\n else:\n\n # Case 2: The node has no left child\n if current_node.left == None:\n new_child = current_node.right\n\n # Case 3: The node has no right child\n else:\n new_child = current_node.left\n\n # Case 2 & 3: Tie the parent to the new child\n if direction == 'L':\n parent.left = new_child\n else:\n parent.right = new_child\n\n # All cases: Reset the root (if it hasn't changed no harm done)\n # Decrement the collection's size counter\n # Return the item\n self._size -= 1\n if self.isEmpty():\n self._root = None\n else:\n self._root = pre_root.left\n return item_removed\n\n def replace(self, item, new_item):\n \"\"\"\n If item is in self, replaces it with new_item and\n returns the old item, or returns None otherwise.\"\"\"\n probe = self._root\n while probe != None:\n if probe.data == item:\n old_data = probe.data\n probe.data = new_item\n return old_data\n elif probe.data > item:\n probe = probe.left\n else:\n probe = probe.right\n return None\n\n def height(self):\n '''\n Return the height of tree\n :return: int\n '''\n\n def height1(top):\n '''\n Helper function\n :param top: any\n :return: int\n '''\n if top is None:\n return -1\n else:\n return 1 + max(height1(top.left), height1(top.right))\n return height1(self._root)\n\n def is_balanced(self):\n '''\n Return True if tree is balanced\n :return: bool\n '''\n formula = (2 * log(self._size + 1, 2)) - 1\n if self.height() < formula:\n return True\n else:\n return False\n\n def range_find(self, low, high):\n '''\n Returns a list of the items in the tree, where low <= item <= high.\"\"\"\n :param low: int\n :param high: int\n :return: list\n '''\n lst = []\n result_lst = []\n for elem in self:\n lst.append(elem)\n for elem in sorted(lst):\n if elem >= low and elem <= high:\n result_lst.append(elem)\n return result_lst\n\n def rebalance(self):\n '''\n Rebalances the tree.\n :return: LinkedBST()\n '''\n lst = []\n for elem in self:\n lst.append(elem)\n lstt = sorted(lst)\n self.clear()\n def rebalance1(lst):\n if not lst:\n return None\n mid = (len(lst)) // 2\n self.add(lst[mid])\n rebalance1(lst[:mid])\n rebalance1(lst[mid+1:])\n return self\n rebalance1(lstt)\n\n\n def successor(self, item):\n \"\"\"\n Returns the smallest item that is larger than\n item, or None if there is no such item.\n 
:param item: int\n :type item: any\n :return: int\n \"\"\"\n lst = []\n for elem in self:\n if elem > item:\n lst.append(elem)\n if len(lst) == 0:\n return None\n else:\n return min(lst)\n\n def predecessor(self, item):\n \"\"\"\n Returns the largest item that is smaller than\n item, or None if there is no such item.\n :param item: int\n :type item: any\n :return: int\n \"\"\"\n lst = []\n for elem in self:\n if elem < item:\n lst.append(elem)\n if len(lst) == 0:\n return None\n else:\n return max(lst)\n\n\n def demo_bst(self, path):\n \"\"\"\n Demonstration of efficiency binary search tree for the search tasks.\n :param path: path to file\n :type path: str\n :return: string with time of search\n :rtype: str\n \"\"\"\n lst = []\n with open(path, 'r') as file:\n for i in file:\n lst.append(i[:-1])\n print(\"Please wait...\")\n start_time1 = time.time()\n small_lst = []\n for _ in range(10000):\n small_lst.append(random.choice(lst))\n \n for i in small_lst:\n i in lst\n time1 = time.time() - start_time1\n\n start_time2 = time.time()\n bst = LinkedBST()\n for word in lst[:900]:\n bst.add(word)\n\n for _ in range(10000):\n bst.find(random.choice(lst[:900]))\n time2 = time.time() - start_time2\n\n start_time3 = time.time()\n tree = LinkedBST()\n for _ in range(len(lst)):\n tree.add(random.choice(lst))\n \n for _ in range(10000):\n tree.find(random.choice(lst))\n time3 = time.time() - start_time3\n\n start_time4 = time.time()\n tree.rebalance()\n\n for _ in range(10000):\n tree.find(random.choice(lst))\n time4 = time.time() - start_time4\n\n return f\"Find 10000 words from list - {time1} sec.\\n\\\nFind 10000 words from binary tree (sorted in alphabetic order) - {time2} sec.\\n\\\nFind 10000 words from binary tree (random order) - {time3} sec.\\n\\\nFind 10000 words from balanced binary tree - {time4} sec.\"\n\nif __name__ == \"__main__\":\n bst = LinkedBST()\n for i in [1, 2, 3, 4, 5]:\n bst.add(i)\n print(bst.demo_bst(\"words.txt\"))"
}
] | 1 |
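
The `demo_bst` method in the record above times 10,000 membership lookups against a plain list, an unbalanced BST, and a rebalanced BST. The effect it demonstrates, an O(n) linear scan versus an O(log n) search in a balanced structure, can be reproduced with a smaller stdlib-only sketch (illustrative, not taken from the repo):

```python
import bisect
import random
import timeit

data = sorted(random.sample(range(10_000_000), 100_000))
probes = [random.choice(data) for _ in range(1_000)]

def linear_search():  # O(n) per lookup, like `word in lst`
    return all(p in data for p in probes)

def binary_search():  # O(log n) per lookup, like a balanced BST
    return all(data[bisect.bisect_left(data, p)] == p for p in probes)

print("linear:", timeit.timeit(linear_search, number=1))
print("binary:", timeit.timeit(binary_search, number=1))
```
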
D-shaibi/Path-3 | https://github.com/D-shaibi/Path-3 | a39a25f1b4bbf9f3a82a95deeb10ee94f6096476 | 022ceb08953006e56026a55e2bbf601726222e71 | 04e4d8cd5d01fd8e8500090c2d351e83616da536 | refs/heads/master | 2022-11-09T10:42:44.615234 | 2020-06-28T16:43:58 | 2020-06-28T16:43:58 | 272,958,776 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7753623127937317,
"alphanum_fraction": 0.8007246255874634,
"avg_line_length": 33.375,
"blob_id": "a4d0ebcac74ee083b16912393ff8570ce0a0da1f",
"content_id": "9535f0b48ee4706fdd001adc98ef21ff2e950d95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 8,
"path": "/Reading_URL.py",
"repo_name": "D-shaibi/Path-3",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nimport requests\n\nurl= \"https://www.theguardian.com/environment/2020/jun/26/leading-scientist-criticises-uk-over-its-climate-record\"\ndata= requests.get(url).text\ndata_storage= BeautifulSoup(data, features=\"html.parser\").text\n\nprint(data_storage)\n\n"
},
{
"alpha_fraction": 0.5764119625091553,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 27.66666603088379,
"blob_id": "a7ac97b66cae6dfa4105b9924f6faf08dd046acf",
"content_id": "6da6d74cac174a8bbb07b083e4a412e92ea51523",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 602,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 21,
"path": "/Face_Detection.py",
"repo_name": "D-shaibi/Path-3",
"src_encoding": "UTF-8",
"text": "import cv2\nimport numpy\n\n# downloaded the file from github\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nvideocapture = cv2.VideoCapture(0)\n\nwhile True:\n ret, pic = videocapture.read()\n faces= face_cascade.detectMultiScale(pic, 1.5, 4)\n for (x, y, w, h) in faces:\n cv2.circle(pic,(int(x+w/2),int(y+h/2)),int(h/2),(255,255,255),2)\n cv2.putText(pic,'person',(x,y), cv2.FONT_HERSHEY_TRIPLEX, 2, (255,255,255), 2)\n \n cv2.imshow('face_detection', pic)\n k= cv2.waitKey(30) & 0xff\n if k == ord('q'):\n break\n \ncv2.destroyAllWindows()\n"
},
{
"alpha_fraction": 0.6486486196517944,
"alphanum_fraction": 0.6756756901741028,
"avg_line_length": 17.5,
"blob_id": "621922875bda819110a0ff1add5f5db5657d0724",
"content_id": "f85433db046327739c782aa9449baaaf809d66dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 37,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 2,
"path": "/README.md",
"repo_name": "D-shaibi/Path-3",
"src_encoding": "UTF-8",
"text": "# Path-3\nMy work on AI will be here.\n"
}
] | 3 |
Harderboy/blog | https://github.com/Harderboy/blog | ce44616719bf6e1932f971582ea2d724ef3d0199 | fa2e445ac85207c029bd69b36864f55abc757af2 | 4f06226657cd0d0e29c69e4357baf9ebb04aa96b | refs/heads/main | 2023-07-01T22:50:12.910221 | 2021-08-15T09:30:04 | 2021-08-15T09:30:04 | 378,435,942 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8264462947845459,
"alphanum_fraction": 0.8264462947845459,
"avg_line_length": 26,
"blob_id": "b6efbbe19a47d777fd316412685140f75864dcd5",
"content_id": "3ce04b9d86b00069d195b7497a11db0d979270cf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 272,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 9,
"path": "/home/admin.py",
"repo_name": "Harderboy/blog",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom home.models import ArticleCategory, Article, Comment\n# Register your models here.\n\n# 注册模型\n# 使用django自带的后台站点管理\nadmin.site.register(ArticleCategory)\nadmin.site.register(Article)\nadmin.site.register(Comment)"
},
{
"alpha_fraction": 0.5872204303741455,
"alphanum_fraction": 0.6067092418670654,
"avg_line_length": 25.642553329467773,
"blob_id": "d0156720f88690e953d6fcdbbbe9644f29cde53a",
"content_id": "eeb01212bb5eaf0e7bf524ba3653bce91d1ca0e5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7070,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 235,
"path": "/blog/settings.py",
"repo_name": "Harderboy/blog",
"src_encoding": "UTF-8",
"text": "\"\"\"\nDjango settings for blog project.\n\nGenerated by 'django-admin startproject' using Django 2.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.2/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 't-5&$6-s%rg7-xzm3+79%7iq_o@@m)gl3icw%*oe&l5t!sfznu'\n\n# SECURITY WARNING: don't run with debug turned on in production!\n# DEBUG = True\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # 子应用的注册\n 'users.apps.UsersConfig',\n 'home.apps.HomeConfig'\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n# 注意不能带py后缀\nROOT_URLCONF = 'blog.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'blog.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql', # 数据库引擎\n 'HOST': 'localhost', # 数据库主机名\n 'PORT': '3306', # 数据库端口号\n 'USER': 'blog', # 数据库用户名\n 'PASSWORD': '123456', # 数据库用户密码\n 'NAME': 'blog', # 数据库名\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.2/topics/i18n/\n\n# 修改语言\n# LANGUAGE_CODE = 'en-us'\nLANGUAGE_CODE = 'zh-Hans'\n\n# 修改时区\n# TIME_ZONE = 'UTC'\nTIME_ZONE = 'Asia/Shanghai'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.2/howto/static-files/\n\nSTATIC_URL = '/static/'\n# 设置静态资源路径\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'static')\n]\n\n# redis 的配置\n\nCACHES = {\n 'default': { # 默认\n 'BACKEND': 'django_redis.cache.RedisCache',\n # redis默认是6379端口,第0的数据库,这里我们选择第0个数据库,123456是密码\n # 'LOCATION': 'redis://:[email 
protected]:6379/0',\n # 无密码\n 'LOCATION': 'redis://127.0.0.1:6379/0',\n 'OPTIONS': {\n 'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n }\n },\n 'session': { # 会话 session\n 'BACKEND': 'django_redis.cache.RedisCache',\n # redis默认是6379端口,第0的数据库,这里我们选择第1个数据库,123456是密码\n # 'LOCATION': 'redis://:[email protected]:6379/1',\n # 无密码\n 'LOCATION': 'redis://127.0.0.1:6379/1',\n 'OPTIONS': {\n 'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n }\n }\n}\n# session由数据库存储改为由redis存储\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\nSESSION_CACHE_ALIAS = 'session'\n\n\n# 日志配置\n\nLOGGING = {\n 'version': 1,\n # 是否禁用已经存在的日志器\n 'disable_existing_loggers': False,\n # 日志格式器配置\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)s [%(threadName)s: %(thread)d]'\n '%(pathname)s: %(funcName)s: %(lineno)d %(levelname)s - %(message)s'\n },\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(process)d %(thread)d %(message)s'\n },\n 'simple': { # 简单格式\n 'format': '%(levelname)s %(message)s'\n },\n },\n # 过滤器test配置\n 'filters': {\n 'require_debug_true': { # django 在 debug模式下才输出日志\n '()': 'django.utils.log.RequireDebugTrue',\n }\n },\n # 处理器配置\n 'handlers': {\n # 终端处理器配置,向终端输出日志\n 'console': {\n 'level': 'INFO',\n 'filters': ['require_debug_true'],\n 'class': 'logging.StreamHandler',\n 'formatter': 'verbose' # 使用上面定义的standard格式器\n },\n # 文件处理器配置,向文件中输出日志\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': os.path.join(BASE_DIR, 'logs/blog.log'), # 日志文件位置\n 'maxBytes': 1024*1024*1024, # 文件大小 达到1G自动分割\n 'backupCount': 5, # 保存备份文件的数量\n 'formatter': 'verbose', # 输出格式,使用上面定义的standard格式器\n 'encoding': 'utf-8' # 指定文件编码\n }\n },\n # 配置日志实例\n 'loggers': { # 日志器\n 'django': { # 日志实例名,定义了一个django日志器\n 'handlers': ['console', 'file'], # 可同时向终端和文件中输出日志\n # 'filters': ['test'],\n 'propagate': True, # 是否继续传递日志信息\n 'level': 'INFO' # 日志接收器的最低日志级别\n }\n }\n}\n\n# 自定义User模型代替系统的User\nAUTH_USER_MODEL = 'users.User'\n\n# 如果用户未登录的话,则会进行默认的跳转\n# 默认的跳转链接是:account/login/?next=xxx\n# 修改系统的未登录的跳转链接\nLOGIN_URL = '/login/'\n\n# 设置上传的图片保存到media\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media/')\n\n# 设置图片访问的统一路由\nMEDIA_URL = '/media/'"
},
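The CACHES/SESSION_ENGINE pair in the settings above routes Django sessions into Redis database 1 while ordinary cache traffic goes to database 0. A minimal sketch, assuming only the settings shown, of how application code reaches those same caches through Django's standard cache API (the key and value are invented for illustration):

from django.core.cache import caches

default_cache = caches['default']   # redis db 0, per the CACHES setting above
session_cache = caches['session']   # redis db 1, where sessions now live

# hypothetical key/value, just to show the round trip
default_cache.set('greeting', 'hello', timeout=60)
assert default_cache.get('greeting') == 'hello'

Because SESSION_CACHE_ALIAS names the 'session' alias, session rows never mix with cached pages in database 0, which keeps a cache flush from logging everyone out.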
{
"alpha_fraction": 0.5426608324050903,
"alphanum_fraction": 0.5525863766670227,
"avg_line_length": 26.150259017944336,
"blob_id": "27fa35de4fad17aadecf9bd0fa880ac36648a0d7",
"content_id": "a705d31c1b3565708acbabae00f3f4f984a474c0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6187,
"license_type": "permissive",
"max_line_length": 122,
"num_lines": 193,
"path": "/home/views.py",
"repo_name": "Harderboy/blog",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, reverse, redirect\nfrom django.views import View\nfrom home.models import ArticleCategory, Article, Comment\nfrom django.http import HttpResponseNotFound\nfrom django.core.paginator import Paginator, EmptyPage\n\n\n# Create your views here.\n\n\nclass IndexView(View):\n \"\"\"首页展示\"\"\"\n\n def get(self, request):\n \"\"\"\n 1、获取分类信息\n 2、获取用户点击的分类id\n 3、根据分类id进行分类的查询\n 4、获取分页参数\n 5、根据分类信息查询文章数据\n 6、创建分页器\n 7、进行分页处理\n 8、组织数据传递给模版\n :param request:\n :return:\n \"\"\"\n # 获取博客分类信息\n categories = ArticleCategory.objects.all()\n\n # ?cat_id=xxx&page_num=xxx&page_size=xxx\n # 获取用户点击的分类id\n cat_id = request.GET.get('cat_id', 1)\n page_num = request.GET.get('page_num', 1)\n page_size = request.GET.get('page_size', 10)\n # 判断分类id\n try:\n category = ArticleCategory.objects.get(id=cat_id)\n except ArticleCategory.DoesNotExist:\n return HttpResponseNotFound('没有此分类')\n\n # 分页数据\n articles = Article.objects.filter(\n category=category\n )\n #\n # 创建分页器:每页N条记录\n paginator = Paginator(articles, page_size)\n # 获取每页文章数据\n try:\n page_articles = paginator.page(page_num)\n except EmptyPage:\n # 如果没有分页数据,默认给用户404\n return HttpResponseNotFound('empty page')\n # 获取列表页总页数\n total_page = paginator.num_pages\n\n context = {\n 'categories': categories,\n 'category': category,\n 'articles': page_articles,\n 'page_size': page_size,\n 'total_page': total_page,\n 'page_num': page_num,\n }\n # return render(request, 'index.html')\n return render(request, 'index.html', context=context)\n\n\n\"\"\"\ninsert into tb_article(avatar,tags,title,sumary,content,total_views,comments_count,created,updated,author_id, category_id)\nselect avatar,tags,title,sumary,content,total_views,comments_count,created,updated,author_id, category_id from tb_article;\n\"\"\"\n\n\nclass DetailView(View):\n\n \"\"\"\n 1、接收文章id信息\n 2、根据文章id进行文章数据的查询\n 3、查询分类数据\n 4、获取分页请求参数\n 5、根据文章信息查询评论数\n 6、创建分页器\n 7、进行分页处理\n 8、组织模版数据\n \"\"\"\n\n def get(self, request):\n # detail/?id=xxx&page_num=xxx&page_size=xxx\n # 1、获取文档id\n id = request.GET.get('id')\n\n # 2、根据文章id进行文章数据的查询\n try:\n article = Article.objects.get(id=id)\n except Article.DoesNotExist:\n return render(request, '404.html')\n else:\n article.total_views += 1\n article.save()\n\n # 获取分类信息\n categories = ArticleCategory.objects.all()\n\n # 获取热点数据,查询浏览量前10的文章数量\n # '-total_views' 表明数据应该按total_views以倒序排列\n hot_articles = Article.objects.order_by('-total_views')[:10]\n\n # 获取分页请求\n page_num = request.GET.get('page_num', 1)\n page_size = request.GET.get('page_size', 10)\n\n # 获取当前文章的评论数据\n comments = Comment.objects.filter(\n article=article\n ).order_by('-created')\n # 获取评论总数\n total_count = comments.count()\n\n # 创建分页器:每页N条记录\n paginator = Paginator(comments, page_size)\n # 获取每页评论数据\n try:\n page_comments = paginator.page(page_num)\n except EmptyPage:\n # 如果page_num不正确,默认给用户404\n return HttpResponseNotFound('empty page')\n # 获取列表页总页数\n total_page = paginator.num_pages\n\n # 组织模版数据\n context = {\n 'categories': categories,\n 'category': article.category,\n 'article': article,\n 'hot_articles': hot_articles,\n 'total_count': total_count,\n 'comments': page_comments,\n 'page_size': page_size,\n 'total_page': total_page,\n 'page_num': page_num,\n }\n\n return render(request, 'detail.html', context=context)\n\n def post(self, request):\n \"\"\"\n 1、现接收用户信息\n 2、判断用户是否登录\n 3、登录用户可以接收form数据\n 3、1 接收评论数据\n 3、2 验证文章是否存在\n 3、3 保存评论数据\n 3、4 修改文章评论数量\n 4、未登录用户则跳转到登录页面\n :param request:\n :return:\n \"\"\"\n # 获取用户信息\n user = request.user\n\n # 判断用户是否登录\n if user and 
user.is_authenticated:\n # 接收数据\n id = request.POST.get('id')\n content = request.POST.get('content')\n\n # 判断文章id是否存在\n try:\n article = Article.objects.get(id=id)\n except Article.DoesNotExist:\n return HttpResponseNotFound('没有此文章')\n\n # 保存到数据\n Comment.objects.create(\n content=content,\n article=article,\n user=user\n )\n # 修改文章评论数量\n article.comments_count += 1\n article.save()\n # 拼接跳转路由\n path = reverse('home:detail') + '?id={}'.format(article.id)\n return redirect(path)\n else:\n # 没有登录则跳转到登录页面\n return redirect(reverse('users:login'))\n\n\"\"\"\ninsert into tb_comment(content,created,article_id,user_id)\nselect content,created,article_id,user_id from tb_comment;\n\"\"\""
},
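Both views above lean on the same Paginator idiom: build the paginator, ask for one page, and treat EmptyPage as a 404. A minimal self-contained sketch of that pattern, with a plain list standing in for the Article queryset:

from django.core.paginator import Paginator, EmptyPage

items = list(range(95))           # stands in for an Article/Comment queryset
paginator = Paginator(items, 10)  # 10 records per page, as in the views above

try:
    page = paginator.page(3)      # page numbers are 1-based
except EmptyPage:
    page = None                   # the views answer this case with a 404

print(paginator.num_pages)        # 10
print(list(page))                 # items 20..29

Catching EmptyPage rather than pre-validating page_num is the design choice the views make; invalid numbers simply fall through to the error branch.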
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6208333373069763,
"avg_line_length": 14,
"blob_id": "2c2fdb7666c251f5b71a982c150bef26261a9470",
"content_id": "aa1833c9f151ed07655e6bd3b534a3869a40412a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TOML",
"length_bytes": 240,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 16,
"path": "/Pipfile",
"repo_name": "Harderboy/blog",
"src_encoding": "UTF-8",
"text": "[[source]]\nurl = \"https://pypi.tuna.tsinghua.edu.cn/simple/\"\nverify_ssl = true\nname = \"pypi\"\n\n[packages]\ndjango = \"==2.1.8\"\npymysql = \"*\"\ndjango-redis = \"*\"\ncryptography = \"*\"\npillow = \"*\"\n\n[dev-packages]\n\n[requires]\npython_version = \"3.8\"\n"
},
{
"alpha_fraction": 0.4675324559211731,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 14.399999618530273,
"blob_id": "e25c4aa164703a97f938d1d9750d7b05ba1a48c5",
"content_id": "71e09e42945c799314227077a5cccf8936fc30b4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 154,
"license_type": "permissive",
"max_line_length": 19,
"num_lines": 10,
"path": "/requirements.txt",
"repo_name": "Harderboy/blog",
"src_encoding": "UTF-8",
"text": "cffi==1.14.6\ncryptography==3.4.7\nDjango==2.1.8\ndjango-redis==5.0.0\nPillow==8.3.1\npycparser==2.20\nPyMySQL==1.0.2\npytz==2021.1\nredis==3.5.3\nsqlparse==0.4.1\n"
},
{
"alpha_fraction": 0.6379928588867188,
"alphanum_fraction": 0.6702508926391602,
"avg_line_length": 9.296296119689941,
"blob_id": "e8da4380af929f966e74a178007e609d1a7d9e94",
"content_id": "1ef9fe56f718878c4133282c87dfb8f161300154",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 475,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 27,
"path": "/README.md",
"repo_name": "Harderboy/blog",
"src_encoding": "UTF-8",
"text": "# Blog\n\n使用 Python-Django 实现从 0 开发一个博客系统。\n\n该博客主要包括以下几部分功能:\n\n- 注册\n- 登录\n- 忘记密码\n- 用户中心\n- 写博客\n- 博客首页\n- 博客详情\n\n首页展示:\n\n\n\n\nTodo:\n\n- 文档整理\n- 容器化:备份成 Docker 镜像,上传到 Docker Hub\n- CI/CD:使用 Travis CI/Github Action + systemd 自动化部署\n- 添加中间件(Trace 等)\n\n*更新于:2021年08月15日*\n\n"
}
] | 6 |
algorithmic-trading/currency-arbitrage | https://github.com/algorithmic-trading/currency-arbitrage | 8a75d448bac841de6369dea3b7765acf2acbc1a4 | 8bbe77bab0fa5efd0a46eb3eb0f7909c1f80abd1 | e14f7e94cdc7e96c9fb1c571c177fae48509b54b | refs/heads/master | 2017-10-02T20:43:23.927261 | 2016-07-17T05:29:40 | 2016-07-17T05:29:40 | 63,251,192 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.644719660282135,
"alphanum_fraction": 0.6531942486763,
"avg_line_length": 35.52381134033203,
"blob_id": "d3112b5ffe42f875390322fb08eb55b9b42b948e",
"content_id": "3fcf996740903e9263a26a6b8747e74e56f8c72f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1534,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 42,
"path": "/currency_arbitrage/views.py",
"repo_name": "algorithmic-trading/currency-arbitrage",
"src_encoding": "UTF-8",
"text": "from __future__ import with_statement\nfrom django.shortcuts import render\nfrom django.template.context_processors import csrf\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.http import JsonResponse\n\n\n# Create your views here.\ndef index(request):\n return render(request, 'index.html')\n\n\ndef about(request):\n return render(request, 'index.html')\n\n\n@ensure_csrf_cookie\ndef triangular_arbitrage(request):\n print('control reached')\n\n if request.method == \"GET\":\n print('GET view of triangular_arbitrage reached')\n return render(request, '404.html')\n\n elif request.method == \"POST\":\n print('POST view of triangular_arbitrage reached')\n c = {}\n c.update(csrf(request))\n\n a_b_bid_quote = float(request.POST.get('a_b_bid', 0))\n a_b_ask_quote = float(request.POST.get('a_b_ask', 0))\n a_c_bid_quote = float(request.POST.get('a_c_bid', 0))\n a_c_ask_quote = float(request.POST.get('a_c_ask', 0))\n b_c_bid_quote = float(request.POST.get('b_c_bid', 0))\n b_c_ask_quote = float(request.POST.get('b_c_ask', 0))\n principal_amount = float(request.POST.get('notional_amount', 0))\n\n arbitrage_amount = principal_amount * (1/a_b_ask_quote) * (1/b_c_ask_quote) * a_c_bid_quote\n if arbitrage_amount < principal_amount:\n arbitrage_amount = principal_amount * (1 / a_c_ask_quote) * b_c_bid_quote * a_b_bid_quote\n\n return JsonResponse({'status': 'success', 'value': (arbitrage_amount - principal_amount)})\n"
},
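The POST branch above computes one conversion cycle and falls back to the reverse cycle when the first loses money. The same arithmetic pulled out as a pure function, to make the two cycles explicit (quote names follow the a_b/a_c/b_c convention of the form fields; the sample numbers are invented):

def triangular_arbitrage(principal, a_b_bid, a_b_ask, a_c_bid, a_c_ask, b_c_bid, b_c_ask):
    # cycle 1: sell A for B, sell B for C, sell C back into A
    amount = principal * (1 / a_b_ask) * (1 / b_c_ask) * a_c_bid
    if amount < principal:
        # cycle 2: the reverse loop, A -> C -> B -> A
        amount = principal * (1 / a_c_ask) * b_c_bid * a_b_bid
    return amount - principal

# invented quotes, for illustration only
print(triangular_arbitrage(1000.0, 0.7421, 0.7423, 1.1041, 1.1043, 1.4874, 1.4876))

A positive return value is the arbitrage profit the view reports back as JSON; a negative one means neither cycle beats holding the principal.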
{
"alpha_fraction": 0.7336244583129883,
"alphanum_fraction": 0.7336244583129883,
"avg_line_length": 31.714284896850586,
"blob_id": "ee67dbf417889a402293d502d6b53f1618929146",
"content_id": "eb65f3820fbb0a2d7276e90935e4c45fc431eca1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 229,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 7,
"path": "/currency_arbitrage/urls.py",
"repo_name": "algorithmic-trading/currency-arbitrage",
"src_encoding": "UTF-8",
"text": "# app specific urls\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('',\n url(r'^index', 'currency_arbitrage.views.home'),\n url(r'^triangular_arbitrage', 'currency_arbitrage.views.triangular_arbitrage')\n)\n"
}
] | 2 |
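The urls.py above uses the string-based patterns() helper, which Django removed in 1.10. A hedged sketch of the same two routes in the list-of-url style that replaced it, assuming the index and triangular_arbitrage views from the views.py record above:

from django.conf.urls import url
from currency_arbitrage import views

urlpatterns = [
    url(r'^index', views.index),
    url(r'^triangular_arbitrage', views.triangular_arbitrage),
]

Passing the view callables directly also lets Django fail at import time, instead of at request time, when a view name is wrong.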
keerthy97/django_level_2 | https://github.com/keerthy97/django_level_2 | 065baf581fb42513a1bc467c7576a81427f7f41f | bd05d8b8c311c7aa9a13721092cd0ff43ee3593e | 7f24f04cec86c21ae93d921b16219ab5aa60df32 | refs/heads/master | 2022-04-12T07:57:18.171069 | 2020-04-08T11:44:18 | 2020-04-08T11:44:18 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6836158037185669,
"alphanum_fraction": 0.700564980506897,
"avg_line_length": 18.77777862548828,
"blob_id": "0f9c55852264ad1cf834862beb884a68b5b241ad",
"content_id": "0052dd3c0967cae123730bae295046281867306c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 177,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 9,
"path": "/pro2/proapp2/urls.py",
"repo_name": "keerthy97/django_level_2",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom django.urls import path\nfrom proapp2 import views\n\nurlpatterns = [\n path('users/', views.users),\n path('index_1/', views.index_1),\n\n]"
},
{
"alpha_fraction": 0.7303370833396912,
"alphanum_fraction": 0.7528089880943298,
"avg_line_length": 16.799999237060547,
"blob_id": "0150aeebf35cc545f7b76b93cfc512666401e97d",
"content_id": "dd4669a0befde5710b818444294dfd23bcfea9c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/pro2/proapp2/apps.py",
"repo_name": "keerthy97/django_level_2",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass Proapp2Config(AppConfig):\n name = 'proapp2'\n"
},
{
"alpha_fraction": 0.7431906461715698,
"alphanum_fraction": 0.7470816969871521,
"avg_line_length": 31,
"blob_id": "60ee238a0d2776496ebe31db9caea32cb7e017a3",
"content_id": "d4cff776b337e365defebd766ca57623abae7ad5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 8,
"path": "/pro2/proapp2/admin.py",
"repo_name": "keerthy97/django_level_2",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom proapp2.models import User\n# Register your models here.\n# admin.site.register(User)\n\nclass UserAdmin(admin.ModelAdmin):\n list_display = ['id', 'first_name', 'last_name', 'email']\nadmin.site.register(User,UserAdmin)\n\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 17,
"blob_id": "02bb04c3c46dcc6c074d463e6f61fec802b08797",
"content_id": "e9213d191a3c03ced483dfd69543d6540b934967",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 36,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 2,
"path": "/README.md",
"repo_name": "keerthy97/django_level_2",
"src_encoding": "UTF-8",
"text": "# django_level_2\nIt contains models\n"
},
{
"alpha_fraction": 0.646258533000946,
"alphanum_fraction": 0.6581632494926453,
"avg_line_length": 23.5,
"blob_id": "2afd15cf9d529fd941e122a39632f236ffffdc1f",
"content_id": "afdfe22941b3c677bc30f9a75945211cff0803f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 588,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 24,
"path": "/pro2/proapp2/views.py",
"repo_name": "keerthy97/django_level_2",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\n# from django.http import HttpResponse\nfrom proapp2.models import User\nfrom proapp2.forms import NewUserForm\n\n\n# Create your views here.\ndef index_1(request):\n return render(request, 'proapp2/index_1.html')\n\n\ndef users(request):\n form = NewUserForm()\n\n if request.method == \"POST\":\n form = NewUserForm(request.POST)\n\n if form.is_valid():\n form.save(commit=True)\n return index_1(request)\n else:\n print(\"ERROR FORM INVALID\")\n\n return render(request, 'proapp2/users.html', {'form': form})\n"
}
] | 5 |
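The users view above imports NewUserForm from proapp2.forms, a file not included in this record set. A purely hypothetical reconstruction of what that ModelForm might look like, guessed from the columns registered in admin.py (the real field list may differ):

from django import forms
from proapp2.models import User

class NewUserForm(forms.ModelForm):
    # assumption: the form exposes the same columns admin.py displays
    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'email']

With a ModelForm like this, the view's form.save(commit=True) call persists a User row directly, which matches how users() uses it.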
manhof/loadgeneration | https://github.com/manhof/loadgeneration | 15351722c105cb141de91919b89a598f91d399d1 | b645b10a4e2c6d70fabbbb9c56efcb67766cc949 | d0810fbaaf6821027dfaeb0a90cc9e4b1f8ef7c6 | refs/heads/master | 2021-01-19T06:42:54.644559 | 2016-06-07T15:36:29 | 2016-06-07T15:36:29 | 60,622,251 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.7916666865348816,
"avg_line_length": 32,
"blob_id": "1440c3485650498691f8b099e29f4baacd5ce250",
"content_id": "55864e4e25566f21ee3ffca3e78b967cf220e358",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 264,
"license_type": "permissive",
"max_line_length": 116,
"num_lines": 8,
"path": "/README.md",
"repo_name": "manhof/loadgeneration",
"src_encoding": "UTF-8",
"text": "# load Generation\nA way to do video load generation testing using vLc\n\n*You need to have VLC installed to run this testing suite. Please go to videolan.org to download the latest version*\n\nThis script is intended to open video streams in a headless mode\n\nHave fun\n"
},
{
"alpha_fraction": 0.6627971529960632,
"alphanum_fraction": 0.6738529801368713,
"avg_line_length": 29.066667556762695,
"blob_id": "b7ae3db2af13883396ba3a322c58e0b63acf54ce",
"content_id": "fb12075842d8b078eac5d378db25ba8556c3d14a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1809,
"license_type": "permissive",
"max_line_length": 165,
"num_lines": 60,
"path": "/loadprogram.py",
"repo_name": "manhof/loadgeneration",
"src_encoding": "UTF-8",
"text": "#!/bin/python\n#this script will be used to setup and do \n#Load generation testing \n\nimport ConfigParser\nimport os\nimport yum\nimport sys\nimport subprocess\nfrom sys import argv\nfrom decimal import *\nfrom subprocess import *\n\n#config_file = load.conf\n#print 'config file:', config_file\n\nconfig = ConfigParser.ConfigParser()\nconfig.readfp( open( '/home/admin/Load/load.conf'))\n\n#setting variables from config file\n\nnos= int(config.get('Section 1', 'nos'))\nlogging= str(config.get('Section 1', 'logging'))\nlog_file= str(config.get('Section 1', 'log_file'))\nload_number = int(config.get('Section 1', 'load_number'))\nwpbs = Decimal(config.get('Section 1', 'wpbs'))\n#open(config.get('Section 1', 'test_script'), 'w').close()\ntest_script = open(config.get('Section 1', 'test_script'),\"w\")\nlogginginfo= '--verbose 2 --fileloggin --logfile' +log_file \n\nyb = yum.YumBase()\nif yb.rpmdb.searchNevra(name='vlc'):\n\tprint \"VLC installed\"\nelse:\n\tprint \"Please install VLC before continuing\"\n\texit()\n\ntest_script.write('#!/bin/bash\\n')\n\n\nfor x in range (load_number,0,-1):\n\tfor y in range (nos,0,-1):\n\t\ty = str(y)\n\t\ttry:\n\t\t\tif logging in ['y','Y', 'yes', 'Yes', 'YES']:\n\t\t\t\tteststring = 'nohup vlc ' + logginginfo + '--intf dummy --quiet --no-sout-display-audio --no-sout-display-video ' + config.get( 'Section 1', 'url' + (y)) +' &' \n\t\t\telse:\n\t\t\t\tteststring = 'nohup vlc --intf dummy --quiet --no-sout-display-audio --no-sout-display-video ' + config.get ('Section 1', 'url' + (y)) + ' &'\n\t\t\ttest_script.write( teststring)\t\t\n\t\texcept ConfigParser.NoOptionerror:\n\t\t\tprint \"missing url\"+ (y)\n\t\t\tbreak\n\ttest_script.write('sleep ' + str(wpbs) + '\\n')\n\ntest_script.close()\t\n\nprint \"Running Tests\"\nos.chmod(config.get('Section 1', 'test_script'), 0777)\nsubprocess.call(config.get('Section 1', 'test_script'),shell=True)\nexit()\n\n\n\t\t\n"
}
] | 2 |
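loadprogram.py expects its settings under a single [Section 1] block: counts, a wait time, and numbered url1..urlN keys. A minimal sketch of reading those same keys with the Python 2 ConfigParser the script uses (the path and values are placeholders, not taken from the repo):

import ConfigParser

# placeholder path; the script hardcodes /home/admin/Load/load.conf
config = ConfigParser.ConfigParser()
config.read('load.conf')

nos = config.getint('Section 1', 'nos')            # streams started per batch
load_number = config.getint('Section 1', 'load_number')
wpbs = config.getfloat('Section 1', 'wpbs')        # seconds to sleep between batches
urls = [config.get('Section 1', 'url%d' % n) for n in range(1, nos + 1)]

Using getint/getfloat avoids the manual int()/Decimal() casts the script wraps around config.get.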
DrDoofenshmirz/Heart-Disease-Prediction | https://github.com/DrDoofenshmirz/Heart-Disease-Prediction | cadfde093c11f3c77a187a23ab9bb288758d1b87 | 397e578bcaf88349d4bfbc0a713a33b9bd693f59 | 27638a6a313ffc72dbd2f7e29c0b90471fe21da2 | refs/heads/main | 2023-08-20T00:39:01.742986 | 2021-10-30T19:19:56 | 2021-10-30T19:19:56 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6598725914955139,
"alphanum_fraction": 0.6675159335136414,
"avg_line_length": 22.59375,
"blob_id": "4708ca6bbe35e486c9cb3af41bdfca5d41bef59c",
"content_id": "019bf46aa36019a542370b1f703441a587cfc550",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 785,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 32,
"path": "/model.py",
"repo_name": "DrDoofenshmirz/Heart-Disease-Prediction",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport pandas as pd\r\nimport sklearn\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nimport pickle\r\n\r\n\r\n\r\n# Load the csv file\r\ndf = pd.read_csv(\"heart.csv\")\r\n\r\nprint(df.head())\r\n\r\n# Select independent and dependent variable\r\nX = df[['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach',\r\n 'exang', 'oldpeak', 'slope', 'ca', 'thal']]\r\ny = df[\"target\"]\r\n\r\n# Split the dataset into train and test\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state= 5)\r\n\r\n\r\n\r\n\r\nclassifier = LogisticRegression(random_state = 51, penalty = 'l2')\r\nclassifier.fit(X_train, y_train)\r\n\r\n\r\n\r\n# Make pickle file of our model\r\npickle.dump(classifier, open(\"model.pkl\", \"wb\"))"
},
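The pickle written at the end of model.py is presumably consumed elsewhere (a web front end, given the Flask-style layout of the repo). A minimal sketch of loading it back and scoring one patient; the feature values are invented, ordered as the training columns above:

import pickle

model = pickle.load(open("model.pkl", "rb"))

# one invented patient, columns in the training order:
# age, sex, cp, trestbps, chol, fbs, restecg, thalach,
# exang, oldpeak, slope, ca, thal
sample = [[57, 1, 0, 130, 236, 0, 1, 174, 0, 0.0, 1, 1, 2]]

print(model.predict(sample))        # 1 predicts heart disease, 0 predicts none
print(model.predict_proba(sample))  # class probabilities from the logistic model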
{
"alpha_fraction": 0.8148148059844971,
"alphanum_fraction": 0.8148148059844971,
"avg_line_length": 12.5,
"blob_id": "57d1996db56a3ca1e88816a16b19f6b522d60bca",
"content_id": "3136ac7ab0b6d0dbf66074573bc656b8bebe6243",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 27,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 2,
"path": "/README.md",
"repo_name": "DrDoofenshmirz/Heart-Disease-Prediction",
"src_encoding": "UTF-8",
"text": "# deployment\nML deployment\n"
}
] | 2 |
antonioforte/Video-Gallery | https://github.com/antonioforte/Video-Gallery | 3cabe7cfaa5bc9f9aa4ee7c7a7cefba1f7cead8a | 1e48d657b0547e9f796381da0f16be179d64e9f8 | d8877a8644dc03fb6c97adbc0f28255b0b196328 | refs/heads/master | 2020-05-30T15:28:13.958450 | 2011-09-22T14:44:07 | 2011-09-22T14:44:07 | 2,437,679 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7121396064758301,
"alphanum_fraction": 0.7291350364685059,
"avg_line_length": 56.8070182800293,
"blob_id": "5d60159ae73c1a1b3cefd5d84fb94a6cbd679561",
"content_id": "fde6a6333ae5f769ea4e679d7054a6db1d5e9833",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6590,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 114,
"path": "/res/gui/gui.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'videoplayerframe.ui'\n#\n# Created: Tue Dec 21 23:20:42 2010\n# by: PyQt4 UI code generator 4.7.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\nclass Ui_Frame(object):\n def setupUi(self, Frame):\n Frame.setObjectName(\"Frame\")\n Frame.resize(755, 110)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Ignored)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(Frame.sizePolicy().hasHeightForWidth())\n Frame.setSizePolicy(sizePolicy)\n Frame.setFrameShape(QtGui.QFrame.StyledPanel)\n Frame.setFrameShadow(QtGui.QFrame.Raised)\n self.gridLayout = QtGui.QGridLayout(Frame)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.seekSlider = phonon.Phonon.SeekSlider(Frame)\n self.seekSlider.setObjectName(\"seekSlider\")\n self.gridLayout.addWidget(self.seekSlider, 0, 0, 1, 5)\n self.volumeSlider = phonon.Phonon.VolumeSlider(Frame)\n self.volumeSlider.setObjectName(\"volumeSlider\")\n self.gridLayout.addWidget(self.volumeSlider, 1, 0, 1, 5)\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.previousBTN = QtGui.QPushButton(Frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.previousBTN.sizePolicy().hasHeightForWidth())\n self.previousBTN.setSizePolicy(sizePolicy)\n self.previousBTN.setObjectName(\"previousBTN\")\n self.horizontalLayout.addWidget(self.previousBTN)\n self.playBTN = QtGui.QPushButton(Frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.playBTN.sizePolicy().hasHeightForWidth())\n self.playBTN.setSizePolicy(sizePolicy)\n self.playBTN.setObjectName(\"playBTN\")\n self.horizontalLayout.addWidget(self.playBTN)\n self.nextBTN = QtGui.QPushButton(Frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.nextBTN.sizePolicy().hasHeightForWidth())\n self.nextBTN.setSizePolicy(sizePolicy)\n self.nextBTN.setObjectName(\"nextBTN\")\n self.horizontalLayout.addWidget(self.nextBTN)\n self.gridLayout.addLayout(self.horizontalLayout, 2, 0, 1, 1)\n spacerItem = QtGui.QSpacerItem(84, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem, 2, 1, 1, 1)\n self.horizontalLayout_3 = QtGui.QHBoxLayout()\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.playlistBTN = QtGui.QPushButton(Frame)\n self.playlistBTN.setObjectName(\"playlistBTN\")\n self.horizontalLayout_3.addWidget(self.playlistBTN)\n self.enhanceBTN = QtGui.QPushButton(Frame)\n self.enhanceBTN.setObjectName(\"enhanceBTN\")\n self.horizontalLayout_3.addWidget(self.enhanceBTN)\n self.gridLayout.addLayout(self.horizontalLayout_3, 2, 2, 1, 1)\n spacerItem1 = QtGui.QSpacerItem(138, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem1, 2, 3, 1, 1)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.subsBTN = QtGui.QPushButton(Frame)\n sizePolicy = 
QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.subsBTN.sizePolicy().hasHeightForWidth())\n self.subsBTN.setSizePolicy(sizePolicy)\n self.subsBTN.setObjectName(\"subsBTN\")\n self.horizontalLayout_2.addWidget(self.subsBTN)\n self.fullscreenBTN = QtGui.QPushButton(Frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.fullscreenBTN.sizePolicy().hasHeightForWidth())\n self.fullscreenBTN.setSizePolicy(sizePolicy)\n self.fullscreenBTN.setObjectName(\"fullscreenBTN\")\n self.horizontalLayout_2.addWidget(self.fullscreenBTN)\n self.labelTime = QtGui.QLabel(Frame)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.labelTime.sizePolicy().hasHeightForWidth())\n self.labelTime.setSizePolicy(sizePolicy)\n self.labelTime.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.labelTime.setObjectName(\"labelTime\")\n self.horizontalLayout_2.addWidget(self.labelTime)\n self.gridLayout.addLayout(self.horizontalLayout_2, 2, 4, 1, 1)\n\n self.retranslateUi(Frame)\n QtCore.QMetaObject.connectSlotsByName(Frame)\n\n def retranslateUi(self, Frame):\n Frame.setWindowTitle(QtGui.QApplication.translate(\"Frame\", \"Frame\", None, QtGui.QApplication.UnicodeUTF8))\n self.previousBTN.setText(QtGui.QApplication.translate(\"Frame\", \"previous\", None, QtGui.QApplication.UnicodeUTF8))\n self.playBTN.setText(QtGui.QApplication.translate(\"Frame\", \"play\", None, QtGui.QApplication.UnicodeUTF8))\n self.nextBTN.setText(QtGui.QApplication.translate(\"Frame\", \"next\", None, QtGui.QApplication.UnicodeUTF8))\n self.playlistBTN.setText(QtGui.QApplication.translate(\"Frame\", \"playlist\", None, QtGui.QApplication.UnicodeUTF8))\n self.enhanceBTN.setText(QtGui.QApplication.translate(\"Frame\", \"enhance\", None, QtGui.QApplication.UnicodeUTF8))\n self.subsBTN.setText(QtGui.QApplication.translate(\"Frame\", \"subs\", None, QtGui.QApplication.UnicodeUTF8))\n self.fullscreenBTN.setText(QtGui.QApplication.translate(\"Frame\", \"fullscreen\", None, QtGui.QApplication.UnicodeUTF8))\n self.labelTime.setText(QtGui.QApplication.translate(\"Frame\", \"00:00:00 / 00:00:00\", None, QtGui.QApplication.UnicodeUTF8))\n\nfrom PyQt4 import phonon\n"
},
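Classes that pyuic4 generates, like Ui_Frame above, are plain setup helpers with no widget of their own. A minimal sketch of the standard PyQt4 pattern for attaching one to a live QFrame (this driver is illustrative, not taken from the repo's sources):

import sys
from PyQt4 import QtGui
from gui import Ui_Frame   # the generated module above

app = QtGui.QApplication(sys.argv)
frame = QtGui.QFrame()
ui = Ui_Frame()
ui.setupUi(frame)          # builds sliders, buttons and the time label onto the frame
frame.show()
sys.exit(app.exec_())

Keeping the generated class separate from the widget it decorates is what makes the "all changes will be lost" warning safe: the .ui file can be regenerated without touching hand-written code.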
{
"alpha_fraction": 0.5303353667259216,
"alphanum_fraction": 0.5365332961082458,
"avg_line_length": 32.55813980102539,
"blob_id": "e6b9798fb46fce2580db0942e794562145b06893",
"content_id": "46517491e050779098a1c0a5bac7381f99f97a8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14521,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 430,
"path": "/lib/query_db.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "import os\nimport cPickle\nfrom xml.etree import ElementTree as ET\nimport templates\nimport urllib\nimport common\nimport time\nfrom PySide import QtCore\n\n\n\nclass queryDbWorker(QtCore.QThread):\n postBuildDbWorkerSig = QtCore.Signal(str)\n \n def __init__(self, parent = None):\n QtCore.QThread.__init__(self, parent)\n self.exiting = False\n self.lib = common.common() \n self.templates = templates.templates()\n\n\n def __del__(self):\n self.exiting = True\n\n\n def set_values(self,curdir,configxml,dbpath,query):\n self.curdir = curdir\n self.configxml = configxml\n self.dbpath = dbpath\n self.db = self.get_db(self.dbpath)\n \n self.query = query\n self.mode = query[0]\n self.locprofile = query[1]\n self.loclabel = query[2]\n self.locurl = query[3]\n self.locid = self.lib.get_locid(self.locprofile+self.loclabel+self.locurl)\n\n\n\n def run(self):\n ''' \n There two levels: front and afterfront. \n The front level gets the front html of each profile, this is equal to all profiles.\n The afterfront level is different to each profile.\n '''\n html = 'not'\n \n if self.locprofile == 'Movies':\n if self.mode == 'front':\n html = self.get_front_html()\n if self.mode == 'afterfront':\n html = self.get_movie_html()\n\n if self.locprofile == 'TV':\n if self.mode == 'front':\n html = self.get_front_html()\n if self.mode == 'afterfront':\n html = self.get_tv_show()\n\n if self.locprofile == 'Races':\n if self.mode == 'front':\n html = self.get_front_html()\n if self.mode == 'afterfront':\n html = self.get_races_season()\n\n self.postBuildDbWorkerSig.emit(html)\n\n \n \n \n def get_front_html(self):\n ''' Get front page of all profils'''\n html = self.get_common_html()\n body = html.find('body')\n\n wrapper = ET.SubElement(body, 'div')\n wrapper.attrib['id'] = 'frontWrapper'\n \n data = self.db[self.locid]['data']\n for movie in sorted(data.keys()):\n\n div = ET.SubElement(wrapper,'div')\n div.attrib['class'] = 'movieWrapper'\n div.attrib['data-movie_label'] = movie\n \n # get image \n img = ET.SubElement(div,'img')\n img.attrib['class'] = 'movieWrapperCover'\n if data[movie]['hasSmallPic'] == 'yes':\n img.attrib['src'] = 'file://' + os.path.join(self.locurl, movie, 'folder.jpg')\n else:\n # only gets screen from screens folder when its in movie mode\n # because the other modes have one nested folder\n has_screen = self.get_movie_screen(os.path.join(self.locurl, movie))\n if has_screen[0] == 'yes':\n img.attrib['src'] = 'file://' + has_screen[1] \n else:\n img.attrib['src'] = 'file://'+os.path.join(self.curdir, 'res', 'graphics', 'default_200x100.png')\n\n span = ET.SubElement(div,'span')\n span.attrib['data-label'] = movie\n span.text = self.lib.getShortString(20,movie)\n\n return ET.tostring(html)\n \n \n \n \n def get_movie_screen(self,url):\n has_screen = ['not','not']\n screensdir = os.path.join(url,'screens')\n\n if os.path.exists(screensdir):\n screens = os.listdir(screensdir)\n if len(screens) != 0:\n has_screen[0] = 'yes'\n has_screen[1] = os.path.join(url,'screens',screens[5])\n \n return has_screen\n \n \n\n \n \n \n \n\n \n\n\n def get_movie_html(self):\n html = self.get_common_html()\n body = html.find('body')\n\n wrapper = ET.SubElement(body, 'div')\n wrapper.attrib['class'] = 'listWrapper'\n \n movie = self.query[4]\n data = self.db[self.locid]['data'][movie]\n\n div = ET.SubElement(wrapper,'div')\n div.attrib['class'] = 'listItemWrapper'\n \n div_after_pic = ET.SubElement(div,'div',{'class':'divAfterPic'}) \n span = 
ET.SubElement(div_after_pic,'span',{'class':'spanSeasonNumber'})\n \n span_show_storyboard = ET.SubElement(div_after_pic,'span',{'class':'spanShowStoryboard'})\n span_show_storyboard.text = 'storyboard'\n \n span_show_episodes = ET.SubElement(div_after_pic,'span',{'class':'spanShowEpisodes'})\n span_show_episodes.text = 'files'\n span.text = movie\n\n img = ET.SubElement(div,'img')\n img.attrib['class'] = 'movieBigWrapperCover'\n if data['hasBigPic'] == 'yes':\n img.attrib['src'] = 'file://'+os.path.join(self.locurl,movie,'folder_big.jpg')\n else:\n img.attrib['src'] = 'file://'+os.path.join(self.curdir, 'res', 'graphics', 'default_720x200.png')\n\n filestable = self.get_files_table(data['files'])\n div.append(filestable)\n\n return ET.tostring(html)\n\n\n\n\n\n\n\n def get_tv_show(self):\n html = self.get_common_html()\n body = html.find('body')\n\n wrapper = ET.SubElement(body, 'div')\n wrapper.attrib['class'] = 'listWrapper'\n \n show = self.query[4]\n data = self.db[self.locid]['data'][show]\n seasons = data['seasons']\n\n for season in sorted(seasons.keys()):\n div = ET.SubElement(wrapper,'div')\n div.attrib['class'] = 'listItemWrapper'\n \n div_after_pic = ET.SubElement(div,'div',{'class':'divAfterPic'})\n span = ET.SubElement(div_after_pic,'span',{'class':'spanSeasonNumber'})\n span.text = season\n \n span_show_storyboard = ET.SubElement(div_after_pic,'span',{'class':'spanShowStoryboard'})\n span_show_storyboard.text = 'storyboard'\n \n span_show_episodes = ET.SubElement(div_after_pic,'span',{'class':'spanShowEpisodes'})\n span_show_episodes.text = 'episodes'\n\n img = ET.SubElement(div,'img')\n img.attrib['class'] = 'movieBigWrapperCover'\n if seasons[season]['hasBigPic'] == 'yes':\n img.attrib['src'] = 'file://'+os.path.join(self.locurl,show,season,'folder_big.jpg')\n else:\n img.attrib['src'] = 'file://'+os.path.join(self.curdir, 'res', 'graphics', 'default_720x200.png')\n\n filestable = self.get_files_table(seasons[season]['files'])\n div.append(filestable)\n\n return ET.tostring(html)\n\n\n\n\n def get_races_season(self):\n html = self.get_common_html()\n body = html.find('body')\n\n wrapper = ET.SubElement(body, 'div')\n wrapper.attrib['class'] = 'listWrapper'\n\n season = self.query[4]\n data = self.db[self.locid]['data'][season]\n races = data['races']\n\n for race in sorted(races.keys()):\n div = ET.SubElement(wrapper,'div')\n div.attrib['class'] = 'listItemWrapper'\n \n div_after_pic = ET.SubElement(div,'div',{'class':'divAfterPic'})\n span = ET.SubElement(div_after_pic,'span',{'class':'spanSeasonNumber'})\n span.text = race\n \n span_show_storyboard = ET.SubElement(div_after_pic,'span',{'class':'spanShowStoryboard'})\n span_show_storyboard.text = 'storyboard'\n \n span_show_episodes = ET.SubElement(div_after_pic,'span',{'class':'spanShowEpisodes'})\n span_show_episodes.text = 'files'\n\n img = ET.SubElement(div,'img')\n img.attrib['class'] = 'movieBigWrapperCover'\n if races[race]['hasBigPic'] == 'yes':\n img.attrib['src'] = 'file://'+os.path.join(self.locurl,season,race,'folder_big.jpg')\n else:\n img.attrib['src'] = 'file://'+os.path.join(self.curdir, 'res','graphics', 'default_720x200.png')\n\n filestable = self.get_files_table(races[race]['files'])\n div.append(filestable)\n\n return ET.tostring(html)\n\n\n\n\n\n\n def get_files_table(self, files):\n table = ET.Element('table',{'class':'videoFilesTable'})\n \n for fullurl in sorted(files.keys()):\n url, filename = os.path.split(fullurl)\n name, ext = os.path.splitext(filename)\n \n tr = ET.SubElement(table, 'tr')\n \n 
# 1st cell\n td9 = ET.SubElement(tr, 'td')\n img = ET.SubElement(td9,'img')\n img.attrib['class'] = 'fileScreen'\n img.attrib['data-url'] = fullurl\n hasscreen = self.lib.checkFile(os.path.join(url,'screens',name+' - 2.jpg'))\n if hasscreen == 'yes':\n img.attrib['src'] = 'file://'+os.path.join(url,'screens',urllib.quote(name+' - 2.jpg'))\n else:\n img.attrib['src'] = 'file://'+os.path.join(self.curdir, 'res', 'graphics', 'default_200x100.png')\n\n # 2nd cell\n td0 = ET.SubElement(tr, 'td')\n td0.text = name\n\n # 3th cell\n td1 = ET.SubElement(tr, 'td')\n \n span1 = ET.SubElement(td1, 'span')\n span1.text = str(files[fullurl]['width'])+'x'+str(files[fullurl]['height'])\n\n span2 = ET.SubElement(td1, 'span')\n duration = time.strftime('%H:%M:%S', time.gmtime(files[fullurl]['duration']))\n span2.text = duration\n \n span3 = ET.SubElement(td1, 'span')\n date = time.strftime('%Y-%m-%d',time.gmtime(float(files[fullurl]['date'])))\n span3.text = date\n \n span4 = ET.SubElement(td1, 'span')\n span4.text = files[fullurl]['hasSubs']\n \n span4 = ET.SubElement(td1, 'span')\n span4.text = str(files[fullurl]['size'])+' mb'\n \n span6 = ET.SubElement(td1, 'span')\n span6.text = ext\n \n # 4rd cell\n tdplayers = ET.SubElement(tr, 'td')\n players = self.configxml.find('video_players')\n for player in players.getiterator('app'):\n s = ET.SubElement(tdplayers, 'span')\n s.attrib['data-video_url'] = fullurl\n s.attrib['data-app'] = player.text\n s.attrib['class'] = 'video_launcher'\n s.text = player.text\n return table\n\n\n\n \n\n def get_db(self,dbpath):\n data = cPickle.load(open(dbpath, 'rb'))\n return data\n \n \n \n def get_common_html(self):\n html = ET.Element('html')\n head = ET.SubElement(html, 'head')\n \n meta = ET.SubElement(head, 'meta')\n meta.attrib['http-equiv'] = 'content-type'\n meta.attrib['content'] = 'text/html; charset=utf-8'\n \n style = ET.SubElement(head, 'link')\n style.attrib['rel'] = 'stylesheet'\n style.attrib['type'] = 'text/css'\n style.attrib['href'] = 'file://' + os.path.join(self.curdir, 'site', 'style.css')\n \n script2 = ET.SubElement(head, 'script')\n script2.attrib['type'] = 'text/javascript'\n script2.attrib['src'] = 'file://' + os.path.join(self.curdir, 'site', 'js', 'dom_helper.js')\n script2.text = '/*nonsense*/'\n \n script3 = ET.SubElement(head, 'script')\n script3.attrib['type'] = 'text/javascript'\n script3.attrib['src'] = 'file://' + os.path.join(self.curdir, 'site', 'js', 'navletters.js')\n script3.text = '/*nonsense*/'\n \n script1 = ET.SubElement(head, 'script')\n script1.attrib['type'] = 'text/javascript'\n script1.attrib['src'] = 'file://' + os.path.join(self.curdir, 'site', 'js', 'common.js')\n script1.text = '/*nonsense*/'\n \n title = ET.SubElement(head, 'title')\n title.text = self.loclabel\n \n body = ET.SubElement(html, 'body')\n body.attrib['data-profile'] = self.locprofile\n body.attrib['data-label'] = self.loclabel\n body.attrib['data-url'] = self.locurl\n body.attrib['data-mode'] = self.mode\n return html \n \n \n \n# for item in self.data.keys():\n# print('after',self.data[item]['data'].keys())\n\n# for k, v in data.iteritems():\n# print('shit',k,v)\n\n# self.lib.print_nice(movie)\n# print('et dump',ET.tostring(html))\n# self.lib.prettyPrintET(html)\n\n\n\n\nclass queryDb(QtCore.QObject):\n postQueryDbSig = QtCore.Signal(str)\n startQueryDbSig = QtCore.Signal(str)\n endQueryDbSig = QtCore.Signal(str)\n\n @QtCore.Slot(str,str,str,str,str,str)\n def go(self,*args):\n self.theargs = args\n self.worker = queryDbWorker()\n 
self.worker.set_values(self.curdir,self.configxml,self.dbpath,args)\n self.worker.finished.connect(self.say_end)\n \n # Commenting this because it is causing a flash\n # self.worker.started.connect(self.say_start)\n self.worker.postBuildDbWorkerSig.connect(self.post_html)\n self.worker.start()\n\n\n def set_values(self,curdir,configxml,dbpath):\n self.curdir = curdir\n self.configxml = configxml\n self.dbpath = dbpath\n \n\n def say_end(self):\n self.endQueryDbSig.emit('End query')\n\n\n def say_start(self):\n html = ET.Element('html')\n head = ET.SubElement(html, 'head')\n \n meta = ET.SubElement(head, 'meta')\n meta.attrib['http-equiv'] = 'content-type'\n meta.attrib['content'] = 'text/html; charset=utf-8'\n \n style = ET.SubElement(head, 'link')\n style.attrib['rel'] = 'stylesheet'\n style.attrib['type'] = 'text/css'\n style.attrib['href'] = 'file://' + os.path.join(self.curdir, 'site', 'style.css')\n \n goback = ET.SubElement(html,'div', {'id':'goback'})\n span = ET.SubElement(goback,'span', {'class':'novisibility'})\n span.text = 'iamherejustforshow'\n \n div = ET.SubElement(html,'div',{'class':'loadingDiv'})\n for item in self.theargs:\n div2 = ET.SubElement(div, 'div')\n div2.text = item\n \n self.startQueryDbSig.emit(ET.tostring(html))\n \n \n def post_html(self,html):\n self.postQueryDbSig.emit(html)\n\n \n \n \n \n \n \n \n \n \n \n"
},
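query_db.py (and build_db.py after it) wrap their work in a QThread whose result travels back over a string Signal, so the WebKit GUI thread never blocks. The pattern in isolation, as a minimal runnable PySide sketch with invented names:

from PySide import QtCore

class Worker(QtCore.QThread):
    done = QtCore.Signal(str)       # carries the result back across threads

    def run(self):
        # the heavy lifting (db queries, html building) would happen here
        self.done.emit('finished')

def on_done(msg):
    pass                            # a real slot would hand msg to the gui

app = QtCore.QCoreApplication([])   # cross-thread signals need a running event loop
worker = Worker()
worker.done.connect(on_done)
worker.finished.connect(app.quit)
worker.start()
app.exec_()

Connecting the slots before start(), exactly as go() does above, guarantees no emission is lost to a race between thread startup and connection.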
{
"alpha_fraction": 0.5899999737739563,
"alphanum_fraction": 0.5899999737739563,
"avg_line_length": 26.227272033691406,
"blob_id": "c4f50270e0f54157882943357aae1faa49764dc1",
"content_id": "c02d9c60d39b799eeeacd413876e7c5c953f651b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 600,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 22,
"path": "/lib/launch_video.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "import subprocess\nfrom PySide import QtCore\n\n\nclass launchVideo(QtCore.QObject):\n startHereSignal = QtCore.Signal(str)\n\n @QtCore.Slot(str,str)\n def go(self,app,video_url):\n if app != 'here':\n self.open_external_app(video_url,app)\n if app == 'here':\n self.startHereSignal.emit(video_url)\n\n\n def open_external_app(self,video_url,app): \n try:\n cmd = [app]\n cmd.append(video_url)\n retval = subprocess.Popen(cmd)\n except Exception as e:\n print (\"Error could not execute launchVideo. \",e.args)\n\n"
},
{
"alpha_fraction": 0.5395783185958862,
"alphanum_fraction": 0.5419114828109741,
"avg_line_length": 36.769737243652344,
"blob_id": "d86313ff380dd03ac50afe7cd7e92c242c32ff10",
"content_id": "e4bb96dc210bedc667033b7ac6e9c3d1b6c09bce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11572,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 304,
"path": "/lib/build_db.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "import os\nimport cPickle\nimport json\nfrom xml.etree import ElementTree as ET\nfrom hachoir_parser import createParser\nfrom hachoir_metadata import extractMetadata\nimport templates\nimport common\nfrom PySide import QtCore\n\n\n\nclass buildDbWorker(QtCore.QThread):\n postBuildDbWorkerSig = QtCore.Signal(str)\n \n def __init__(self, parent = None):\n QtCore.QThread.__init__(self, parent)\n self.exiting = False\n self.lib = common.common() \n self.templates = templates.templates()\n\n\n def __del__(self):\n self.exiting = True\n\n\n def set_values(self,curdir,configxml,dbpath):\n self.curdir = curdir\n self.configxml = configxml\n self.dbpath = dbpath\n self.video_folders = self.configxml.find('video_folders')\n self.exts = self.configxml.find('video_exts').text\n\n\n def run(self):\n # global dict\n data = {}\n\n # for each location\n locs = self.video_folders.getiterator('folder')\n for loc in locs:\n i = locs.index(loc)\n \n loclabel = loc.attrib['label']\n locprofile = loc.attrib['profile']\n locurl = loc.text\n locid = self.lib.get_locid(locprofile+loclabel+locurl)\n\n # create dict with location details\n # the data key holds files found\n data[locid] = {}\n data[locid]['label'] = loclabel\n data[locid]['profile'] = locprofile\n data[locid]['url'] = locurl\n data[locid]['data'] = self.scan_folders(loclabel,locprofile,locurl)\n\n self.send_update_signal(data[locid],i,locs)\n self.savedb(data)\n #self.lib.print_nice(data)\n\n\n def send_update_signal(self,data,i,locs):\n ul = ET.Element('ul')\n li1 = ET.SubElement(ul, 'li')\n li1.text = 'Searching: '+str(i+1)+' of '+str(len(locs))\n \n li5 = ET.SubElement(ul, 'li')\n li5.text = 'Found: '+str(len(data['data']))\n \n li2 = ET.SubElement(ul, 'li')\n li2.text = 'Profile: '+data['profile']\n\n li3 = ET.SubElement(ul, 'li')\n li3.text = 'Label: '+data['label']\n \n li4 = ET.SubElement(ul, 'li')\n li4.text = 'Url: '+data['url']\n\n li6 = ET.SubElement(ul, 'li')\n li6.text = '-----------------'\n\n self.postBuildDbWorkerSig.emit(ET.tostring(ul))\n \n \n\n\n def scan_folders(self,loclabel,locprofile,locurl):\n data = {}\n if locprofile == 'Movies':\n data = self.get_movies(locurl)\n if locprofile == 'TV':\n data = self.get_tv(locurl)\n if locprofile == 'Races':\n data = self.get_races(locurl)\n return data\n \n \n \n def get_movies(self,locurl): \n data = {} \n\n movies = self.lib.getDirDirs(locurl)\n for movie in movies:\n data[movie] = {}\n data[movie]['hasDesc'] = self.lib.checkFile(os.path.join(locurl,movie, 'desc.txt')) \n data[movie]['hasSmallPic'] = self.lib.checkFile(os.path.join(locurl,movie, 'folder.jpg')) \n data[movie]['hasBigPic'] = self.lib.checkFile(os.path.join(locurl,movie, 'folder_big.jpg')) \n data[movie]['files'] = {}\n\n videofiles = self.lib.walkDirFilter(os.path.join(locurl,movie),self.exts)\n for file in videofiles:\n full_url = file[0]\n ext = file[1]\n filename = file[2]\n # this is costing scanning time\n meta = self.get_metadata(full_url)\n \n data[movie]['files'][full_url] ={}\n data[movie]['files'][full_url]['size'] = self.lib.getFileSize(full_url)\n data[movie]['files'][full_url]['date'] = self.lib.getFileDate(full_url)\n data[movie]['files'][full_url]['hasSubs'] = self.lib.checkFile(full_url.replace(ext,'.srt')) \n \n data[movie]['files'][full_url]['duration'] = self.get_duration_seconds(meta)\n data[movie]['files'][full_url]['width'] = self.get_metadata_entry(meta,'width')\n data[movie]['files'][full_url]['height'] = self.get_metadata_entry(meta,'height')\n\n return data\n \n\n\n def 
get_tv(self,locurl):\n data = {} \n \n # get shows\n shows = self.lib.getDirDirs(locurl)\n for show in shows:\n data[show] = {}\n data[show]['hasDesc'] = self.lib.checkFile(os.path.join(locurl,show, 'desc.txt')) \n data[show]['hasSmallPic'] = self.lib.checkFile(os.path.join(locurl,show, 'folder.jpg')) \n data[show]['hasBigPic'] = self.lib.checkFile(os.path.join(locurl,show, 'folder_big.jpg')) \n data[show]['seasons'] = {}\n\n # get seasons\n seasons = self.lib.getDirDirs(os.path.join(locurl,show))\n for season in seasons:\n data[show]['seasons'][season] = {}\n data[show]['seasons'][season]['hasDesc'] = self.lib.checkFile(os.path.join(locurl,show,season,'desc.txt')) \n data[show]['seasons'][season]['hasSmallPic'] = self.lib.checkFile(os.path.join(locurl,show,season,'folder.jpg')) \n data[show]['seasons'][season]['hasBigPic'] = self.lib.checkFile(os.path.join(locurl,show,season,'folder_big.jpg')) \n data[show]['seasons'][season]['files'] = {}\n \n # get files\n videofiles = self.lib.walkDirFilter(os.path.join(locurl,show,season),self.exts)\n for file in videofiles:\n full_url = file[0]\n ext = file[1]\n filename = file[2]\n # this is costing scanning time\n meta = self.get_metadata(full_url)\n \n data[show]['seasons'][season]['files'][full_url] ={}\n data[show]['seasons'][season]['files'][full_url]['size'] = self.lib.getFileSize(full_url)\n data[show]['seasons'][season]['files'][full_url]['date'] = self.lib.getFileDate(full_url)\n data[show]['seasons'][season]['files'][full_url]['hasSubs'] = self.lib.checkFile(full_url.replace(ext,'.srt')) \n \n data[show]['seasons'][season]['files'][full_url]['duration'] = self.get_duration_seconds(meta)\n data[show]['seasons'][season]['files'][full_url]['width'] = self.get_metadata_entry(meta,'width')\n data[show]['seasons'][season]['files'][full_url]['height'] = self.get_metadata_entry(meta,'height')\n\n return data\n\n\n\n def get_races(self,locurl):\n '''Lots of repeated code i know.\n This will be extended to add more things.\n - picture of circuit\n - diagram of circuit\n - winner, podium\n '''\n data = {} \n \n # get seasons\n seasons = self.lib.getDirDirs(locurl)\n for season in seasons:\n data[season] = {}\n data[season]['hasDesc'] = self.lib.checkFile(os.path.join(locurl,season, 'desc.txt')) \n data[season]['hasSmallPic'] = self.lib.checkFile(os.path.join(locurl,season, 'folder.jpg')) \n data[season]['hasBigPic'] = self.lib.checkFile(os.path.join(locurl,season, 'folder_big.jpg')) \n data[season]['races'] = {}\n\n # get races\n races = self.lib.getDirDirs(os.path.join(locurl,season))\n for race in races:\n data[season]['races'][race] = {}\n data[season]['races'][race]['hasDesc'] = self.lib.checkFile(os.path.join(locurl,season,race,'desc.txt')) \n data[season]['races'][race]['hasSmallPic'] = self.lib.checkFile(os.path.join(locurl,season,race,'folder.jpg')) \n data[season]['races'][race]['hasBigPic'] = self.lib.checkFile(os.path.join(locurl,season,race,'folder_big.jpg')) \n data[season]['races'][race]['files'] = {}\n data[season]['races'][race]['mysnaps'] = []\n \n # get mysnaps\n mysnaps = self.lib.walkDir(os.path.join(locurl,season,race,'mysnaps'))\n for snap in mysnaps:\n data[season]['races'][race]['mysnaps'].append(snap[2])\n\n # get files\n videofiles = self.lib.walkDirFilter(os.path.join(locurl,season,race),self.exts)\n for file in videofiles:\n full_url, ext, filename = file[0], file[1], file[2]\n\n # this is costing scanning time\n meta = self.get_metadata(full_url)\n \n data[season]['races'][race]['files'][full_url] ={}\n 
data[season]['races'][race]['files'][full_url]['size'] = self.lib.getFileSize(full_url)\n data[season]['races'][race]['files'][full_url]['date'] = self.lib.getFileDate(full_url)\n data[season]['races'][race]['files'][full_url]['hasSubs'] = self.lib.checkFile(full_url.replace(ext,'.srt')) \n \n data[season]['races'][race]['files'][full_url]['duration'] = self.get_duration_seconds(meta)\n data[season]['races'][race]['files'][full_url]['width'] = self.get_metadata_entry(meta,'width')\n data[season]['races'][race]['files'][full_url]['height'] = self.get_metadata_entry(meta,'height')\n\n return data\n\n\n\n def get_duration_seconds(self,meta):\n secs = 0\n duration = self.get_metadata_entry(meta,'duration')\n try:\n if duration.seconds != 0:\n secs = duration.seconds\n except Exception as e:\n print (\"Error getting duration \",e.args)\n return secs\n\n\n def get_metadata(self,fullurl):\n metadata = 'not'\n try:\n filename, realname = unicode(fullurl), fullurl\n parser = createParser(filename, realname)\n metadata = extractMetadata(parser)\n except Exception as e:\n print (\"Error getting metadata \",e.args)\n return metadata \n\n \n def get_metadata_entry(self,meta,field):\n entry = 'not'\n try:\n entry = meta.get(field)\n except Exception as e:\n print (\"Error get_metadata_entry \",e.args)\n return entry\n\n\n\n def savedb(self,data):\n cPickle.dump(data, open(self.dbpath, 'wb'),-1)\n \n a = json.dumps(data,indent=4)\n b = open(self.dbpath+'.json', 'w')\n b.write(a)\n b.close()\n\n\n\n\n\n\n\nclass buildDb(QtCore.QObject):\n postBuildDbSig = QtCore.Signal(str)\n startBuildDbSig = QtCore.Signal(str)\n endBuildDbSig = QtCore.Signal(str)\n\n @QtCore.Slot()\n def go(self):\n self.worker = buildDbWorker()\n self.worker.set_values(self.curdir,self.configxml,self.dbpath)\n self.worker.finished.connect(self.say_end)\n self.worker.started.connect(self.say_start)\n self.worker.postBuildDbWorkerSig.connect(self.post_html)\n self.worker.start()\n\n\n def set_values(self,curdir,configxml,dbpath):\n self.curdir = curdir\n self.configxml = configxml\n self.dbpath = dbpath\n \n\n def say_end(self):\n self.endBuildDbSig.emit('End scanning')\n\n\n def say_start(self):\n self.startBuildDbSig.emit('Started scanning')\n \n \n def post_html(self,html):\n self.postBuildDbSig.emit(html)\n\n \n \n \n \n \n \n \n \n \n "
},
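The width/height/duration probing in build_db.py is the expensive part of a scan (the code itself flags it with "this is costing scanning time"), and it all goes through hachoir. The same calls in isolation, mirroring the repo's own get_metadata/get_duration_seconds usage; the path is a placeholder:

from hachoir_parser import createParser
from hachoir_metadata import extractMetadata

path = '/some/video.avi'                   # placeholder, not from the repo
parser = createParser(unicode(path), path) # hachoir expects a unicode filename
meta = extractMetadata(parser)

print(meta.get('duration'))                # a timedelta; .seconds is what gets stored
print(meta.get('width'), meta.get('height'))

Wrapping each .get() in a try/except, as get_metadata_entry does, is what lets the scan survive files whose containers expose no such field.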
{
"alpha_fraction": 0.5120678544044495,
"alphanum_fraction": 0.5120678544044495,
"avg_line_length": 28.960784912109375,
"blob_id": "bcf5975e34c48f947e853686f0a1ab351cd27e56",
"content_id": "914b7d22bb82b4841b0ca0fd9d474ab10ff7ac74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1533,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 51,
"path": "/lib/myconfig.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "\n\nclass MyConfig:\n\n\n def go(self,appPath,commonLib,templatesLib,theXml):\n self.commonLib = commonLib\n self.templatesLib = templatesLib\n \n print ('lib.MyConfig.go')\n \n \n # html = self.writeConfigTabs(theXml.find('configTabs'))\n# html = self.getVideoSettings(theXml.find('video_folders'))\n# return html\n \n\n# \n# def getVideoSettings(self,xml):\n# wrapper = ET.Element('div')\n# \n# it = xml.getiterator('folder')\n# for folder in it:\n# profileLabel = folder.attrib['profile']\n# profilePath = folder.text\n# n = str(it.index(folder))\n# \n# profile = ET.SubElement(wrapper, \"div\")\n#\n# label = ET.SubElement(profile, \"label\")\n# label.attrib['for'] = 'vid_'+n\n# label.text = profileLabel\n#\n# input = ET.SubElement(profile, \"input\")\n# input.attrib['id'] = 'vid_'+n\n# input.attrib['type'] = 'text'\n# input.attrib['value'] = profilePath\n# \n# button = ET.SubElement(profile, \"button\")\n# button.text = 'remove'\n#\n# return ET.tostring(wrapper) \n# \n# \n# \n# def writeConfigTabs(self,xml):\n# wrapper = ET.Element('div')\n# wrapper.attrib['id'] = 'config_tabs_wrapper'\n# \n# it = xml.getiterator('tab')\n# for tab in it:\n# profileLabel = tab.attrib['label']\n# profile = tab.attrib['profile']\n\n\n\n"
},
{
"alpha_fraction": 0.5484238862991333,
"alphanum_fraction": 0.5504359602928162,
"avg_line_length": 36.70379638671875,
"blob_id": "26a1ccd7b84ce55a293323fab672a40840fc3629",
"content_id": "4549eba00b24d8d29fb8aca202627d44164cc608",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14910,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 395,
"path": "/main.pyw",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom xml.etree import ElementTree as ET\nimport threading\nimport sqlite3\nimport subprocess\nimport sys\nimport os\nimport time\n\nfrom PySide import QtCore\nfrom PySide import QtGui\nfrom PySide import QtWebKit\nfrom PySide import QtNetwork\nfrom PySide.phonon import Phonon\n\nimport lib.common\nimport lib.templates\nimport lib.build_db\nimport lib.query_db\nimport lib.launch_video\nimport lib.video_player\nimport lib.get_screens\n\n# pyside-uic videoplayerframe.ui -o gui.py\n# pyuic4 videoplayerframe.ui -o gui.py\n\nclass MainWindow(QtGui.QMainWindow):\n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n self.lib = lib.common.common() \n self.templates = lib.templates.templates()\n \n self.curdir = self.getMainDir()\n self.configxml = self.lib.getXml(os.path.join(self.curdir, 'config.xml'))\n self.dbpath = os.path.join(self.curdir, 'res', 'data.db')\n \n self.resize(1200, 700)\n self.setWindowTitle('New Video Gallery')\n self.setWindowIcon(QtGui.QIcon(os.path.join(self.curdir, 'res','graphics', 'app_icon.png')))\n self.center()\n\n self.buildDB = lib.build_db.buildDb()\n self.buildDB.postBuildDbSig.connect(self.buildDB_update_status)\n self.buildDB.startBuildDbSig.connect(self.buildDB_start)\n self.buildDB.endBuildDbSig.connect(self.buildDB_end)\n\n self.queryDB = lib.query_db.queryDb()\n self.queryDB.set_values(self.curdir, self.configxml, self.dbpath)\n self.queryDB.postQueryDbSig.connect(self.queryDB_post_query)\n self.queryDB.startQueryDbSig.connect(self.queryDB_start)\n self.queryDB.endQueryDbSig.connect(self.queryDB_end)\n \n self.launch_video = lib.launch_video.launchVideo()\n self.launch_video.startHereSignal.connect(self.play_here)\n \n self.get_screens = lib.get_screens.getScreens()\n \n # create webkit and load start page\n self.theinit('not')\n\n \n def play_here(self, video_url):\n #remove goback div because it will be created\n # based on the tag body attributes\n goback = self.mainframe.findFirstElement('div#goback')\n goback.removeFromDocument()\n self.backhtml = self.mainframe.toHtml()\n \n self.web.close()\n\n vid = lib.video_player.thePlayer(self, video_url)\n back = self.findChild(QtGui.QPushButton, 'back')\n back.clicked.connect(self.goback)\n\n\n def goback(self):\n childs1 = self.findChild(Phonon.VideoPlayer, 'player')\n childs2 = self.findChild(QtGui.QFrame, 'framePlayControl')\n childs1.deleteLater()\n childs2.deleteLater()\n # create webkit and load the last html page loaded\n self.theinit(self.backhtml)\n \n\n def theinit(self, html):\n self.web = QtWebKit.QWebView(self)\n self.web.loadFinished.connect(self.web_load_end)\n\n self.web.setRenderHints(QtGui.QPainter.HighQualityAntialiasing | \n QtGui.QPainter.SmoothPixmapTransform | \n QtGui.QPainter.TextAntialiasing)\n\n self.websettings = self.web.settings()\n self.websettings.setAttribute(QtWebKit.QWebSettings.DeveloperExtrasEnabled, 7)\n #self.websettings.setAttribute(QtWebKit.QWebSettings.ZoomTextOnly,9)\n\n self.page = self.web.page()\n self.mainframe = self.page.mainFrame()\n self.mainframe.javaScriptWindowObjectCleared.connect(self.js_cleared)\n\n startpage = os.path.join(self.curdir, 'site', 'index.html')\n if html == 'not':\n self.web.load(QtCore.QUrl(startpage))\n else:\n self.web.setHtml(html)\n \n self.setCentralWidget(self.web)\n self.web.show()\n\n\n def buildDB_update_status(self, html):\n print('buildDB_update_status')\n results = self.mainframe.findFirstElement(\"div#results\")\n results.setStyleProperty('display','block')\n results.setPlainText('')\n 
results.appendInside(html)\n \n \n def buildDB_start(self, html):\n print('buildDB_start')\n results = self.mainframe.findFirstElement(\"div#results\")\n results.setStyleProperty('display','block')\n results.setPlainText('Starting scan...')\n \n def buildDB_end(self, html):\n print('buildDB_end')\n results = self.mainframe.findFirstElement(\"div#results\")\n results.setStyleProperty('display','block')\n span = ET.Element('span')\n span.text = 'Ending scan...'\n results.appendInside(ET.tostring(span))\n\n\n\n def queryDB_post_query(self, html):\n print('queryDB_post_query')\n self.web.setHtml(html)\n \n def queryDB_start(self, html):\n print('queryDB_start', html)\n self.web.setHtml(html)\n \n def queryDB_end(self, html):\n print('queryDB_end')\n\n\n\n def center(self):\n screen = QtGui.QDesktopWidget().screenGeometry()\n size = self.geometry()\n self.move((screen.width() - size.width()) / 2, (screen.height() - size.height()) / 2)\n \n\n def keyPressEvent(self, event):\n '''This method does not need to be instanciated\n because self is a MainWindow.\n MainWindow looks for a keyPressEvent method'''\n if event.key() == QtCore.Qt.Key_X:\n self.showFullScreen()\n if event.key() == QtCore.Qt.Key_C:\n self.showNormal()\n if event.key() == QtCore.Qt.Key_U:\n self.web.setZoomFactor(1.5)\n if event.key() == QtCore.Qt.Key_I:\n self.web.setZoomFactor(1)\n if event.key() == QtCore.Qt.Key_O:\n self.web.setZoomFactor(0.8)\n if event.key() == QtCore.Qt.Key_S:\n self.theinit('not')\n\n\n def web_load_end(self):\n print ('web_load_End')\n title = self.web.title()\n \n self.create_goback()\n self.apply_events_front()\n self.apply_events_afterfront()\n self.get_screens.set_values(self.mainframe)\n \n if title == 'Hall':\n self.get_starting_html()\n if title == 'Edit':\n self.buildDB.set_values(self.curdir, self.configxml, self.dbpath)\n self.mainframe.addToJavaScriptWindowObject(\"build_db\", self.buildDB)\n \n \n \n def create_goback(self):\n '''\n Create goback menu according the body tag attributtes.\n If the attributes indicate that we are in an afterfront page profile, \n a back link, which calls the front level of the profile is created.\n If we are in front level just create link to home.\n '''\n body = self.mainframe.findFirstElement('body')\n mode = body.hasAttribute('data-mode')\n\n if mode:\n goback = ET.Element('div', {'id':'goback'})\n leftdiv = ET.SubElement(goback, 'div', {'class':'left'})\n homelink = ET.SubElement(leftdiv, 'a')\n homelink.attrib['href'] = 'file://' + os.path.join(self.curdir, 'site', 'index.html')\n homelink.text = 'Hall'\n \n if body.attribute('data-mode') != 'front':\n back = ET.SubElement(leftdiv, 'span', {'id':'backspan'})\n back.attrib['data-mode'] = body.attribute('data-mode')\n back.attrib['data-profile'] = body.attribute('data-profile')\n back.attrib['data-label'] = body.attribute('data-label')\n back.attrib['data-url'] = body.attribute('data-url')\n back.text = body.attribute('data-label')\n\n body.appendInside(ET.tostring(goback))\n backspan = self.mainframe.findFirstElement('span#backspan')\n backspan.evaluateJavaScript('''\n this.addEventListener('mouseup', function(e) { \n var profile = this.getAttribute('data-profile');\n var label = this.getAttribute('data-label');\n var url = this.getAttribute('data-url');\n \n query_db.go('front',profile,label,url,'none','none');\n },false);\n ''')\n\n\n\n def apply_events_afterfront(self):\n body = self.mainframe.findFirstElement('body')\n mode = body.hasAttribute('data-mode')\n if mode:\n if body.attribute('data-mode') != 
'front':\n \n # show hide files\n for span in body.findAll('span.spanShowEpisodes').toList():\n span.evaluateJavaScript('''\n this.addEventListener('mouseup', function(e) { \n var wrapper = this.parentNode.parentNode;\n var table = wrapper.getElementsByTagName('table')[0];\n var table_state = table.style.display;\n \n if (table_state == 'table'){table.style.display = 'none';}\n else{table.style.display = 'table';}\n },false);''')\n\n\n # show hide img storyboard\n for span_ in body.findAll('span.spanShowStoryboard').toList():\n span_.evaluateJavaScript('''\n this.addEventListener('mouseup', function(e) { \n var wrapper = this.parentNode.parentNode;\n var img = wrapper.getElementsByTagName('img')[0];\n var img_state = img.style.display;\n \n if (img_state == 'inline'){img.style.display = 'none';}\n else{img.style.display = 'inline';}\n },false);''')\n\n\n # launch videos\n for span2 in body.findAll('span.video_launcher').toList():\n span2.evaluateJavaScript('''\n this.addEventListener('mouseup', \n function(e) { \n var video_url = this.getAttribute('data-video_url');\n var app = this.getAttribute('data-app');\n\n launch_video.go(app,video_url);\n },false);''')\n \n \n \n \n \n \n # get screens apply events\n for file_screen in body.findAll('img.fileScreen').toList():\n file_screen.evaluateJavaScript('''\n this.addEventListener('mouseover', \n function(e) { \n var url = this.getAttribute('data-url');\n get_screens.gomouseover(url);\n }\n ,false);\n \n this.addEventListener(\"mouseout\", \n function(evt) { \n var url = this.getAttribute('data-url');\n get_screens.gomouseout(url);\n }, \n false);''')\n \n \n \n\n def apply_events_front(self):\n body = self.mainframe.findFirstElement('body')\n mode = body.attribute('data-mode')\n locprofile = body.attribute('data-profile')\n loclabel = body.attribute('data-label')\n locurl = body.attribute('data-url')\n\n if mode == 'front':\n for movie in body.findAll('div.movieWrapper').toList():\n movie.evaluateJavaScript('''\n this.addEventListener('mouseup', function(e) { \n var profile = document.body.getAttribute('data-profile');\n var label = document.body.getAttribute('data-label');\n var url = document.body.getAttribute('data-url');\n var movie = this.getAttribute('data-movie_label');\n \n query_db.go('afterfront',profile,label,url,movie,'none');\n },false);\n ''')\n\n\n\n def js_cleared(self):\n print ('Javascript objects cleared')\n self.mainframe.addToJavaScriptWindowObject(\"query_db\", self.queryDB)\n self.mainframe.addToJavaScriptWindowObject(\"launch_video\", self.launch_video)\n self.mainframe.addToJavaScriptWindowObject(\"get_screens\", self.get_screens)\n\n\n def get_starting_html(self):\n indexwrapper = self.mainframe.findFirstElement('div#indexWrapper')\n if indexwrapper:\n folders = self.get_starting_html_string()\n indexwrapper.appendInside(folders)\n \n for folder in indexwrapper.findAll('div.indexFolderWrapper').toList():\n folder.evaluateJavaScript('''\n this.addEventListener('mouseup', function(e) { \n var profile = this.getAttribute('data-profile');\n var label = this.getAttribute('data-label');\n var url = this.getAttribute('data-url');\n \n query_db.go('front',profile,label,url,'none','none');\n },false);\n ''')\n \n \n def get_starting_html_string(self):\n html = ''\n folders = self.configxml.find('video_folders')\n for folder in folders.getiterator('folder'):\n div = ET.Element('div', {'class':'indexFolderWrapper'})\n div.text = folder.attrib['label']\n div.attrib['data-profile'] = folder.attrib['profile']\n 
div.attrib['data-label'] = folder.attrib['label']\n div.attrib['data-url'] = folder.text\n \n html += ET.tostring(div, 'utf-8')\n return html\n \n\n \n def getMainDir(self):\n '''Get script or exe directory.'''\n if hasattr(sys, 'frozen'): #py2exe, cx_freeze\n app_path = os.path.dirname(sys.executable)\n print ('Executing exe', app_path)\n elif __file__: #source file \n app_path = os.path.abspath(os.path.dirname(__file__))\n print ('Executing source file', app_path)\n return app_path \n \n \n\n \n \n \nif __name__ == \"__main__\":\n '''Get script or exe directory.'''\n app_path = ''\n if hasattr(sys, 'frozen'): #py2exe, cx_freeze\n app_path = os.path.dirname(sys.executable)\n elif __file__: #source file\n app_path = os.path.dirname(__file__)\n\n app = QtGui.QApplication(sys.argv)\n app.setApplicationName(\"New video gallery\")\n \n splash_pix = QtGui.QPixmap(os.path.join(app_path, 'res', 'graphics', 'splash.png'))\n splash = QtGui.QSplashScreen(splash_pix)\n splash.setMask(splash_pix.mask())\n splash.show()\n\n time.sleep(3)\n\n main = MainWindow()\n splash.finish(main)\n main.show()\n\n sys.exit(app.exec_())\n\n\n\n\n \n\n\n\n\n"
},
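The MainWindow above leans on one bridge pattern throughout: a QObject (buildDb, queryDb, launchVideo, getScreens) is exposed to page JavaScript with addToJavaScriptWindowObject, and re-registered in js_cleared because WebKit drops those bindings on every page load. A minimal self-contained sketch of that bridge, using the same PySide1/Qt4-era API as the repo; the object name and handler here are illustrative, not taken from the source:

from PySide import QtCore, QtGui, QtWebKit
import sys

class Bridge(QtCore.QObject):
    @QtCore.Slot(str)
    def go(self, msg):
        # called from page JavaScript
        print('JS said: ' + msg)

app = QtGui.QApplication(sys.argv)
view = QtWebKit.QWebView()
frame = view.page().mainFrame()
bridge = Bridge()
# re-register after every load; WebKit clears JS window objects then
frame.javaScriptWindowObjectCleared.connect(
    lambda: frame.addToJavaScriptWindowObject('bridge', bridge))
view.setHtml("<body onclick=\"bridge.go('clicked')\">click me</body>")
view.show()
sys.exit(app.exec_())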
{
"alpha_fraction": 0.5334800481796265,
"alphanum_fraction": 0.5359949469566345,
"avg_line_length": 32.9555549621582,
"blob_id": "38b6e33fdaeaf23eb43a1ebf4e0367c0e0651775",
"content_id": "966840a664fdbde6d0a7bbcdab8795595f805a17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3181,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 90,
"path": "/lib/get_screens.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "import os\nfrom PySide import QtCore\n\n\n\n\n\nclass getScreens(QtCore.QObject):\n\n def set_values(self,frame):\n self.frame = frame\n\n\n @QtCore.Slot(str)\n def gomouseover(self,url):\n self.calling_img = self.get_calling_img(url)\n self.screenslist = self.get_movie_screens(url) \n\n screensobj = QtCore.QObject()\n screensobj.setObjectName(\"screens\")\n\n self.intervalobj = QtCore.QObject()\n self.intervalobj.setObjectName(\"intervalobj\")\n\n self.frame.addToJavaScriptWindowObject(\"intervalobj\",self.intervalobj)\n self.frame.addToJavaScriptWindowObject(\"screens\",screensobj)\n\n if len(self.screenslist) != 0 and self.calling_img:\n print('found',len(self.screenslist),'screens')\n screensobj.setProperty(\"screenlist\",self.screenslist)\n self.calling_img.evaluateJavaScript('''\n var screens = screens.screenlist;\n var cur_screen = 0;\n var img_el = this;\n\n img_el.setAttribute('data-defaultsrc',img_el.getAttribute('src'));\n intervalobj = window.setInterval(\"cycle_screen()\",1000);\n \n function cycle_screen(){\n if (cur_screen == screens.length){\n cur_screen = 0;\n }\n img_el.setAttribute('src','file:///'+screens[cur_screen]);\n cur_screen++;\n }''')\n \n \n @QtCore.Slot(str)\n def gomouseout(self,url):\n # sometimes mouseout is called before any mouseover\n if self.calling_img:\n self.calling_img.evaluateJavaScript('''\n clearInterval(intervalobj);\n var img_el = this;\n img_el.setAttribute('src',img_el.getAttribute('data-defaultsrc'));''')\n \n \n \n def get_movie_screens(self,url):\n dirname, filename = os.path.split(url)\n filebasename = os.path.splitext(filename)[0]\n screensdir = os.path.join(dirname,'screens')\n screenslist = []\n \n if os.path.exists(screensdir):\n try:\n screens = os.listdir(screensdir)\n for screen in screens:\n screenurl = os.path.join(screensdir,screen)\n screenfilebasename,ext = os.path.splitext(screen)\n # excess chars used for numbering\n\n nchars = len(screenfilebasename.replace(filebasename,''))\n screenlabel = screenfilebasename[:-nchars]\n\n if screenlabel == filebasename:\n screenslist.append(screenurl)\n except Exception as e:\n print (\"Error getImgOvers.get_movie_screens : \",e)\n return screenslist\n \n \n \n def get_calling_img(self,url):\n calling_img = False\n imgs = self.frame.findAllElements(\"img.fileScreen\").toList()\n for img in imgs:\n if img.attribute('data-url') == url:\n calling_img = img\n return calling_img\n \n \n \n \n \n \n \n \n \n \n \n \n \n "
},
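get_movie_screens and get_calling_img above both depend on a filename convention: a thumbnail for a video lives in screens/ and is named with the video's stem plus a numbering suffix (video_thumbnailer.py later in this repo writes "<stem> - <n>.jpg"). A standalone sketch of that suffix-stripping match, with illustrative names:

import os

def screens_for(video_filename, screen_filenames):
    base = os.path.splitext(video_filename)[0]
    matches = []
    for screen in screen_filenames:
        stem = os.path.splitext(screen)[0]
        nchars = len(stem.replace(base, ''))  # length of the ' - <n>' numbering suffix
        if nchars and stem[:-nchars] == base:
            matches.append(screen)
    return matches

print(screens_for('movie.avi', ['movie - 0.jpg', 'movie - 1.jpg', 'other - 0.jpg']))
# -> ['movie - 0.jpg', 'movie - 1.jpg']

Note the `if nchars` guard: in the repo's version a screen whose stem equals the video stem exactly gives nchars == 0, and stem[:-0] is '', so such a file silently fails to match.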
{
"alpha_fraction": 0.6146970987319946,
"alphanum_fraction": 0.6201589107513428,
"avg_line_length": 29.059701919555664,
"blob_id": "b805cc838907bd98fc4dd3ad78391d0014a7b215",
"content_id": "46be5ab135e9d44b38bf3db42f9769184f7acdba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2014,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 67,
"path": "/site/js/navletters.js",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "\nvar navletters = new Object();\nnavletters.wrapper = '';\nnavletters.all = [];\nnavletters.letters = [];\n\nnavletters.init = function(id,num){\n var wrapper = document.getElementById(id);\n var childs = wrapper.childNodes;\n navletters.wrapper = wrapper;\n\n for(var e = 0; e < childs.length; e++){\n var label = childs[e].getAttribute('data-movie_label');\n var firstletter = label.substr(0,1);\n navletters.all.push(childs[e]);\n \n if (navletters.letters.indexOf(firstletter) == -1){\n navletters.letters.push(firstletter);\n }\n }\n \n if (navletters.all.length > num){\n wrapper.style.marginTop = '85px';\n navletters.createButtons();\n }\n}\n\n\nnavletters.createButtons = function(){\n var buttonsdiv = document.createElement('div');\n buttonsdiv.id = 'buttonsdiv';\n \n var buttonall = document.createElement('button');\n buttonall.textContent = 'all';\n buttonall.addEventListener('mouseup',navletters.sort,false);\n buttonsdiv.appendChild(buttonall);\n \n for(var e = 0; e < navletters.letters.length; e++){\n var button = document.createElement('button');\n button.textContent = navletters.letters[e];\n button.addEventListener('mouseup',navletters.sort,false);\n buttonsdiv.appendChild(button);\n }\n document.body.insertBefore(buttonsdiv,navletters.wrapper);\n}\n\n\nnavletters.sort = function(evt){\n dom_deleteAllChildNodes(navletters.wrapper);\n var query = event.srcElement.textContent;\n\n if (query != 'all'){\n for(var e = 0; e < navletters.all.length; e++){\n var label = navletters.all[e].getAttribute('data-movie_label');\n var firstletter = label.substr(0,1);\n if (firstletter == query){\n navletters.wrapper.appendChild(navletters.all[e]);\n }\n }\n }else{\n for(var e = 0; e < navletters.all.length; e++){\n navletters.wrapper.appendChild(navletters.all[e]);\n }\n } \n}\n\n\n/* end */"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5482093691825867,
"avg_line_length": 16.731706619262695,
"blob_id": "a2d8e5cc0d40add340b6f5c4f711ee52adf4dbf8",
"content_id": "c33ddabfaf696044e87d7994f899b17a041adbc0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 726,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 41,
"path": "/readme.txt",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "Video Gallery is an application to play video files.\n\nIt is built with Pyside and webkit.\n\nThis is very alpha so use with care.\n\nProfiles must obey the following folder structure:\n\nSeries folder:\n Andromeda\n folder.png - 200*100\n folder_big.png - \n s1\n 1.avi\n 2.avi\n \n\nMovies folder:\n Avatar\n folder.png - 200*100\n folder_big.png - \n movie.avi\n\n\nDocs folder:\n BBC - Planet Earth\n folder.png - 200*100\n folder_big.png - \n doc.avi\n\n\nSports folder:\n F1\n 2009\n folder.png - 200*100\n 04 - F1 - Bahrain GP\n race.avi\n\n\nBUGS:\n Subtiles hide part of the video image."
},
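The layout described above is a contract the tools depend on: each title folder carries the video plus folder.png (200*100) and a folder_big image. A small sketch that walks a profile and reports missing artwork; the extension list and profile path are assumptions, and note that tools/create_storyboard.py in this repo actually writes folder_big.jpg rather than the .png named in the readme:

import os

VIDEO_EXTS = ('.avi', '.mkv', '.mp4')  # assumption; the app reads its list from config.xml

def missing_artwork(profile_dir):
    missing = []
    for root, dirs, files in os.walk(profile_dir):
        if any(f.lower().endswith(VIDEO_EXTS) for f in files):
            for art in ('folder.png', 'folder_big.jpg'):
                if art not in files:
                    missing.append(os.path.join(root, art))
    return missing

print(missing_artwork('/videos/Movies'))  # hypothetical profile path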
{
"alpha_fraction": 0.5083729028701782,
"alphanum_fraction": 0.5136215686798096,
"avg_line_length": 28.40441131591797,
"blob_id": "4ec863abae14c7c72d750350063cba8acba1f7e2",
"content_id": "7cc65ce5fe032ad0375d5ab5ca82418a6f338c7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4001,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 136,
"path": "/lib/common.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "from xml.etree import ElementTree as ET\nimport os\nimport base64\nimport pprint\n\nclass common:\n\n def getShortString(self,nC,theStr):\n strOut = ''\n if len(theStr) <= nC:\n strOut = theStr\n else:\n for c in range(0,nC):\n strOut += theStr[c]\n strOut += '...'\n return strOut\n\n\n def getXml(self,filename):\n try:\n tree = ET.parse(filename)\n print (\"Sucess parsing xml file: \",filename)\n return tree\n except Exception as e:\n print (\"Error could not get xml file: \", e)\n \n \n def deleteFile(self,full_url):\n try:\n print ('Lib.common.deleteFile')\n if os.path.exists(full_url):\n os.remove(full_url)\n except Exception as e:\n print (\"Error Lib.common.deleteFile : \", e)\n \n\n def get_locid(self,data):\n a = base64.b64encode(data)\n return a\n\n\n def walkDir(self,dir):\n items = []\n try:\n for root,dirs,files in os.walk(unicode(dir)):\n for file in files:\n full_url = os.path.join(dir,root,file)\n url, ext = os.path.splitext(full_url)\n items.append([full_url,ext,file])\n except Exception as e:\n print (\"Error Lib.common.walkDir : \", e)\n return items\n \n\n def print_nice(self,data):\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(data)\n \n \n def walkDirFilter(self,dir,exts):\n items = []\n try:\n for root,dirs,files in os.walk(unicode(dir)):\n for file in files:\n full_url = os.path.join(dir,root,file)\n url, ext = os.path.splitext(full_url)\n if ext in exts:\n items.append([full_url,ext,file])\n except Exception as e:\n print (\"Error Lib.common.walkDirFilter : \", e)\n return items\n \n \n def getFileDate(self,full_url):\n fileDate = 'not'\n try:\n fileDate = str(os.path.getmtime(full_url))\n except Exception as e:\n print (\"Error Lib.common.getFileDate: \",e)\n return fileDate\n \n\n def getFileSize(self,full_url):\n fileSize = 0\n try:\n fileSize = str(os.path.getsize(full_url) /1024/1024)\n except Exception as e:\n print (\"Error Lib.common.getFileSizeNew: \",e)\n return fileSize \n \n\n def getElementById(self,xml,id):\n '''Iterate through all elements in xml\n to find the one with the specified id'''\n theel = 'not'\n for el in xml.getiterator():\n atrbs = el.items() # returns a list of pairs\n for pair in atrbs:\n if pair[0] == 'id' and pair[1] == id:\n theel = el\n return theel \n \n \n def writeXMLFile(self,savepath,tagXmlData):\n try:\n #You need to wrap the root element in an Elementtree object, before you can write it:\n ET.ElementTree(tagXmlData).write(savepath, encoding='utf-8')\n except Exception as e:\n print (\"Error Lib.common.writeXMLFile. \",e) \n\n \n def getDirDirs(self,dir):\n dirs = []\n try:\n for item in os.listdir(unicode(dir)):\n fullpath = os.path.join(dir,item)\n if os.path.isdir(fullpath):\n dirs.append(item)\n except Exception as e:\n print (\"Error Lib.common.getDirDirs : \",e.args[0]) \n \n dirs.sort(cmp=None, key=None, reverse=False)\n return dirs \n \n \n def checkFile(self,url):\n r = 'no'\n if os.path.exists(url):\n r = 'yes'\n return r \n \n\n def prettyPrintET(self,element):\n from xml.dom.minidom import parseString\n txt = ET.tostring(element)\n print (parseString(txt).toprettyxml()) \n\n"
},
{
"alpha_fraction": 0.5468900799751282,
"alphanum_fraction": 0.550918459892273,
"avg_line_length": 34.44827651977539,
"blob_id": "c4dec5d3f5298ae582abf10e77635421eb95b08b",
"content_id": "c579c1d34bb9b10bd40b3b5ef86d94c03f592d5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6206,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 174,
"path": "/tools/create_storyboard.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "from xml.etree import ElementTree as ET\nimport Image\nimport ImageOps\nimport sys\nimport os\nimport time\nimport random\nimport itertools\n\n\n'''storyboard = folder_big.jpg\ncreate storyboard in folder of video file\n'''\n\nclass Main:\n def __init__(self):\n print('----------------------------------')\n print('Create storyboard start')\n self.configxml = self.get_config_xml()\n self.curdir = os.path.abspath(os.path.dirname(__file__))\n \n self.delete_existing_folderbig = self.configxml.find('delete_existing_folderbig').text\n self.columns = int(self.configxml.find('folderbigcolumns').text)\n self.rows = int(self.configxml.find('folderbigrows').text)\n self.vsep = int(self.configxml.find('folderbigvsep').text)\n self.hsep = int(self.configxml.find('folderbighsep').text)\n self.imagewidth = int(self.configxml.find('folderbigthumbwidth').text)\n self.imageheight = int(self.configxml.find('folderbigthumbheight').text)\n \n self.totalfolderbig = 0\n self.dirswithoutfolderbig = []\n begin_time = time.time()\n\n # go !!!\n profiles = self.get_dirs()\n #profiles = ['/media/vault_big/video/TESTING']\n exts = self.configxml.find('video_exts').text.split(',')\n for profile in profiles:\n self.run(profile,exts)\n \n \n print('-------------------------------------')\n for item in self.dirswithoutfolderbig:\n print('no folder big',item)\n total_time = time.strftime(\"%H:%M:%S\", \n time.gmtime(time.time() - begin_time))\n print('time taken',total_time)\n print('total folder big',self.totalfolderbig)\n print('the end')\n print('-------------------------------------')\n \n \n \n def run(self,profile,exts):\n allfiles = self.get_all_video_files(profile, exts)\n for uniqueurl in self.get_unique_urls(allfiles):\n folderbigpath = os.path.join(uniqueurl,'folder_big.jpg')\n screensdir = os.path.join(uniqueurl,'screens')\n hasscreensdir = os.path.exists(screensdir)\n hasfolderbig = os.path.exists(folderbigpath)\n \n if hasscreensdir:\n if not hasfolderbig or self.delete_existing_folderbig == 'yes':\n screens = os.listdir(screensdir)\n self.create_big_image(screensdir,screens,folderbigpath)\n if not hasscreensdir:\n self.dirswithoutfolderbig.append(uniqueurl)\n \n \n \n def create_big_image(self,screensdir,screens,folderbigpath):\n # http://stackoverflow.com/questions/3907443/python-pil-missing-images\n COLUMNS=self.columns\n ROWS=self.rows\n VSEP=self.vsep\n HSEP=self.hsep\n IMAGE_SIZE=(self.imagewidth,self.imageheight)\n NUMBERTHUMBS = ROWS * COLUMNS \n \n image=Image.new(\"RGB\",\n ((IMAGE_SIZE[0]+HSEP)*COLUMNS+HSEP,\n (IMAGE_SIZE[1]+VSEP)*ROWS+VSEP),\n (0,0,0))\n\n i = 0\n pilscreens = self.get_pilscreens(screens,screensdir,NUMBERTHUMBS,IMAGE_SIZE)\n for row,column in itertools.product(range(ROWS),range(COLUMNS)):\n #print(row,column)\n paste_x = HSEP+column*(IMAGE_SIZE[0]+HSEP)\n paste_y = VSEP+row*(IMAGE_SIZE[1]+VSEP)\n image.paste(pilscreens[i],(paste_x,paste_y))\n i += 1\n \n try:\n image.save(folderbigpath, \"JPEG\")\n self.totalfolderbig += 1\n print ('Saving image',folderbigpath)\n except Exception as e:\n print ('Error saving image: ',e) \n \n \n \n def get_pilscreens(self,screens,screensdir,number_thumbs,image_size):\n method = Image.ANTIALIAS\n bleed = 0\n centering = (0.5,0.5)\n \n defaulthumbpath = self.get_defaulthumbpath()\n defaulthumb = ImageOps.fit(Image.open(defaulthumbpath),image_size,method,bleed,centering)\n pilscreens = [defaulthumb] * number_thumbs\n \n #random.shuffle(screens)\n i = 0\n for screen in screens:\n if i < number_thumbs:\n im = 
Image.open(os.path.join(screensdir,screen))\n thumb = ImageOps.fit(im,image_size,method,bleed,centering)\n pilscreens[i] = thumb\n i += 1\n return pilscreens\n\n\n\n def get_all_video_files(self, profile, exts):\n allfiles = []\n try:\n for root, dirs, files in os.walk(unicode(profile)):\n for fn in files:\n fullurl = os.path.join(root, fn)\n filebasename, ext = os.path.splitext(fn)\n if ext.lower() in exts:\n allfiles.append(fullurl)\n except Exception as e:\n print (\"Error get_all_video_files. \",e.args)\n print('Profile is : ',profile)\n return allfiles\n\n \n def get_unique_urls(self,allfiles):\n alldirs = []\n for file in allfiles:\n dirname = os.path.dirname(file)\n if dirname not in alldirs:\n alldirs.append(dirname)\n return alldirs \n \n \n def get_config_xml(self):\n script_path = os.path.abspath(os.path.dirname(__file__))\n parent = os.path.normpath(os.path.join(script_path, '..'))\n tree = ET.parse(os.path.join(parent, 'config.xml'))\n return tree\n\n\n def get_defaulthumbpath(self):\n script_path = os.path.abspath(os.path.dirname(__file__))\n parent = os.path.normpath(os.path.join(script_path, '..'))\n defaulthumbpath = os.path.join(parent,'res','graphics','default_thumb_folderbig.png')\n return defaulthumbpath\n \n \n def get_dirs(self):\n folders = []\n video_folders = self.configxml.find('video_folders')\n for folder in video_folders.getiterator('folder'):\n folders.append(folder.text)\n return folders\n \n \n\n\n \nif __name__ == '__main__':\n Main()\n \n \n\n\n \n \n"
},
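create_big_image above is a plain contact sheet: a black canvas sized to fit COLUMNS x ROWS thumbnails plus separators, filled left-to-right, top-to-bottom. The same grid math in a runnable sketch with made-up dimensions, using modern Pillow imports (the script above uses the old standalone PIL modules):

from PIL import Image

COLS, ROWS, HSEP, VSEP = 4, 3, 5, 5
THUMB = (200, 112)  # hypothetical thumbnail size

sheet = Image.new("RGB",
                  ((THUMB[0] + HSEP) * COLS + HSEP,
                   (THUMB[1] + VSEP) * ROWS + VSEP),
                  (0, 0, 0))
# stand-in thumbnails; in the script these come from ImageOps.fit on real screens
thumbs = [Image.new("RGB", THUMB, (20 * i % 256, 80, 120)) for i in range(COLS * ROWS)]

for i in range(COLS * ROWS):
    row, col = divmod(i, COLS)
    x = HSEP + col * (THUMB[0] + HSEP)
    y = VSEP + row * (THUMB[1] + VSEP)
    sheet.paste(thumbs[i], (x, y))

sheet.save("folder_big.jpg", "JPEG")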
{
"alpha_fraction": 0.6421325206756592,
"alphanum_fraction": 0.6462335586547852,
"avg_line_length": 26.20121955871582,
"blob_id": "082741f8b13667efba1644ca760048c93d919289",
"content_id": "4f60b8ad9245bf8024628e6726f2886f085abb3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4633,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 164,
"path": "/site/js/dom_helper.js",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_deleteAllChildNodes(holder){\r\n\r\n while(holder.hasChildNodes()){\r\n holder.removeChild(holder.lastChild);\r\n }\r\n\r\n}\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_HideElementsOfClass(targetClass){\r\n var s = dom_getElementsOfClass(targetClass);\r\n for(var e = 0; e < s.length; e++){\r\n s[e].style.display = 'none';\r\n }\r\n}\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_showHideEl(el){\r\n if (el.style.display != 'block'){\r\n el.style.display = 'block';\r\n }else{\r\n el.style.display = 'none';\r\n }\r\n}\r\n\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_containerGetElementsOfClass(container,targetClass){\r\n\r\n var matchingEls = [];\r\n var els = container.getElementsByTagName('*');\r\n for(var e = 0; e < els.length; e++){\r\n \r\n if (els[e].className == targetClass){\r\n matchingEls.push(els[e]);\r\n } \r\n \r\n }\r\n return matchingEls;\r\n}\r\n\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_getElementsOfClass(targetClass){\r\n\r\n var matchingEls = [];\r\n var els = document.getElementsByTagName('*');\r\n for(var e = 0; e < els.length; e++){\r\n \r\n if (els[e].className == targetClass){\r\n matchingEls.push(els[e]);\r\n } \r\n }\r\n return matchingEls;\r\n}\r\n\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_addPageHeader() {\r\n var headerDiv = document.createElement(\"div\");\r\n headerDiv.setAttribute('id', 'pageHeader');\r\n // insert as body first node\r\n document.body.insertBefore(headerDiv, document.body.childNodes[0]);\r\n}\r\n\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_addDocumentTitleDiv() {\r\n var docDiv = document.createElement(\"div\");\r\n docDiv.setAttribute('id', 'docDiv');\r\n \r\n var docTitle = document.title;\r\n var docText = document.createTextNode(docTitle);\r\n docDiv.appendChild(docText);\r\n \r\n document.body.appendChild(docDiv);\r\n}\r\n\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_onDomReady(function_name){\r\n\r\n var hasRun = \"no\";\r\n\r\n /* for Mozilla/Opera9 */\r\n if (document.addEventListener) {\r\n document.addEventListener(\"DOMContentLoaded\", function_name, false);\r\n hasRun = \"yes\";\r\n }\r\n\r\n /* for other browsers */\r\n if (hasRun == \"no\"){\r\n window.onload = function_name;\r\n }\r\n \r\n}\r\n\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_addEventListener(target, eventType, eventFunction){ \r\n if(target.addEventListener){\r\n target.addEventListener(eventType,eventFunction,false);\r\n }else if(target.attachEvent){\r\n target.attachEvent(\"on\"+eventType,eventFunction);\r\n }else{\r\n alert(\"Could not attach event\");\r\n } \r\n}\r\n\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nfunction dom_getEventObj(event){ \r\n\r\n var theObj = 'not';\r\n\r\n if(event.target){var theObj = event.target;}\r\n if(event.srcElement){var theObj = event.srcElement;} \r\n \r\n return theObj;\r\n}\r\n\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\nvar print = new Object();\r\n\r\nprint.init = function(){\r\n var hasprintdiv = document.getElementById('print');\r\n var text = '';\r\n \r\n for (var i=0; 
i<arguments.length; i++) {\r\n text += ' , '+arguments[i];\r\n }\r\n\r\n if (hasprintdiv){\r\n var p = document.createElement('p');\r\n var text = document.createTextNode(text) ;\r\n p.appendChild(text); \r\n hasprintdiv.appendChild(p);\r\n }\r\n \r\n if (!hasprintdiv){\r\n var div = document.createElement('div');\r\n div.setAttribute('id','print'); \r\n document.body.appendChild(div);\r\n\r\n var hasprintdiv = document.getElementById('print');\r\n hasprintdiv.style.background = '#333';\r\n hasprintdiv.style.position = 'absolute';\r\n hasprintdiv.style.bottom = '10px';\r\n hasprintdiv.style.right = '10px';\r\n hasprintdiv.style.height = '150px';\r\n hasprintdiv.style.width = '400px';\r\n hasprintdiv.style.overflow = 'scroll';\r\n \r\n var p = document.createElement('p');\r\n var text = document.createTextNode(text) ;\r\n p.appendChild(text); \r\n hasprintdiv.appendChild(p);\r\n }\r\n\r\n}\r\n/*xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx*/\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.6225535869598389,
"avg_line_length": 22.711111068725586,
"blob_id": "42fe17cb71fce7b5c33a8ca6748f588b35b91991",
"content_id": "ab979c549aed78d468e8ef8df579c667279e36ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1073,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 45,
"path": "/lib/templates.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "\n\nclass templates:\n\n def MainIndex(self,curdir,title):\n string = '''<!DOCTYPE html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/xml; charset=utf-8\" />\n<link href=\"file://'''+curdir+'''/style.css\" rel=\"stylesheet\" type=\"text/css\" />\n<tagToBeReplacedBecauseOfFuckingHtmlScriptTags shit=\"true\" />\n<title>'''+title+'''</title>\n</head>\n<body>\n\n<div id=\"goback\">\n <a href=\".buildDatabase\" class=\"floatRight\">refresh</a>\n <a href=\".showFrontPage\">Home</a>\n</div>\n\n\n<div id=\"indexWrapper\"></div>\n\n<div id=\"results\">not</div>\n\n</body></html>'''\n\n return string \n\n\n def Page(self,curdir,title,id):\n string = '''<!DOCTYPE html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/xml; charset=utf-8\" />\n<link href=\"file://'''+curdir+'''/style.css\" rel=\"stylesheet\" type=\"text/css\" />\n<tagToBeReplacedBecauseOfFuckingHtmlScriptTags shit=\"true\" />\n<title>'''+title+'''</title>\n</head>\n<body>\n<div id=\"goback\">\n <a href=\".showFrontPage\">Home</a>\n</div>\n<div id=\"'''+id+'''\"></div>\n</body></html>'''\n\n return string\n "
},
{
"alpha_fraction": 0.48148149251937866,
"alphanum_fraction": 0.504273533821106,
"avg_line_length": 27.278688430786133,
"blob_id": "8e9607a7632d93817687eb138b3ada6d0145656c",
"content_id": "f09a1ee9dab0209cf420ddbc7a6e3e7b84038b3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1755,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 61,
"path": "/lib/parse_subs.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "\n\n\n\nclass srtEntry(object):\n def __init__(self, entry):\n # ['3\\n', '00:01:14,211 --> 00:01:16,911\\n', 'text\\n']\n utf8bom = '\\xef\\xbb\\xbf'\n\n self.index = int(entry[0].lstrip(utf8bom))\n start, arrow, end = entry[1].split()\n self.start = self.parsetime(start)\n self.end = self.parsetime(end)\n self.lines = entry[2:]\n\n\n def parsetime(self,timestr):\n timestr = timestr.replace(',',':')\n hours, minutes, seconds, miliseconds = timestr.split(':')\n \n hours = int(hours)\n minutes = int(minutes)\n seconds = int(seconds)\n miliseconds = int(miliseconds)\n \n milis = (hours*3600000)+(minutes*60000)+(seconds*1000)+miliseconds\n return milis\n \n \n \nclass getSubtitles():\n def __init__(self,subfile):\n self.subfile = subfile\n self.entries = self.go()\n \n \n def go(self):\n newlines = ['\\n','\\r\\n','\\r']\n entry = []\n entries = []\n thefile = open(self.subfile,'r')\n\n for line in thefile:\n if line in newlines:\n try:\n entries.append(srtEntry(entry))\n except Exception as e:\n print('Could not get srt entry',e)\n entry = []\n else:\n entry.append(line)\n \n thefile.close()\n return entries\n \n \n# test \n#entries = getSubtitles('/home/antonio/Dev/python/debulha/my_debulha/Subtitles/samplesubs/Alice in Wonderland.srt')\n#for item in entries.entries:\n# print('******************************')\n# print(item)\n# print(item.index)\n# print(item.start)\n# print(item.end)\n# print(item.lines) \n \n \n \n \n "
},
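srtEntry.parsetime above is place-value arithmetic on the "HH:MM:SS,mmm" timestamp: for example "00:01:14,211" becomes 0*3600000 + 1*60000 + 14*1000 + 211 = 74211 milliseconds. The same conversion as a standalone function:

def srt_to_millis(timestr):
    # "00:01:14,211" -> 74211
    h, m, s, ms = timestr.replace(',', ':').split(':')
    return int(h) * 3600000 + int(m) * 60000 + int(s) * 1000 + int(ms)

assert srt_to_millis('00:01:14,211') == 74211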
{
"alpha_fraction": 0.5143790245056152,
"alphanum_fraction": 0.5184873342514038,
"avg_line_length": 32.74021530151367,
"blob_id": "997f22375c15d53d54479d240c4574639e0ec6b5",
"content_id": "5579cdd1b6592b41aa6235213f7fae74f2183352",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9493,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 281,
"path": "/tools/video_thumbnailer.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "from xml.etree import ElementTree as ET\nimport Image\nimport ImageOps\nimport subprocess\nimport datetime\nimport sys\nimport os\nimport glob\nimport time\nfrom hachoir_parser import createParser\nfrom hachoir_metadata import extractMetadata\n\n'''Find video files in dirs.\nFor each video.\n Divide video total time by number of shots.\n Create screens dir where video file is found.\n Create screens jpgs in screens dir for each video. \n Resize screens.\n \nTodo : \n When a video file is renamed , \n new screens have to created.\n The old screens will need to be erased.\n This should happen on a normal run.\n'''\n\n\nclass Main:\n def __init__(self):\n print('----------------------------------')\n print('Video Thumbnailer start')\n \n self.configxml = self.get_config_xml()\n self.delete_existing_screens = self.configxml.find('delete_existing_screens').text\n self.nshots = int(self.configxml.find('nshots').text)\n self.screendims = self.configxml.find('screendims').text.split(',')\n self.totalvideos = 0\n self.fileswithzerotime = []\n begin_time = time.time()\n\n # go !!!\n profiles = self.get_dirs()\n exts = self.configxml.find('video_exts').text.split(',')\n for profile in profiles:\n self.run(profile,exts)\n \n print('-------------------------------------')\n for item in self.fileswithzerotime:\n print('0 duration',item)\n total_time = time.strftime(\"%H:%M:%S\", \n time.gmtime(time.time() - begin_time))\n print('time taken',total_time)\n print('total videos',self.totalvideos)\n print('the end')\n print('-------------------------------------')\n\n\n\n\n\n def run(self,profile,exts):\n allfiles = self.get_all_video_files(profile, exts)\n\n # delete_existing_screens \n if self.delete_existing_screens == 'yes':\n for uniqueurl in self.get_unique_urls(allfiles):\n self.delete_screens(os.path.join(uniqueurl,'screens'))\n\n # make screens \n for fullurl in allfiles:\n hasscreens = self.check_if_has_screens(fullurl)\n\n if hasscreens != 'yes':\n dirname, filename = os.path.split(fullurl)\n filebasename,ext = os.path.splitext(filename)\n screensdir = os.path.join(dirname,'screens')\n \n # get metadata\n meta = self.get_metadata(fullurl)\n duration = self.get_start_time(meta)\n\n # if metadata is good\n if duration.seconds != 0:\n self.make_screen_dir(screensdir)\n snapintervalsecs = duration.seconds / self.nshots\n startime = 0\n \n # ffmpeg will be started self.nshots times \n # for each video file.\n # Each time ffmpeg is started the ss flag is incremented.\n # This ss flag is the division between self.nshots\n # and the length of the file \n for i in range(self.nshots):\n cmd = self.get_ffmpeg_cmd(fullurl,screensdir,filebasename,startime,i)\n self.run_cmd(cmd)\n \n # increment start time for the ss flag of ffmpeg\n startime += snapintervalsecs\n \n # resize screens just created\n self.resize_screens(screensdir,filebasename)\n \n if duration.seconds == 0:\n self.fileswithzerotime.append(fullurl)\n self.totalvideos += 1\n\n \n \n def get_ffmpeg_cmd(self,fullurl,screensdir,filebasename,startime,i):\n #ffmpeg -i input.dv -r 25 -ss 00:00:10 -t 00:00:05 -f image2 images%05d.png\n\n cmd = ['ffmpeg']\n\n cmd.append('-ss')\n cmd.append(str(startime))\n\n cmd.append('-i')\n cmd.append(fullurl)\n\n cmd.append('-vframes')\n cmd.append('1')\n\n cmd.append('-f')\n cmd.append('image2')\n\n picspath = os.path.join(screensdir,filebasename+' - '+str(i)+'.jpg')\n cmd.append(picspath)\n\n return cmd\n \n \n \n def resize_screens(self,screensdir,filebasename):\n '''Find screens of video file and resize 
them.'''\n if os.path.exists(screensdir):\n try:\n screens = os.listdir(screensdir)\n for screen in screens:\n screenpath = os.path.join(screensdir,screen)\n screenfilebasename = os.path.splitext(screen)[0]\n nchars = len(screenfilebasename.replace(filebasename,''))\n screenlabel = screenfilebasename[:-nchars]\n \n if screenlabel == filebasename:\n im = Image.open(screenpath) \n imsizeor = [im.size[0],im.size[1]]\n dims = [int(self.screendims[0]),int(self.screendims[1])]\n\n method = Image.ANTIALIAS\n bleed = 0\n centering = (0.5,0.5)\n e = ImageOps.fit(im,dims,method,bleed,centering)\n e.save(screenpath)\n print('resizing screen : ',screenpath)\n except Exception as e:\n print (\"Error resize_screens: \",e)\n \n\n \n def check_if_has_screens(self,fullurl):\n dirname, filename = os.path.split(fullurl)\n filebasename = os.path.splitext(filename)[0]\n screensdir = os.path.join(dirname,'screens')\n has_screens = 'no'\n \n if os.path.exists(screensdir):\n try:\n screens = os.listdir(screensdir)\n for screen in screens:\n screenfilebasename = os.path.splitext(screen)[0]\n nchars = len(screenfilebasename.replace(filebasename,''))\n screenlabel = screenfilebasename[:-nchars]\n \n if screenlabel == filebasename:\n has_screens = 'yes'\n except Exception as e:\n print (\"Error check_if_has_screens: \",e)\n return has_screens\n \n \n \n def get_all_video_files(self, profile, exts):\n allfiles = []\n try:\n for root, dirs, files in os.walk(unicode(profile)):\n for fn in files:\n fullurl = os.path.join(root, fn)\n filebasename, ext = os.path.splitext(fn)\n if ext.lower() in exts:\n allfiles.append(fullurl)\n except Exception as e:\n print (\"Error get_all_video_files. \",e.args)\n print('Profile is : ',profile)\n return allfiles\n \n \n def run_cmd(self,cmd):\n try:\n retval = subprocess.Popen(cmd,stdout=subprocess.PIPE)\n stdout_value = retval.communicate()[0]\n except Exception as e:\n print (\"Error executing. 
\",e.args)\n \n \n def get_start_time(self,meta):\n '''catch all possible errors and\n provide a default value'''\n duration = datetime.timedelta(0)\n if meta != 'not' and meta != None and meta.has('duration') != False:\n duration = meta.get('duration')\n return duration\n \n \n def make_screen_dir(self,dirname):\n if not os.path.exists(dirname):\n try:\n os.mkdir(dirname) \n except Exception as e:\n print (\"Error make_screen_dir: \",e)\n \n \n def get_unique_urls(self,allfiles):\n alldirs = []\n for file in allfiles:\n dirname = os.path.dirname(file)\n if dirname not in alldirs:\n alldirs.append(dirname)\n return alldirs\n \n \n def delete_screens(self,dirname):\n if os.path.exists(dirname):\n try:\n screens = os.listdir(dirname)\n for screen in screens:\n screenpath = os.path.join(dirname,screen)\n os.remove(screenpath)\n print('Deleting screen',screenpath)\n \n os.rmdir(dirname)\n print('Deleting dir',dirname)\n except Exception as e:\n print (\"Error delete_screens: \",e)\n\n \n def get_metadata(self,fullurl):\n metadata = 'not'\n try:\n filename, realname = unicode(fullurl), fullurl\n parser = createParser(filename, realname)\n metadata = extractMetadata(parser)\n except Exception as e:\n print('fullurl : ',fullurl)\n print (\"Error getting metadata \",e.args)\n return metadata \n \n \n\n\n\n\n\n def get_config_xml(self):\n script_path = os.path.abspath(os.path.dirname(__file__))\n parent = os.path.normpath(os.path.join(script_path, '..'))\n tree = ET.parse(os.path.join(parent, 'config.xml'))\n return tree\n\n\n def get_dirs(self):\n folders = []\n video_folders = self.configxml.find('video_folders')\n for folder in video_folders.getiterator('folder'):\n folders.append(folder.text)\n return folders\n\n\n\n \nif __name__ == '__main__':\n Main()\n \n \n\n\n"
},
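get_ffmpeg_cmd above assembles the classic single-frame grab: seek, decode one frame, write a JPEG. Placing -ss before -i makes ffmpeg seek on the input (fast, keyframe-based) instead of decoding up to the timestamp. The same flags as a standalone sketch; the paths are hypothetical:

import subprocess

def grab_frame(video, out_jpg, seconds):
    # equivalent to: ffmpeg -ss <seconds> -i <video> -vframes 1 -f image2 <out_jpg>
    cmd = ['ffmpeg', '-ss', str(seconds), '-i', video,
           '-vframes', '1', '-f', 'image2', out_jpg]
    subprocess.call(cmd)

grab_frame('movie.avi', 'screens/movie - 0.jpg', 120)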
{
"alpha_fraction": 0.5298631191253662,
"alphanum_fraction": 0.5325590968132019,
"avg_line_length": 30.44444465637207,
"blob_id": "fc91960bc2aa4a6b305916c7f99ee423f0f798e6",
"content_id": "0fca41710752fa6c12293fc69805708be64a2ac8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4822,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 153,
"path": "/tools/check_missing.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "from xml.etree import ElementTree as ET\nimport Image\nimport ImageOps\nimport subprocess\nimport datetime\nimport sys\nimport os\nimport glob\nimport time\n\n\n\n\nclass Main:\n def __init__(self):\n self.configxml = self.get_config_xml()\n\n # go !!!\n profiles = self.get_dirs()\n exts = self.configxml.find('video_exts').text.split(',')\n for profile in profiles:\n #self.get_videos_without_screens(profile,exts)\n self.get_screens_without_videos(profile,exts)\n \n \n \n def get_screens_without_videos(self,profile,exts):\n allfiles = self.get_all_video_files(profile, exts)\n\n screendirs = set()\n for fullurl in allfiles:\n dirname, filename = os.path.split(fullurl)\n screendirs.add(dirname)\n \n for item in screendirs:\n self.check_orphan_screens(os.path.join(item,'screens'))\n \n \n \n def get_videos_without_screens(self,profile,exts):\n allfiles = self.get_all_video_files(profile, exts)\n\n for fullurl in allfiles:\n hasscreens = self.check_if_has_screens(fullurl)\n if hasscreens == 'no':\n print(fullurl)\n\n\n\n def check_orphan_screens(self,screensdir):\n '''This is not perfect :\n If the name of video is changed but the screen still contains\n the name this will not catch it : \n \n Example : \n\n Star Wars The Clone Wars - S03E07 - Assassin.avi\n renamed to :\n Star Wars The Clone Wars - S03E07 - Assassin - Who.avi\n '''\n \n if os.path.exists(screensdir):\n try:\n screens = []\n for screen in os.listdir(screensdir):\n screens.append(os.path.splitext(screen)[0])\n screenfilebasename = os.path.splitext(screen)[0]\n\n screen_has_video = 'no'\n\n upone = os.path.normpath(os.path.join(screensdir, '..'))\n for videofile in os.listdir(upone):\n filebasename = os.path.splitext(videofile)[0]\n nchars = len(screenfilebasename.replace(filebasename,''))\n screenlabel = screenfilebasename[:-nchars]\n \n if screenlabel == filebasename:\n screen_has_video = 'yes'\n if screen_has_video == 'no':\n print(screensdir,screen)\n self.delete_file(os.path.join(screensdir,screen))\n except Exception as e:\n print (\"Error check_if_has_screens: \",e)\n\n \n\n def get_all_video_files(self, profile, exts):\n allfiles = []\n try:\n for root, dirs, files in os.walk(unicode(profile)):\n for fn in files:\n fullurl = os.path.join(root, fn)\n filebasename, ext = os.path.splitext(fn)\n if ext.lower() in exts:\n allfiles.append(fullurl)\n except Exception as e:\n print (\"Error get_all_video_files. 
\",e.args)\n print('Profile is : ',profile)\n return allfiles\n\n\n\n def check_if_has_screens(self,fullurl):\n dirname, filename = os.path.split(fullurl)\n filebasename = os.path.splitext(filename)[0]\n screensdir = os.path.join(dirname,'screens')\n has_screens = 'no'\n \n if os.path.exists(screensdir):\n try:\n screens = os.listdir(screensdir)\n for screen in screens:\n screenfilebasename = os.path.splitext(screen)[0]\n nchars = len(screenfilebasename.replace(filebasename,''))\n screenlabel = screenfilebasename[:-nchars]\n \n if screenlabel == filebasename:\n has_screens = 'yes'\n except Exception as e:\n print (\"Error check_if_has_screens: \",e)\n return has_screens\n\n\n\n def get_dirs(self):\n folders = []\n video_folders = self.configxml.find('video_folders')\n for folder in video_folders.getiterator('folder'):\n folders.append(folder.text)\n return folders\n\n\n\n def get_config_xml(self):\n script_path = os.path.abspath(os.path.dirname(__file__))\n parent = os.path.normpath(os.path.join(script_path, '..'))\n tree = ET.parse(os.path.join(parent, 'config.xml'))\n return tree\n\n\n\n def delete_file(self,full_url):\n try:\n if os.path.exists(full_url):\n os.remove(full_url)\n print('Deleting : ',full_url)\n except Exception as e:\n print('Erros deleting : ',e)\n\n\n\nif __name__ == '__main__':\n Main()\n \n \n\n"
},
{
"alpha_fraction": 0.6138284802436829,
"alphanum_fraction": 0.6246557235717773,
"avg_line_length": 33.9466667175293,
"blob_id": "02c48d7492c60a912549113bc814f6fdbfa15a84",
"content_id": "95895ccc073215129ceadb9fb5c980b691a213a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10529,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 300,
"path": "/lib/video_player.py",
"repo_name": "antonioforte/Video-Gallery",
"src_encoding": "UTF-8",
"text": "import time\nimport os\nimport parse_subs\nfrom PySide import QtCore\nfrom PySide import QtGui\nfrom PySide.phonon import Phonon\n\n\nclass thePlayerEventFilter(QtCore.QObject):\n mouseHasMoved = QtCore.Signal(int,int)\n \n def __init__(self,parent):\n QtCore.QObject.__init__(self,parent)\n \n def eventFilter(self, object, event):\n if event.type() == QtCore.QEvent.MouseMove:\n self.mouseHasMoved.emit(event.x(),event.y())\n return True\n else:\n return False\n\n\n\n\n\nclass thePlayer(Phonon.VideoPlayer):\n '''\n This creates a VideoPlayer widget and sets it\n as the main window CentralWidget.\n Then the control widget is appended.\n When the mouse is in the lower part of the \n VideoPlayer widget, the control widget changes its\n geometry to show itself. \n '''\n def __init__(self, parent,video_url):\n Phonon.VideoPlayer.__init__(self,parent)\n self.parent = parent\n\n self.player = Phonon.VideoPlayer(self.parent)\n self.player.setObjectName('player')\n self.parent.setCentralWidget(self.player)\n \n self.playerWidget = self.player.videoWidget()\n self.playerWidget.setCursor(QtCore.Qt.BlankCursor)\n self.player.load(Phonon.MediaSource(video_url))\n\n self.player.show()\n self.player.setVolume(0.5)\n self.player.play()\n\n self.control = playControl(self.parent,self.player)\n\n # Parse mouse events on the player widget\n self.playerWidget.setMouseTracking(True)\n self.filter = thePlayerEventFilter(self)\n self.filter.mouseHasMoved.connect(self.show_control)\n self.playerWidget.installEventFilter(self.filter)\n\n self.playbtn = self.control.frame.findChild(QtGui.QPushButton,'play')\n self.playbtn.clicked.connect(self.play_video)\n \n self.fullscreen = self.control.frame.findChild(QtGui.QPushButton,'fullscreen')\n self.fullscreen.clicked.connect(self.gofullscreen)\n \n self.timelabel = self.control.frame.findChild(QtGui.QLabel,'labelTime')\n self.player.mediaObject().tick.connect(self.show_time)\n\n self.enablesubs = self.control.frame.findChild(QtGui.QCheckBox,'enablesubs')\n self.enablesubs.stateChanged.connect(self.hide_show_subs)\n\n self.hassubs,self.subsentries = self.get_subtitles(video_url)\n if self.hassubs == 'yes' and self.enablesubs.isChecked():\n self.create_subs_label()\n \n\n\n def get_subtitles(self,video_url):\n url, ext = os.path.splitext(video_url)\n subfile = os.path.join(url+'.srt')\n hassubs = 'no'\n entries = []\n \n if os.path.exists(subfile):\n entries = parse_subs.getSubtitles(subfile)\n hassubs = 'yes'\n return hassubs,entries\n \n \n \n def create_subs_label(self):\n winwidth = self.parent.width()\n subspos = self.parent.height() - 50\n\n self.labelsubs = QtGui.QLabel(self.player)\n self.labelsubs.setGeometry(0,subspos,winwidth,50)\n self.labelsubs.setText('subtitles')\n self.labelsubs.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignHCenter)\n self.labelsubs.setStyleSheet(\"QLabel { background-color:#0a0a0a; color : white; }\")\n self.labelsubs.setObjectName(\"labelsubs\")\n self.labelsubs.show()\n\n \n \n def position_subslabel(self):\n winheight = self.parent.height()\n winwidth = self.parent.width()\n labelpos = self.labelsubs.y()\n subspos = winheight - 50\n\n if self.labelsubs.width() != winwidth or labelpos != subspos:\n self.labelsubs.setGeometry(0,subspos,winwidth,50)\n self.labelsubs.updateGeometry()\n self.labelsubs.update()\n \n \n \n def show_time(self,cur_time):\n cur_total = self.player.mediaObject().totalTime()\n curtime = time.strftime(\"%H:%M:%S\", time.gmtime(cur_time/1000))\n curtotal = time.strftime(\"%H:%M:%S\", 
time.gmtime(cur_total/1000))\n self.timelabel.setText(curtime+' / '+curtotal)\n \n if self.player.findChild(QtGui.QLabel,'labelsubs'):\n self.insert_subtitle(cur_time)\n \n \n \n def insert_subtitle(self,cur_time): \n self.position_subslabel()\n for item in self.subsentries.entries:\n start = item.start\n end = item.end\n\n if cur_time >= start:\n text = ''\n for line in item.lines:\n text += line\n self.labelsubs.setText(text)\n if cur_time >= end:\n self.labelsubs.clear()\n\n \n\n def hide_show_subs(self,state):\n '''Signal reacting to checkbox enable subs'''\n if state == 2:\n self.create_subs_label()\n if state == 0:\n if self.player.findChild(QtGui.QLabel,'labelsubs'):\n self.labelsubs.deleteLater()\n \n \n\n def gofullscreen(self):\n if self.parent.isFullScreen():\n self.parent.showNormal()\n else:\n self.parent.showFullScreen()\n \n\n\n def play_video(self):\n if self.player.isPlaying():\n self.player.pause()\n self.playbtn.setText('play')\n else:\n self.player.play()\n self.playbtn.setText('pause')\n\n\n\n def show_control(self,x,y):\n '''If mouse is on the lower 100px of\n video player widget, show the menu,\n if not hide it.\n '''\n winheight = self.parent.height()\n controlheight = 100\n statbarheight = 0\n yline = winheight - controlheight\n \n if y >= yline and y <= winheight:\n self.show_menu(yline - statbarheight)\n else:\n self.hide_menu()\n \n\n\n def show_menu(self,yline):\n self.control.frame.setGeometry(0,yline,self.parent.width(),100)\n self.control.frame.setAutoFillBackground(True)\n self.control.frame.updateGeometry()\n self.control.frame.update()\n\n\n\n def hide_menu(self):\n self.control.frame.setGeometry(0,0,0,0)\n self.control.frame.updateGeometry()\n self.control.frame.update()\n\n\n\nclass playControl(QtGui.QWidget):\n def __init__(self, parent,vid):\n QtGui.QWidget.__init__(self, parent)\n # row, column, rowSpan, columnSpan\n \n self.frame = QtGui.QFrame(parent)\n self.frame.setObjectName('framePlayControl')\n self.gridLayout = QtGui.QGridLayout()\n\n fixed_size_policy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)\n \n self.seekSlider = Phonon.SeekSlider(self.frame)\n self.seekSlider.setMediaObject(vid.mediaObject())\n self.gridLayout.addWidget(self.seekSlider, 0, 0, 1, 5)\n\n self.volumeSlider = Phonon.VolumeSlider(self.frame)\n self.volumeSlider.setAudioOutput(vid.audioOutput())\n self.gridLayout.addWidget(self.volumeSlider, 1, 0, 1, 5)\n\n #horizontalLayout 1\n self.horizontalLayout_1 = QtGui.QHBoxLayout()\n self.previous = QtGui.QPushButton(self.frame)\n self.previous.setText('previous')\n self.previous.setSizePolicy(fixed_size_policy)\n self.previous.setObjectName('previous')\n self.horizontalLayout_1.addWidget(self.previous)\n \n self.play = QtGui.QPushButton(self.frame)\n self.play.setText('pause')\n self.play.setSizePolicy(fixed_size_policy)\n self.play.setObjectName('play')\n self.horizontalLayout_1.addWidget(self.play)\n \n self.next = QtGui.QPushButton(self.frame)\n self.next.setText('next')\n self.next.setSizePolicy(fixed_size_policy)\n self.next.setObjectName('next')\n self.horizontalLayout_1.addWidget(self.next)\n \n self.back = QtGui.QPushButton(self.frame)\n self.back.setText('back')\n self.back.setSizePolicy(fixed_size_policy)\n self.back.setObjectName('back')\n self.horizontalLayout_1.addWidget(self.back)\n \n self.gridLayout.addLayout(self.horizontalLayout_1, 2, 0, 1, 1)\n spacerItem = QtGui.QSpacerItem(84, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem, 2, 1, 1, 1)\n 
\n #horizontalLayout 2\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.playlistBTN = QtGui.QPushButton(self.frame)\n self.playlistBTN.setObjectName(\"playlistBTN\")\n self.playlistBTN.setText('playlist')\n self.playlistBTN.setSizePolicy(fixed_size_policy)\n self.horizontalLayout_2.addWidget(self.playlistBTN)\n \n self.enhanceBTN = QtGui.QPushButton(self.frame)\n self.enhanceBTN.setObjectName(\"enhanceBTN\")\n self.enhanceBTN.setText('enhance')\n self.enhanceBTN.setSizePolicy(fixed_size_policy)\n self.horizontalLayout_2.addWidget(self.enhanceBTN)\n \n self.gridLayout.addLayout(self.horizontalLayout_2, 2, 2, 1, 1)\n spacerItem1 = QtGui.QSpacerItem(138, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)\n self.gridLayout.addItem(spacerItem1, 2, 3, 1, 1)\n \n #horizontalLayout 3\n self.horizontalLayout_3 = QtGui.QHBoxLayout()\n self.subsBTN = QtGui.QPushButton(self.frame)\n self.subsBTN.setObjectName(\"subsBTN\")\n self.subsBTN.setSizePolicy(fixed_size_policy)\n self.subsBTN.setText('subs')\n self.horizontalLayout_3.addWidget(self.subsBTN)\n \n self.fullscreen = QtGui.QPushButton(self.frame)\n self.fullscreen.setText('fullscreen')\n self.fullscreen.setSizePolicy(fixed_size_policy)\n self.fullscreen.setObjectName('fullscreen')\n self.horizontalLayout_3.addWidget(self.fullscreen)\n\n self.checkBox = QtGui.QCheckBox(self.frame)\n self.checkBox.setObjectName('enablesubs')\n self.horizontalLayout_3.addWidget(self.checkBox)\n \n self.labelTime = QtGui.QLabel(self.frame)\n self.labelTime.setText('00:00:00 / 00:00:00')\n self.labelTime.setSizePolicy(fixed_size_policy)\n self.labelTime.setObjectName(\"labelTime\")\n self.horizontalLayout_3.addWidget(self.labelTime)\n\n self.gridLayout.addLayout(self.horizontalLayout_3, 2, 4, 1, 1)\n self.frame.setLayout(self.gridLayout)\n\n self.frame.setGeometry(0,0,0,0)\n self.setUpdatesEnabled(True)\n self.frame.show()\n \n\n \n \n \n "
}
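insert_subtitle above rescans every cue on every media-object tick; with entries sorted by start time (as an .srt file normally is), the active cue can be found with a binary search instead. A sketch, assuming `entries` is the list produced by parse_subs.getSubtitles and `starts` is precomputed once:

import bisect

def cue_at(entries, starts, cur_time):
    # starts = [e.start for e in entries], built once and reused on every tick
    i = bisect.bisect_right(starts, cur_time) - 1
    if i >= 0 and entries[i].start <= cur_time < entries[i].end:
        return ''.join(entries[i].lines)
    return ''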
] | 17 |
balescream/dijiktraflaskpy | https://github.com/balescream/dijiktraflaskpy | fc625beb6e4e8464703dea71da3f8160bcd4932e | e960102dd44cdc4f6cedadee520289d99223c5ac | 52a09732e4a3080f48fcd9d6acc9d877eb4f7768 | refs/heads/master | 2020-04-29T22:03:16.527005 | 2019-03-19T06:13:16 | 2019-03-19T06:13:16 | 176,433,829 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5361445546150208,
"alphanum_fraction": 0.5873494148254395,
"avg_line_length": 16.44444465637207,
"blob_id": "0ef8cf12e75e11f5a06233d85a4ac4488e029925",
"content_id": "4cfaa5e6bbac53762fb1890e94eb149d462046e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 332,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 18,
"path": "/dbcreat.py",
"repo_name": "balescream/dijiktraflaskpy",
"src_encoding": "UTF-8",
"text": "#creates random database with paths for 50 nodes\r\nimport random\r\nf=open(\"db.txt\",\"a\")\r\nx=0\r\nwhile x < 50:\r\n\tq=random.randint(0,50)\r\n\tf.write(str(q))\r\n\tf.write(\"\t\")\r\n\tp=random.randint(0,50)\r\n\tf.write(str(p))\r\n\tf.write(\"\t\")\r\n\tif p==q:\r\n\t\tf.write('0')\r\n\telse:\r\n\t\tf.write(str(random.randint(1,105)))\r\n\tf.write(\"\\n\")\r\n\tx+=1\r\nf.close();\t\t"
},
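Each line dbcreat.py writes is "<from>\t<to>\t<weight>" with node ids drawn from 0..50 inclusive, which is why dj.py below sizes its adjacency matrix as Graph(51); duplicate (from, to) pairs can occur, and when loaded a later line simply overwrites the earlier weight. A minimal reader for the format (expects db.txt to exist):

with open("db.txt") as f:
    edges = [tuple(int(v) for v in line.split("\t")) for line in f if line.strip()]
print(len(edges), "edges, e.g.", edges[:3])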
{
"alpha_fraction": 0.5089026689529419,
"alphanum_fraction": 0.515113890171051,
"avg_line_length": 28.149999618530273,
"blob_id": "8c616f2dcc648e7b7176a4fa3bb314ae2cdef85d",
"content_id": "6400ef531f94bfc8fdd3fa10240af20f894c70d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2415,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 80,
"path": "/dj.py",
"repo_name": "balescream/dijiktraflaskpy",
"src_encoding": "UTF-8",
"text": "import sys\r\nfrom flask import Flask, request,render_template\r\napp=Flask(__name__) \r\nf=open(\"db.txt\",\"r\") \r\nclass Graph(): \r\n \r\n def __init__(self, vertices): \r\n self.V = vertices \r\n self.graph = [[0 for column in range(vertices)] \r\n for row in range(vertices)] \r\n\r\n def minDistance(self, dist, sptSet): \r\n \r\n # Initilaize minimum distance for next node \r\n min = sys.maxsize\r\n min_index=0\r\n # Search not nearest vertex not in the \r\n # shortest path tree \r\n for v in range(self.V): \r\n if dist[v] < min and sptSet[v] == False: \r\n min = dist[v] \r\n min_index = v \r\n return min_index \r\n \r\n \r\n # using adjacency matrix representation \r\n def dijkstra(self, src): \r\n parent=[None]*self.V\r\n dist = [sys.maxsize] * self.V\r\n dist[src] = 0\r\n sptSet = [False] * self.V\r\n parent[src]=-1 \r\n for cout in range(self.V): \r\n # Pick the minimum distance vertex from \r\n # the set of vertices not yet processed. \r\n # u is always equal to src in first iteration \r\n u = self.minDistance(dist, sptSet) \r\n \r\n # Put the minimum distance vertex in the \r\n # shortest path tree \r\n sptSet[u] = True\r\n # Update dist value of the adjacent vertices \r\n \r\n for v in range(self.V): \r\n if self.graph[u][v] > 0 and sptSet[v] == False and dist[v] > dist[u] + self.graph[u][v]: \r\n dist[v] = dist[u] + self.graph[u][v] \r\n parent[v]=u; \r\n \r\n \r\n return parent\r\n \r\n# Driver program\r\[email protected]('/',methods=['POST','GET'])\r\ndef homepage():\r\n return render_template('input.html') \r\ng = Graph(51)\r\nfor x in f:\r\n t=x.split(\"\\t\")\r\n g.graph[int(t[0])][int(t[1])]=int(t[2])\r\[email protected]('/result',methods=['POST','GET'])\r\ndef result():\r\n if request.method=='POST':\r\n # if not result['source']|| not result['dest']:\r\n \r\n parent=g.dijkstra(int(request.form['source']))\r\n l=[None]\r\n\r\n l.append(request.form['dest'])\r\n i=request.form['dest']\r\n i=int(i)\r\n while not parent[i]==-1:\r\n l.append(parent[i])\r\n i=parent[i] \r\n l.reverse()\r\n l.pop()\r\n\r\n return render_template(\"index.html\",l=l)\r\n\r\nif __name__=='__main__':\r\n app.run(debug=True, port=5000) \r\n "
}
] | 2 |
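The `dj.py` file above implements Dijkstra with an O(V²) scan over the adjacency matrix (`minDistance` walks every vertex on every round). For comparison, a minimal sketch of the same parent-array bookkeeping driven by a binary heap, which replaces the linear scan with O(log V) pops; the adjacency-matrix convention (`graph[u][v] > 0` means an edge of that weight) is taken from the repo, everything else here is illustrative:

```python
import heapq

def dijkstra(graph, src):
    # graph: square adjacency matrix; graph[u][v] > 0 means an edge of weight graph[u][v]
    n = len(graph)
    dist = [float('inf')] * n
    parent = [None] * n
    dist[src] = 0
    parent[src] = -1
    heap = [(0, src)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:
            continue  # stale heap entry; a shorter path to u was already settled
        for v in range(n):
            w = graph[u][v]
            if w > 0 and dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                parent[v] = u
                heapq.heappush(heap, (dist[v], v))
    return parent
```

The returned parent chain can then be walked back from the destination exactly as `result()` does.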
mertcancoskuner/malSearcher | https://github.com/mertcancoskuner/malSearcher | 9c12db2573402a47949c85153d7263f46ac4d66f | 8a977b76b9e050b0f6babc99d4f62563836f9b99 | 71cd4b8ccf888aa000813a97c8f06918039b4e3b | refs/heads/master | 2020-05-04T07:15:49.924276 | 2019-04-01T17:07:22 | 2019-04-01T17:07:22 | 179,023,919 | 0 | 0 | null | 2019-04-02T07:35:36 | 2019-04-01T17:07:24 | 2019-04-01T17:07:22 | null | [
{
"alpha_fraction": 0.44717901945114136,
"alphanum_fraction": 0.4664499759674072,
"avg_line_length": 31.87786293029785,
"blob_id": "34be3c6702aa6bb333bb5ef151d5a6ceb8a07361",
"content_id": "b1d12cb8f362a6e354873ea2b03476d03f313a31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8614,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 262,
"path": "/linuxEnum.py",
"repo_name": "mertcancoskuner/malSearcher",
"src_encoding": "UTF-8",
"text": "# [X] .bash_profile .bashrc\n# [X] Accounts\n# [X] Hidden Files\n# [X] Kernel Modules\n# [X] Scheduled Task\n# [X] Setuid Setgid\n# [X] Trap\n# [X] Sudo\n# [X] Sudo Caching\n# [X] History\n# [X] Certificates\n# [ ] Clipboard\n# [X] Environment Variables\n# [X] Active Connections\n# [X] tmp Folder\n# [X] Processes\n# [X] Firewall status\n# [X] Services\n# [ ] Devices\n# [ ] Disks\n# [ ] Partitions\n# [X] Mount\n# [X] fstab\n# [X] Credentials in Files\n# [X] SSH trusted keys\n# [X] System Info\n# [X] Network Info\n# [X] Passwd & Shadow\n# [X] ARP\n# [X] Home folders\n# [X] World writable files\n# [X] Apps installed\n# [X] Login history\n# [X] Groups\n\n#!/usr/bin/python\n\nimport os\n\ndef executeCmd(cmd, level):\n if cmd is not \"\":\n stdout = os.popen(cmd[0], 'r')\n res = stdout.read().split('\\n')\n printOut(res, level)\n# else:\n# for i in res:\n# innerOut = os.popen(cmd[1] + \" \" + i, 'r')\n# innerRes = stdout.read().split('\\n')\n# printOut(innerRes, level)\n\n\ndef printOut(out, level):\n if out is not '':\n for i in out:\n print(((level * 4 * ' ') + '{:}').format(i))\n print\n print\n\ndef banner():\n print\n print(\" ___ ___ _ _ \")\n print(\" / _ \\ / _ \\| | | | \")\n print(\" _ __| | | | | | | |_| |_ ___ _ __ \")\n print(\" | '__| | | | | | | __| __/ _ \\ '_ \\ \")\n print(\" | | | |_| | |_| | |_| || __/ | | |\")\n print(\" |_| \\___/ \\___/ \\__|\\__\\___|_| |_|\")\n print\n print(\"{:^}\".format('malSearcher by Mert Degirmenci'))\n print('___________________________________________________')\n print\n\ndef main():\n banner()\n print(('{}').format(\"[+] SYSTEM INFO\"))\n print(((2 * 4 * ' ') + '{:}').format(\"[-] /etc/issue\"))\n executeCmd({0:'cat /etc/issue'}, 3)\n print(((2 * 4 * ' ') + '{:}').format(\"[-] /proc/version\"))\n executeCmd({0:'cat /proc/version'}, 3)\n print(((2 * 4 * ' ') + '{:}').format(\"[-] hostname\"))\n executeCmd({0:'hostname'}, 3)\n print(((2 * 4 * ' ') + '{:}').format(\"[-] uname -a\"))\n executeCmd({0:'uname -a'}, 3)\n\n print(('{}').format(\"[+] NETWORK INFO\"))\n executeCmd({0:'ip a'}, 1)\n print\n executeCmd({0:'ifconfig -a'}, 1)\n print\n executeCmd({0:'route'}, 1)\n print\n executeCmd({0:'netstat -antup'}, 1)\n\n print(('{}').format(\"[+] MOUNT\"))\n executeCmd({0:'mount'}, 1)\n\n print(('{}').format(\"[+] FSTAB\"))\n executeCmd({0:'cat /etc/fstab'}, 1)\n\n print(('{}').format(\"[+] PASSWD\"))\n executeCmd({0:'cat /etc/passwd'}, 1)\n\n print(('{}').format(\"[+] GROUPS\"))\n executeCmd({0:'cat /etc/group'}, 1)\n \n print(('{}').format(\"[+] SHADOW\"))\n executeCmd({0:'cat /etc/shadow'}, 1)\n \n print(('{}').format(\"[+] BASH CONFIG FILES\"))\n print(((2 * 4 * ' ') + '{:}').format(\"[-] /etc/profile\"))\n executeCmd({0:'cat /etc/profile'}, 3)\n print\n print(((2 * 4 * ' ') + '{:}').format(\"[-] /etc/bash.bashrc\"))\n executeCmd({0:'cat /etc/bash.bashrc'}, 3)\n print\n stdout = os.popen(\"find /home -name *bashrc 2>/dev/null\", 'r')\n res = stdout.read().split('\\n')\n for i in res:\n if i != \"\":\n print(((2 * 4 * ' ') + '{:}').format(\"[-] \" + i))\n stdout = os.popen(\"cat \" + i, 'r')\n res = stdout.read().split('\\n')\n printOut(res, 3)\n print \n\n stdout = os.popen(\"find /home -name *bash_profile 2>/dev/null\", 'r')\n res = stdout.read().split('\\n')\n for i in res:\n if i != \"\":\n print(((2 * 4 * ' ') + '{:}').format(\"[-] \" + i))\n stdout = os.popen(\"cat \" + i, 'r')\n res = stdout.read().split('\\n')\n printOut(res, 3)\n print \n\n stdout = os.popen(\"find /home -name *profile 2>/dev/null\", 'r')\n res = 
stdout.read().split('\\n')\n for i in res:\n if i != \"\":\n print(((2 * 4 * ' ') + '{:}').format(\"[-] \" + i))\n stdout = os.popen(\"cat \" + i, 'r')\n res = stdout.read().split('\\n')\n printOut(res, 3)\n print \n\n print(('{}').format(\"[+] HIDDEN FILES\"))\n executeCmd({0:\"find / -name '.*' -exec ls -ld {} \\; 2>/dev/null\"}, 1)\n\n print(('{}').format(\"[+] LOGIN HISTORY\"))\n executeCmd({0:'w'}, 1)\n print\n executeCmd({0:\"last\"}, 1)\n\n print(('{}').format(\"[+] SUDOERS\"))\n executeCmd({0:\"cat /etc/sudoers\"}, 1)\n\n print(('{}').format(\"[+] SUDO CACHING\"))\n executeCmd({0:\"find /var/db/sudo -exec ls -ld {} \\; 2>/dev/null\"}, 1)\n\n print(('{}').format(\"[+] HOME FOLDERS\"))\n executeCmd({0:\"ls -lA /home\"}, 1)\n\n print(('{}').format(\"[+] ENVIRONMENT VARIABLE\"))\n executeCmd({0:\"env\"}, 1)\n\n print(('{}').format(\"[+] TMP FOLDER\"))\n executeCmd({0:\"ls -lA /tmp\"}, 1)\n\n print(('{}').format(\"[+] PROCESSES\"))\n executeCmd({0:\"ps aux\"}, 1)\n\n print(('{}').format(\"[+] WORLD WRITABLE FOLDERS\"))\n executeCmd({0:\"find / \\( -wholename '/home/homedir*' -prune \\) -o \\( -type d -perm -0002 \\) -exec ls -ld '{}' ';' 2>/dev/null | grep root\"}, 1)\n\n print(('{}').format(\"[+] WORLD WRITABLE FILES\"))\n executeCmd({0:\"find / \\( -wholename '/home/homedir*' -prune \\) -o \\( -type d -perm -0002 \\) -exec ls -ld '{}' ';' 2>/dev/null | grep -v root\"}, 1)\n\n print(('{}').format(\"[+] SUID & SGID\"))\n executeCmd({0:\"find / \\( -perm -2000 -o -perm -4000 \\) -exec ls -ld {} \\; 2>/dev/null\"}, 1)\n\n print(('{}').format(\"[+] HISTORY FILES\"))\n stdout = os.popen(\"find /home -name *history 2>/dev/null\", 'r')\n res = stdout.read().split('\\n')\n for i in res:\n if i != \"\":\n print(((2 * 4 * ' ') + '{:}').format(\"[-] \" + i))\n stdout = os.popen(\"cat \" + i, 'r')\n res = stdout.read().split('\\n')\n printOut(res, 3)\n print \n\n print(('{}').format(\"[+] SSH TRUSTED KEYS\"))\n stdout = os.popen(\"find /home -name authorized_keys 2>/dev/null\", 'r')\n res = stdout.read().split('\\n')\n for i in res:\n if i != \"\":\n print(((2 * 4 * ' ') + '{:}').format(\"[-] \" + i))\n stdout = os.popen(\"cat \" + i, 'r')\n res = stdout.read().split('\\n')\n printOut(res, 3)\n print \n\n # https://unix.stackexchange.com/questions/97244/list-all-available-ssl-ca-certificates\n print(('{}').format(\"[+] CERTIFICATES\"))\n executeCmd({0:\"awk -v cmd='openssl x509 -noout -subject' '/BEGIN/{close(cmd)};{print | cmd}' < /etc/ssl/certs/ca-certificates.crt\"}, 1)\n\n print(('{}').format(\"[+] SCHEDULED JOBS\"))\n stdout = os.popen(\"find /etc -name cron* 2>/dev/null\", 'r')\n res = stdout.read().split('\\n')\n for i in res:\n if i != \"\":\n stdout = os.popen(\"ls \" + i, 'r')\n res = stdout.read().split('\\n')\n for j in res:\n if j != \"\":\n path = i + \"/\" + j\n print(((2 * 4 * ' ') + '{:}').format(\"[-] \" + path))\n stdout = os.popen(\"cat \" + path, 'r')\n res = stdout.read().split('\\n')\n printOut(res, 2)\n print \n\n print(('{}').format(\"[+] FIREWALL\"))\n print(((2 * 4 * ' ') + '{:}').format(\"[-] Firewall Status\"))\n executeCmd({0:\"systemctl status iptables\"}, 2)\n print\n iptables = {0:'filter', 1:'nat', 2:'mangle', 3:'raw', 4:'security'}\n for i in range(len(iptables)):\n print(((2 * 4 * ' ') + '{:}').format(\"[-] \" + str(iptables[i])))\n stdout = os.popen(\"iptables -vL -t \" + str(iptables[i]), 'r')\n res = stdout.read().split('\\n')\n printOut(res, 3)\n print \n\n print(('{}').format(\"[+] APPS INSTALLED\"))\n executeCmd({0:\"apt list --installed\"}, 1)\n print\n 
executeCmd({0:\"dpkg -l\"}, 1)\n\n print(('{}').format(\"[+] SERVICES\"))\n executeCmd({0:\"systemctl -l --type service --all\"}, 1)\n\n print(('{}').format(\"[+] KERNEL MODULES\"))\n executeCmd({0:\"lsmod\"}, 1)\n\n print(('{}').format(\"[+] ARP\"))\n executeCmd({0:\"arp -a\"}, 1)\n\n print(('{}').format(\"[+] TRAP\"))\n executeCmd({0:\"trap -l\"}, 1)\n\n # https://github.com/sleventyeleven/linuxprivchecker/blob/master/linuxprivchecker.py\n print(('{}').format(\"[+] CREDENTIALS IN FILES\"))\n print(((2 * 4 * ' ') + '{:}').format(\"[-] Config Files @ /etc\"))\n executeCmd({0:\"find /etc -name '*.c*' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null\"}, 3)\n print(((2 * 4 * ' ') + '{:}').format(\"[-] Files @ /home\"))\n executeCmd({0:\"find /home 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null\"}, 3)\n\n# print ('{}').format(\"[+] HASHES\")\n# executeCmd({0:\"find / -exec md5sum {} \\;\"})\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 1 |
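The `linuxEnum.py` script above drives every check through `os.popen`, which only exposes stdout. A sketch of the same collect-and-indent pattern using `subprocess.run` (the current standard-library replacement), which also captures stderr; the sample command mirrors the SYSTEM INFO section and is only an illustration:

```python
import subprocess

def execute_cmd(command, level):
    # Run one shell command and print its output indented `level` stops,
    # mirroring the executeCmd/printOut pair in linuxEnum.py
    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    for line in result.stdout.split('\n'):
        print(level * 4 * ' ' + line)
    if result.stderr:
        print(level * 4 * ' ' + '[stderr] ' + result.stderr.strip())

execute_cmd('uname -a', 3)  # example, as in the SYSTEM INFO section
```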
Beanhauer/test | https://github.com/Beanhauer/test | 1913ecafaacefb76856b0908e1f8cab0d8680683 | 624b38403f90bd532a2ee35f40887557cebd6b79 | a3e9c77a72a06a3a25a42b021710122f9102bce7 | refs/heads/master | 2020-03-25T08:50:20.475809 | 2018-08-05T18:36:33 | 2018-08-05T18:36:33 | 143,634,037 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.612994372844696,
"alphanum_fraction": 0.6299434900283813,
"avg_line_length": 24.285715103149414,
"blob_id": "8e7e2968ba05d43820f437577bf07393b3f6a891",
"content_id": "ec5b5034a9ed1023ab690fa656a14c4c86042659",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 354,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 14,
"path": "/doctest.py",
"repo_name": "Beanhauer/test",
"src_encoding": "UTF-8",
"text": "def main():\n import docx\n doc1 = docx.Document(\"patent patent Alice.docx\")\n doc2 = docx.Document()\n fulltext = []\n for para in doc1.paragraphs:\n fulltext.append(para.text)\n print(fulltext)\n for para in fulltext:\n doc2.add_paragraph(para)\n doc2.save(\"doc2.docx\")\n print(\"MOM KNOWS EVERYTHING FOREVER\")\n\nmain()\n"
}
] | 1 |
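`doctest.py` above copies only `para.text`, so any bold or italic runs in the source document are flattened into plain text. A sketch that also carries run-level character formatting across, using the same python-docx objects; the filenames are the ones hard-coded in the repo:

```python
import docx

src = docx.Document("patent patent Alice.docx")
dst = docx.Document()
for para in src.paragraphs:
    new_para = dst.add_paragraph()
    for run in para.runs:
        new_run = new_para.add_run(run.text)
        # carry over the basic character formatting of each run
        new_run.bold = run.bold
        new_run.italic = run.italic
        new_run.underline = run.underline
dst.save("doc2.docx")
```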
NURx2/CleaningManager | https://github.com/NURx2/CleaningManager | 858c95d815443c5222b7492057bac53cfc8ea2ca | 0ea2980dd9c7972c650660ac0c5188cf1356447a | 65bcd7a1b26c76b2ef8da1853b6e9b4adf23a71a | refs/heads/master | 2020-07-29T09:59:42.996727 | 2019-09-20T09:36:35 | 2019-09-20T09:36:35 | 209,754,075 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.8062499761581421,
"avg_line_length": 160,
"blob_id": "aa5e2d3015c6ee64b16a8663c0772c5e5a0005e5",
"content_id": "54cf2b4098e4302bc57bcba82a0e6364a816fbe5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 1,
"path": "/README.md",
"repo_name": "NURx2/CleaningManager",
"src_encoding": "UTF-8",
"text": "This is the application for roommates in a dormitory, which allows to track the execution and quality of a cleaning. Uses Python 3, SQLAlchemy, Flask, Openpyxl."
},
{
"alpha_fraction": 0.6574074029922485,
"alphanum_fraction": 0.6759259104728699,
"avg_line_length": 26,
"blob_id": "912b3407ab9acb6ad243afa7969c59af32f8e549",
"content_id": "94538d8448e0fa88edeef44da6554c31b29fb0f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 8,
"path": "/src/modules/executor.py",
"repo_name": "NURx2/CleaningManager",
"src_encoding": "UTF-8",
"text": "from src.database import db\n\n\nclass Executor(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(16))\n telegram_id = db.Column(db.String(32))\n # db.session.add(my_object)\n"
},
{
"alpha_fraction": 0.7234042286872864,
"alphanum_fraction": 0.7234042286872864,
"avg_line_length": 22.375,
"blob_id": "1c3530d3919e4a9601f78082e73fc04df722eed6",
"content_id": "3abd6e42c69572b65b80bf6baad588bb7e7080de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 188,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 8,
"path": "/src/database/additions.py",
"repo_name": "NURx2/CleaningManager",
"src_encoding": "UTF-8",
"text": "from . import db\nfrom typing import List\nfrom src.modules.executor import Executor\n\n\ndef update_executors(names: List[str]):\n for name in names:\n executor = Executor(name=name)\n\n"
},
{
"alpha_fraction": 0.6443203091621399,
"alphanum_fraction": 0.6517691016197205,
"avg_line_length": 28.88888931274414,
"blob_id": "81a0b028b0b6e102fccc3ae66ea85084583d520d",
"content_id": "504cfe959cab41140672d30bab88a74f2f8eb8c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 537,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 18,
"path": "/setup.py",
"repo_name": "NURx2/CleaningManager",
"src_encoding": "UTF-8",
"text": "from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"Cleaning Manager\",\n author=\"NURx2\",\n description=\"It allows to track the execution and quality of a cleaning.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.7',\n)"
},
{
"alpha_fraction": 0.6873156428337097,
"alphanum_fraction": 0.6873156428337097,
"avg_line_length": 18.941177368164062,
"blob_id": "6358a0f319ed5019da20fa7235016388f1374eb1",
"content_id": "2bab3587d369b77795b2ada4dbf3bbdcf8af51e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 339,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 17,
"path": "/main.py",
"repo_name": "NURx2/CleaningManager",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom src.database import db\nfrom src.parser.parser import parse\n\napp = Flask(__name__)\n# connection string of database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'\napp.config['SQLACLHEMY_TRACK_MODIFICATIONS'] = False\ndb.init_app(app)\n\n\ndef main():\n parse()\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6262458562850952,
"alphanum_fraction": 0.6328903436660767,
"avg_line_length": 25.173913955688477,
"blob_id": "ddfe91a829e54c1d6f6340b783e84eb7bff11fb9",
"content_id": "3651a27094d9d4c6c54856e108b34d556fc32e9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 606,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 23,
"path": "/src/parser/parser.py",
"repo_name": "NURx2/CleaningManager",
"src_encoding": "UTF-8",
"text": "import openpyxl\n\nfrom src.database.additions import update_executors\nfrom src.static.constants import cnt_weeks\n\n\ndef colored(cell):\n return True\n\n\ndef parse():\n table = openpyxl.load_workbook(filename='table.xlsx')\n main_sheet = table['Лист1']\n names = [value.value for value in main_sheet['A'][1:]]\n update_executors(names)\n\n n = len(names)\n start_row = 2\n start_col = ord('B')\n for i in range(start_col, start_col + cnt_weeks):\n for j in range(start_row, start_row + n):\n if colored(main_sheet[j][i]):\n date_of_the_duty = main_sheet[j][1]\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 13,
"blob_id": "113e84de61ae28b4b06f3df16b9aa5aed1194ff9",
"content_id": "5fed139ed155f57a1ce867dee898c347bd02d9a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 1,
"path": "/src/static/constants.py",
"repo_name": "NURx2/CleaningManager",
"src_encoding": "UTF-8",
"text": "cnt_weeks = 8\n"
}
] | 7 |
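The CleaningManager row above wires `db.init_app(app)` but never creates the schema, and `update_executors` is only reached through the parser. A minimal sketch of bootstrapping the database and inserting an `Executor` row inside an application context; the `db.create_all()` step and the sample values are assumptions about the intended setup, not code from the repo:

```python
from flask import Flask
from src.database import db
from src.modules.executor import Executor

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///data.db'
db.init_app(app)

with app.app_context():
    db.create_all()  # creates the table declared in src/modules/executor.py
    db.session.add(Executor(name='Alice', telegram_id='@alice'))  # sample data
    db.session.commit()
    print(Executor.query.count())
```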
AyushSolanki123/Pong | https://github.com/AyushSolanki123/Pong | dfbc4a5d55dfd4d5e954c1acf7f33f05748a4aed | f201831f455dd82805da003c17cf18065b7cd66b | db9a64dde40d374690e7dec21f06d53cafe7f016 | refs/heads/master | 2022-11-15T08:43:15.958035 | 2020-07-10T09:07:29 | 2020-07-10T09:07:29 | 278,364,942 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7278226017951965,
"alphanum_fraction": 0.7358871102333069,
"avg_line_length": 34.42856979370117,
"blob_id": "bb3108af8ebb78fcb67fcf882046ff93686fc7ca",
"content_id": "5d32557ef0467e66cce96157d66d64a6c873b106",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 496,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 14,
"path": "/README.md",
"repo_name": "AyushSolanki123/Pong",
"src_encoding": "UTF-8",
"text": "# Pong\n<h3>This is my very own version of the classic pong game.</h3>\n\nThis game is completly built in python language using pygame module.<br>\nFeel free to use this code.\n\n***DOWNLOAD ALL THE FILES TO RUN THE CODE***\n\nTo run the Python code in command line<br>\n-> Firstly open the command prompt.<br>\n-> Using the cd command move to your directory.<br>\n-> Then type the command \"python memory-game.py\"<br>\n\n<h3>NOTE:</h3> Make sure to keep all the files in the same directory this is mandatory.\n"
},
{
"alpha_fraction": 0.4938816428184509,
"alphanum_fraction": 0.5372116565704346,
"avg_line_length": 28.212121963500977,
"blob_id": "07df8be331c2f0d44ed5d3add83d28b4e63848d8",
"content_id": "62425d4033ba96d006d6840009d0ebb35cf91f47",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4985,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 165,
"path": "/pong.py",
"repo_name": "AyushSolanki123/Pong",
"src_encoding": "UTF-8",
"text": "import pygame\r\nimport random\r\npygame.init()\r\npygame.mixer.init()\r\n\r\n#GAME VARIABLES\r\nWIDTH = 800\r\nHEIGHT = 600\r\nFPS = 30\r\n\r\n#COLORS #R #G #B\r\nRED = (255, 0, 0)\r\nBLACK = (0 , 0, 0)\r\nWHITE = (255, 255, 255)\r\nBLUE = (0 , 0, 255)\r\nORANGE = (200, 128, 0)\r\nRANDOM = (128, 128, 255)\r\n\r\n#FONTS\r\nGFONT = pygame.font.SysFont('Algerian', 30, True, True)\r\nSFONT = pygame.font.SysFont('Cambria', 20, True)\r\n\r\n#SETTING WINDOW\r\nwin = pygame.display.set_mode((WIDTH, HEIGHT))\r\npygame.display.set_caption('PONG')\r\nicon = pygame.image.load('ping-pong.png')\r\npygame.display.set_icon(icon)\r\n\r\n#LOADING BACKGROUND\r\nbg = pygame.image.load('background.png')\r\nbg = pygame.transform.scale(bg, (WIDTH, HEIGHT)).convert_alpha()\r\n\r\n#SETTING CLOCK\r\nclock = pygame.time.Clock()\r\n\r\n#MUSICS\r\nmusic = pygame.mixer.music.load('background.mp3')\r\npygame.mixer.music.play(-1)\r\nhitSound = pygame.mixer.Sound('hit.wav')\r\n\r\nclass player(object):\r\n def __init__(self, x, y, width, height, color):\r\n self.x = x\r\n self.y = y\r\n self.width = width\r\n self.height = height\r\n self.color = color\r\n self.vel = 10\r\n self.score = 0\r\n\r\n def draw(self, win):\r\n pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.height))\r\n\r\nclass ball(object):\r\n def __init__(self, x, y, width, height):\r\n self.x = x\r\n self.y = y\r\n self.width = width\r\n self.height = height\r\n self.dirX = 10\r\n self.dirY = 10\r\n\r\n def draw(self, win):\r\n self.move()\r\n b1 = pygame.image.load('ball.png')\r\n win.blit(b1, (self.x, self.y))\r\n\r\n def move(self):\r\n self.x += self.dirX\r\n self.y += self.dirY\r\n if self.y >= HEIGHT:\r\n self.dirY *= -1\r\n self.y += self.dirY\r\n if self.y <= 0:\r\n self.y -= self.dirY\r\n self.dirY *= -1\r\n self.y -= self.dirY\r\n\r\ndef displayText(font, message, color, x, y):\r\n text = font.render(message, 1, color)\r\n win.blit(text, (x, y))\r\n\r\ndef redrawGameWindow():\r\n win.blit(bg, (0, 0))\r\n PADDLE1.draw(win)\r\n PADDLE2.draw(win)\r\n BALL.draw(win)\r\n displayText(SFONT, 'PLAYER 1 SCORE:' + str(PADDLE1.score), WHITE, 10, 10)\r\n displayText(SFONT, 'PLAYER 2 SCORE:' + str(PADDLE2.score), WHITE, 600, 10)\r\n if BALL.x >= WIDTH or BALL.x <= 20:\r\n displayText(GFONT, 'PRESS SPACE TO RESET BALL', ORANGE, 200, HEIGHT/2)\r\n if PADDLE1.score == 100:\r\n displayText(GFONT, 'PLAYER 1 WINS', RANDOM, 300, HEIGHT/2)\r\n if PADDLE2.score == 100:\r\n displayText(GFONT, 'PLAYER 2 WINS', RANDOM, 300, HEIGHT/2)\r\n pygame.display.update()\r\n\r\nPADDLE1 = player(26, HEIGHT/2, 20, 100, BLUE)\r\nPADDLE2 = player(753, HEIGHT/2, 20, 100, RED)\r\nBALL = ball(WIDTH/2 - 16, HEIGHT/2, 32, 32)\r\n\r\ndef welcome():\r\n exit_game = False\r\n while not exit_game:\r\n win.fill(RANDOM)\r\n displayText(GFONT, \"WELCOME TO PONG\", BLACK, WIDTH/2 -150, HEIGHT/2)\r\n displayText(GFONT, \"PRESS SPACE TO PLAY\", BLACK, WIDTH/2 - 160, HEIGHT/2 + 40)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n exit_game = True\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_SPACE:\r\n main()\r\n\r\n pygame.display.update()\r\n clock.tick(FPS)\r\n\r\ndef main():\r\n run = True\r\n while run:\r\n clock.tick(FPS)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_w] and PADDLE1.y > PADDLE1.vel:\r\n PADDLE1.y -= PADDLE1.vel\r\n if keys[pygame.K_s] and PADDLE1.y < HEIGHT - PADDLE1.vel - PADDLE1.height:\r\n 
PADDLE1.y += PADDLE1.vel\r\n if keys[pygame.K_UP] and PADDLE2.y > PADDLE2.vel:\r\n PADDLE2.y -= PADDLE2.vel\r\n if keys[pygame.K_DOWN] and PADDLE2.y < HEIGHT - PADDLE2.vel - PADDLE2.height:\r\n PADDLE2.y += PADDLE2.vel\r\n\r\n if BALL.x >= WIDTH or BALL.x <= 20:\r\n if keys[pygame.K_SPACE]:\r\n if BALL.x > WIDTH:\r\n PADDLE1.score += 10\r\n if BALL.x < 20:\r\n PADDLE2.score += 10\r\n BALL.x = random.randint(200, 600)\r\n BALL.y = random.randint(150, 450)\r\n BALL.dirX *= -1\r\n BALL.x += BALL.dirX\r\n\r\n if (BALL.x > 753 and BALL.x < 773) and (BALL.y > PADDLE2.y and BALL.y < PADDLE2.y + 100):\r\n hitSound.play()\r\n BALL.dirX *= -1\r\n BALL.x += BALL.dirX\r\n\r\n if (BALL.x > 26 and BALL.x < 46) and (BALL.y > PADDLE1.y and BALL.y < PADDLE1.y + 100):\r\n hitSound.play()\r\n BALL.dirX *= -1\r\n BALL.x += BALL.dirX\r\n\r\n if PADDLE1.score == 100 or PADDLE2.score == 100:\r\n run = False\r\n\r\n redrawGameWindow()\r\n\r\n pygame.quit()\r\n\r\nwelcome()\r\n"
}
] | 2 |
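`main()` in pong.py above tests paddle hits with hand-written coordinate ranges such as `(BALL.x > 753 and BALL.x < 773)`. The same test reads more clearly through `pygame.Rect.colliderect`; a sketch using the 32x32 ball and 20x100 paddle sizes from the repo (the helper itself is illustrative, not the repo's code):

```python
import pygame

def ball_hits_paddle(ball_x, ball_y, paddle_x, paddle_y):
    # Equivalent to the manual range checks in main(), expressed with rects
    ball_rect = pygame.Rect(ball_x, ball_y, 32, 32)         # ball is 32x32
    paddle_rect = pygame.Rect(paddle_x, paddle_y, 20, 100)  # paddles are 20x100
    return ball_rect.colliderect(paddle_rect)
```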
Fueled250/Gross-Pay-Val-Python | https://github.com/Fueled250/Gross-Pay-Val-Python | f01bbd4aa8b188df014a86fde836cb3258b6f41f | 5b66c5eafd249ac687c19f1ed8fd248e94272ee7 | 4580cd826f569f013214bd1683086e45564566b1 | refs/heads/master | 2021-01-21T22:26:23.578715 | 2017-05-24T19:02:02 | 2017-05-24T19:02:02 | 92,326,767 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6042065024375916,
"alphanum_fraction": 0.6195028424263,
"avg_line_length": 26.2702693939209,
"blob_id": "8f55d0ad1f6253358af3c56a687683e85dcb91ad",
"content_id": "85a8740ade6c3205cc11c8566fc536ffad61226f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1046,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 37,
"path": "/gross_pay_val.py",
"repo_name": "Fueled250/Gross-Pay-Val-Python",
"src_encoding": "UTF-8",
"text": "#S.McDonald 10/11/2016\r\n#Gross Pay: get the number of hours worked and calculate a Gross Pay\r\n\r\ntry:\r\n #input\r\n #get the hours\r\n hours = int(input(\"Enter the hours worked for the week: \"))\r\n #validate the input for 'hours'\r\n while hours < 0 or hours > 168:\r\n print(\"Invalid number of hours entered.\")\r\n hours = int(input(\"Enter the hours worked for the week: \"))\r\n\r\n #get the pay rate\r\n rate = float(input(\"Enter the hourly pay rate: \"))\r\n #validate the input for 'pay rate'\r\n #while rate < 8.39:\r\n #print(\"Invalid number of rate of pay entered.\")\r\n #rate = float(input(\"Enter the hourly pay rate: \"))\r\n\r\n\r\n #processing\r\n #calculate gross pay\r\n gross_pay = hours * rate\r\n\r\n\r\n #output\r\n #display the output\r\n print(\"Gross pay: $\", format(gross_pay, ',.2f'))\r\n \r\nexcept ZeroDivisionError:\r\n print(\"ERROR: CANNOT DIVIDE BY ZERO!\")\r\n\r\nexcept ValueError:\r\n print(\"ERROR: HOURS WORKED & HOURLY PAY MUST BE VALID INTEGERS!\")\r\n\r\nexcept:\r\n print(\"ERROR: CHECK YOUR DATA!\")\r\n"
}
] | 1 |
mfarooq95/Hangman-Game | https://github.com/mfarooq95/Hangman-Game | 0fb7438dbacdd3ae0ee8cd0e6fedfceb810343fe | 2fc6d746c9f10a5d26313b9036898aa6b763adbb | 040ff4d4ba7486214e1b4f9fa4f47a36f6bf56e7 | refs/heads/main | 2023-08-04T05:41:33.725850 | 2021-09-27T04:15:40 | 2021-09-27T04:15:40 | 410,737,862 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6118211150169373,
"alphanum_fraction": 0.6142172813415527,
"avg_line_length": 38.41935348510742,
"blob_id": "51f18f22db27d6320bf3a5c319460a1a1ba337b1",
"content_id": "e7bf69dc551fbb2f0178e8a17fc43a04c003fb4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2504,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 62,
"path": "/main.py",
"repo_name": "mfarooq95/Hangman-Game",
"src_encoding": "UTF-8",
"text": "from words import words\r\nimport string\r\nimport random\r\n\r\n# Randomly get a valid word from an imported list of words\r\ndef get_valid_word(words):\r\n \r\n word = random.choice(words)\r\n if \"-\" in word or \" \" in word: \r\n word = get_valid_words(words) # Recursion\r\n return word \r\n\r\n# Uses random word to start the game, takes player input and determines win and loss\r\ndef hangman():\r\n\r\n word = get_valid_word(words).upper() \r\n word_letters = set(word) # Deconstructes the winning word into letters as an iterable set to use as refernce\r\n alphabet = set(string.ascii_uppercase) # Iterable set of English alphabet in uppercase to standardize and use as letter reference against player input\r\n guessed_letters = set() # Blank set to populate with and reference against player input\r\n lives = 12 # Number of player lives\r\n\r\n while lives > 0 and len(word_letters) > 0:\r\n print(f\"You have {lives} lives. \\nThe following letters have been guessed: \", \" \".join(guessed_letters))\r\n \r\n word_list = [letter if letter in guessed_letters else \"-\" for letter in word]\r\n print(\"The current word is: \", \" \".join(word_list))\r\n \r\n player_guess = input(\"\\nPlease enter a letter: \").upper()\r\n if player_guess in alphabet - guessed_letters:\r\n guessed_letters.add(player_guess)\r\n if player_guess in word_letters:\r\n word_letters.remove(player_guess)\r\n else:\r\n lives -= 1\r\n print(f\"\\nYour guess, {player_guess} was not in the winning word. You lost a life.\\n\")\r\n elif player_guess in guessed_letters:\r\n print(\"\\nYou already guessed this letter! Try again.\\n\")\r\n else:\r\n print(\"\\nInvalid entry. You can only use English letters. Try again.\\n\")\r\n\r\n if lives == 0:\r\n print(\"\\nYou ran out of lives before you finished the word! You lose!\")\r\n else:\r\n print(\"\\nYou guessed the word, you win!\")\r\n\r\ndef replay_game():\r\n play_again_bool = False\r\n user_input = input(\"Play again? Y or N: \").upper()\r\n if user_input == 'Y':\r\n play_again_bool = True\r\n elif user_input == 'N':\r\n play_again_bool = False\r\n else:\r\n print(\"Invalid response. Please type Y for yes or N for no.\")\r\n play_again_bool = replay_game() # Recursion\r\n return play_again_bool\r\n\r\nif __name__ == \"__main__\":\r\n play_again = True\r\n while play_again == True:\r\n hangman()\r\n play_again = replay_game()"
},
{
"alpha_fraction": 0.7888888716697693,
"alphanum_fraction": 0.7888888716697693,
"avg_line_length": 44,
"blob_id": "19f66c8ce5726fcaa1333d64dd97dcead1dc4f7f",
"content_id": "d8756cecb011b22df86e403db86eebb86431c057",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 540,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 12,
"path": "/README.md",
"repo_name": "mfarooq95/Hangman-Game",
"src_encoding": "UTF-8",
"text": "# Hangman-Game\nA small hangman game created in Python\n\n# main.py\nThe main.py file is the primary python file needed to run the hangman game itself\n\nYou must download and run this python file to intialize and play the hangman game\n\n# words.py\nThe words.py file is a python file containing a list of ASCII-approved words that is both imported and used by the hangman game itself in main.py\n\nYou must download and store this .py file in the same directory in which the hangman game's main.py file is in, in order to appropriately play hangman\n"
}
] | 2 |
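The guess handling in `hangman()` above hinges on one set difference: `alphabet - guessed_letters` is the pool of letters that are still legal to play. A tiny worked example of how the three input branches separate:

```python
import string

alphabet = set(string.ascii_uppercase)
guessed_letters = {'A', 'E'}

for guess in ('B', 'A', '7'):
    if guess in alphabet - guessed_letters:
        print(guess, '-> new valid guess')        # taken for 'B'
    elif guess in guessed_letters:
        print(guess, '-> already guessed')        # taken for 'A'
    else:
        print(guess, '-> not an English letter')  # taken for '7'
```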
smitteh1/OpenDetection3 | https://github.com/smitteh1/OpenDetection3 | 20cb9ce811344d2a13621fd8429f862626719728 | 9b6fdc3ebacc397794cf6d8122c7fbc119a346cc | db4547b8c46542faeee2cf475037b31a482b13ca | refs/heads/master | 2021-02-10T17:28:14.660369 | 2020-03-09T13:40:24 | 2020-03-09T13:40:24 | 244,403,832 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8571428656578064,
"alphanum_fraction": 0.8571428656578064,
"avg_line_length": 27,
"blob_id": "58b15ebf7b7040f20a17c3639cffc1d4164830aa",
"content_id": "fc4f26c891f526e11298a8993c55b7b918917f5b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 140,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 5,
"path": "/Python/OpenDetection.py",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "from Image import ODImage\nfrom Exception import ODException\nfrom Sensor import ODSensor\nfrom Recon import ODRecon\nfrom Morph import ODMorph\n"
},
{
"alpha_fraction": 0.7307692170143127,
"alphanum_fraction": 0.7692307829856873,
"avg_line_length": 25,
"blob_id": "3f015feae710a28fb3dabae76daee935218e3662",
"content_id": "dbbb328b22b22f18399a595adefc02db1c08d53f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 26,
"license_type": "permissive",
"max_line_length": 25,
"num_lines": 1,
"path": "/Python/README.md",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "# OpenDetection3 - Python "
},
{
"alpha_fraction": 0.6482213735580444,
"alphanum_fraction": 0.6521739363670349,
"avg_line_length": 18.538461685180664,
"blob_id": "68fec81716dfcf98d68fc7bc6c4b70c550297a60",
"content_id": "efdd145b58500c1b15611fd340e0d82f7a216986",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 253,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 13,
"path": "/examples/python/displayimage.py",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "from OpenDetection import ODImage\n\ndef main() :\n # contructor requires a path.\n image = ODImage(\"image1.png\");\n\n # display image\n image.display();\n # you can also display it with a window name\n # image.display(\"window_name\");\n\n\nmain();"
},
{
"alpha_fraction": 0.5457627177238464,
"alphanum_fraction": 0.5525423884391785,
"avg_line_length": 32.71428680419922,
"blob_id": "4645c1e8ec468c04834aba64e2f5da4ddafa4b1f",
"content_id": "1c5a17e8bd2cbcee9022bf17e4d5606a160470a0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1180,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 35,
"path": "/Python/Image.py",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "import cv2 \nfrom Exception import ODException\n \nclass ODImage:\n def __init__(self, directory):\n im = cv2.imread(directory);\n if im is not None:\n self.image = im;\n self.original = self.image.copy();\n self.filedir = directory;\n else:\n raise ODException(\"No Valid Image : ODImage(%s)\" % directory);\n \n def display(self, window_title = None):\n if self.image is not None:\n if window_title is None:\n cv2.imshow(self.filedir, self.image);\n cv2.waitKey(0);\n else:\n cv2.imshow(window_title, self.image);\n cv2.waitKey(0);\n else:\n raise ODException(\"No Valid Image : display() [%s]\" % self.filedir);\n\n def getCurrentImage(self):\n if self.image is not None:\n return self.image;\n else:\n raise ODException(\"No Valid Image : getCurrentImage() [%s]\" % self.filedir);\n\n def getOriginalImage(self):\n if self.original is not None:\n return self.original;\n else:\n raise ODException(\"No Valid Image : getOriginalImage() [%s]\" % self.filedir);\n"
},
{
"alpha_fraction": 0.8253968358039856,
"alphanum_fraction": 0.841269850730896,
"avg_line_length": 20,
"blob_id": "97d2292e4c0790686ade379669939bdb6a3e0134",
"content_id": "2e0ce6262df92ceade18b55e810847c3fb023b53",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 63,
"license_type": "permissive",
"max_line_length": 25,
"num_lines": 3,
"path": "/Python/Filter.py",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom Image import ODImage\nfrom Video import ODVideo\n"
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.5857142806053162,
"avg_line_length": 14.5,
"blob_id": "029d045cdef775a9aff552aa907de3860d4d3f02",
"content_id": "65c73a3075c84bc02c152bd9aa6c0cca7fd431b4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 70,
"license_type": "permissive",
"max_line_length": 34,
"num_lines": 4,
"path": "/Python/Video.py",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "import cv2\n\nclass ODVideo:\n def __init__(self, camNumber):\n "
},
{
"alpha_fraction": 0.6200000047683716,
"alphanum_fraction": 0.6399999856948853,
"avg_line_length": 11.75,
"blob_id": "7ae300e07b2b48a0ae72f8b35a14788b06f032c8",
"content_id": "4fd3bb2ae19d509006e9326d4e34466172b1e6ef",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 4,
"path": "/Python/Recon.py",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "import cv2\n\nclass ODRecon:\n def __init__(self):"
},
{
"alpha_fraction": 0.6103752851486206,
"alphanum_fraction": 0.6192052960395813,
"avg_line_length": 22.86842155456543,
"blob_id": "b79b27b7fd75690d5781cfd4fc92b2b547d9745f",
"content_id": "e200e8477fdd9033bbf3bc91b31fa58092f9512f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 906,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 38,
"path": "/CPP/ODImage.cpp",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "#include \"ODImage.h\"\n#include \"ODException.h\"\n\nMat image;\nMat original;\nstring imageDir;\n\nODImage::ODImage (string dir) {\n Mat cur = imread(dir);\n const Size size = cur.size();\n if (size.width > 0 && size.height > 0) {\n image = cur;\n image.copyTo(original);\n imageDir = dir;\n } else throw ODInvalidImage(const_cast<char*>(dir.c_str()));\n}\n\nvoid ODImage::display () {\n if (image.size().width > 0 && image.size().height > 0) {\n imshow(imageDir, image);\n waitKey(0);\n } else throw ODInvalidImage(const_cast<char*>(imageDir.c_str()));\n}\n\nvoid ODImage::display (string win) {\n if (image.size().width > 0 && image.size().height > 0) {\n imshow(win, image);\n waitKey(0);\n } else throw ODInvalidImage(const_cast<char*>(imageDir.c_str()));\n}\n\nMat ODImage::getCurrentImage () {\n return image;\n}\n\nMat ODImage::getOriginalImage () {\n return original;\n}"
},
{
"alpha_fraction": 0.5740740895271301,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 17.08333396911621,
"blob_id": "ea4d4cfea7ded15f2dd360c4df296fdb288213a3",
"content_id": "3122e30a7240b6682967b025cea046482e281cbb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 216,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 12,
"path": "/examples/cpp/displayimage.cpp",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "#include \"../../CPP/ODImage.h\"\n\nint main() {\n ODImage image(\"../images/image1.png\");\n\n // display image\n image.display();\n // display with window name\n // image.display(\"window_name\");\n\n return 0;\n}"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6545454263687134,
"avg_line_length": 21.200000762939453,
"blob_id": "6ce8d16e2314d14418a17d2a9799d39b866f2a9d",
"content_id": "a22691ce2b5742bf282e55bcfce2c372b5eb4b38",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 5,
"path": "/Python/Sensor.py",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "import cv2\n\nclass ODSensor:\n def __init__(self, frame):\n self.net = cv2.dnn.readNetFromTensorflow();"
},
{
"alpha_fraction": 0.6086956262588501,
"alphanum_fraction": 0.6521739363670349,
"avg_line_length": 23,
"blob_id": "73c5498fc3828063e3b40f90527cf8beb16e2490",
"content_id": "fd8d369f7f0930668b1b5bb9bdafca14aa4915bd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 23,
"license_type": "permissive",
"max_line_length": 23,
"num_lines": 1,
"path": "/CPP/README.md",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "# OpenDetection 3 - C++"
},
{
"alpha_fraction": 0.6377778053283691,
"alphanum_fraction": 0.6466666460037231,
"avg_line_length": 19.5,
"blob_id": "ec63fc85176dd6befffced9724a908422a7407f4",
"content_id": "e0bf0f859c70d4d849d17b7907a3b393914ed865",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 450,
"license_type": "permissive",
"max_line_length": 42,
"num_lines": 22,
"path": "/CPP/ODImage.h",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "#include <opencv2/core/core.hpp>\n#include <opencv2/highgui/highgui.hpp>\n#include <opencv2/imgproc/imgproc.hpp>\n#include <opencv2/imgcodecs/imgcodecs.hpp>\n\nusing namespace std;\nusing namespace cv;\n\nclass ODImage {\n public:\n ODImage(string);\n void display();\n void display(string);\n Mat getCurrentImage();\n Mat getOriginalImage();\n\n private:\n Mat image;\n Mat original;\n string imageDir;\n\n};"
},
{
"alpha_fraction": 0.6751269102096558,
"alphanum_fraction": 0.6802030205726624,
"avg_line_length": 23.75,
"blob_id": "62d0db7209444283feb701d14472b330486f1591",
"content_id": "360c66c559b4249a9c9c035e3f8a214e23081245",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 8,
"path": "/Python/main.py",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "from OpenDetection import ODImage\nimport os\n\ndef main() :\n image = ODImage(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/images/hand1.png');\n image.display(\"TEST\");\n\nmain();"
},
{
"alpha_fraction": 0.7790697813034058,
"alphanum_fraction": 0.7848837375640869,
"avg_line_length": 20.5,
"blob_id": "bc30bb69ceee7e8177446a93f767c23945936d3f",
"content_id": "bd24cb6c26e818755097c9d3f07e1880adb38adf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 172,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 8,
"path": "/Python/Morph.py",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "import cv2\nfrom Image import ODImage\nfrom Video import ODVideo\nfrom Filter import ODFilter\nfrom Exception import ODException\n\nclass ODMorph:\n def __init__(self, frame):\n"
},
{
"alpha_fraction": 0.6155097484588623,
"alphanum_fraction": 0.6350325345993042,
"avg_line_length": 46.28205108642578,
"blob_id": "d96ddf7db9ce3eddee654eba04fb51d98172d3b1",
"content_id": "258b6e693e4adbce004ac75b20234d6e15c72a6f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1844,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 39,
"path": "/README.md",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "<h1 align=\"center\">Open Detection 3</h1>\n <p align=\"center\">\n <img src='https://img.shields.io/badge/OpenCV-4.20-violet'></img>\n <img src='https://img.shields.io/badge/Build-WIP-yellow'></img>\n <img src='https://img.shields.io/badge/Dependencies-up%20to%20date-green'></img>\n\n </p>\n<h3 align=\"center\">Open source software that allows you to detect multiple types of objects and interact with the world using computer vision.</h3>\n\n# Detection Methods\n| OD3 Object | C++ Obj | Python Obj | JavaScript Obj | Build Status |\n|--------------------|--------------------|------------------|------------------|--------------|\n| Object Tracking | ObjTracker.h | ObjTracker | ObjTracker | Partial |\n| Object Recognition | ObjRecognition.h | ObjRecognition | ObjRecognition | In Progress |\n| Voice Recognition | VoiceRecognition.h | VoiceRecognition | VoiceRecognition | TO-DO |\n| Finger Detection | FingerDetection.h | FingerDetection | FingerDetection | TO-DO |\n| Face Detection | FaceDetection.h | FaceDetection | FaceDetection | In Progress |\n| Face Tracking | FaceTracker.h | FaceTracker | FaceTracker | Partial |\n| Face Recognition | FaceRecognition.h | FaceRecognition | FaceRecognition | TO-DO |\n\n- More to come!\n\n# Dependencies\n- <a href='https://opencv.org/releases/'>OpenCV (4.20)</a>\n\n\n# Python Library\n- <a href='https://pypi.org/project/opencv-python/'>pip install opencv-python</a>\n- <a href='https://github.com/smitteh1/OpenDetection3/blob/master/Python/OpenDetection.py'>import OpenDetection</a>\n\n# JavaScript Library\n- Coming Soon\n\n# Systems Tested For\n- Windows 32Bit (AMD)\n- Windows 64Bit (AMD)\n- Windows 64Bit (Intel)\n- Linux (Ubuntu 18.04) 64Bit (Intel)\n- Linux (Ubuntu 18.04) 32Bit (AMD)\n"
},
{
"alpha_fraction": 0.658203125,
"alphanum_fraction": 0.658203125,
"avg_line_length": 17.321428298950195,
"blob_id": "3fb5edcf657acbc8314a85d758a58a4b10076967",
"content_id": "b5ff40b4e4ebd40290c0edb42bbc53505d319c0f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 512,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 28,
"path": "/CPP/ODException.cpp",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "#include \"ODException.h\"\n#include \"string.h\"\n\nchar* generic;\nchar* invalidimage;\n\nconst char* convert(char* x, char* y) {\n char* z;\n strcpy(z, x); strcat(z, y);\n return const_cast<char*>(z);\n}\n\nODException::ODException(char* str) {\n generic = str;\n}\n\nconst char* ODException::what() const throw()\n{\n return generic;\n}\n\nODInvalidImage::ODInvalidImage(char* str) {\n invalidimage = str;\n}\n\nconst char* ODInvalidImage::what() const throw() {\n return convert(\"No Valid Image : \", invalidimage);\n}"
},
{
"alpha_fraction": 0.7666666507720947,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 30,
"blob_id": "1856b8a800c1f156a61325d1bbccad330789adc5",
"content_id": "51ad2f87c2e2aec62071c6d6d576f0b4850f0e73",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 30,
"license_type": "permissive",
"max_line_length": 30,
"num_lines": 1,
"path": "/JavaScript/README.md",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "# OpenDetection 3 - JavaScript"
},
{
"alpha_fraction": 0.6194030046463013,
"alphanum_fraction": 0.6194030046463013,
"avg_line_length": 19.69230842590332,
"blob_id": "3b863f0d713cdd1051277472157d717be38981a5",
"content_id": "653e4df194874b5248c73a80b699c3f446605c5c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 268,
"license_type": "permissive",
"max_line_length": 41,
"num_lines": 13,
"path": "/CPP/ODException.h",
"repo_name": "smitteh1/OpenDetection3",
"src_encoding": "UTF-8",
"text": "#include <exception>\n\nclass ODInvalidImage: std::exception{\n public:\n ODInvalidImage(char*);\n const char* what() const throw();\n};\n\nclass ODException: std::exception{\n public:\n ODException(char*);\n const char* what() const throw();\n};"
}
] | 18 |
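`ODSensor` above reaches for `cv2.dnn.readNetFromTensorflow`, so a forward pass presumably follows the standard OpenCV DNN flow. A minimal sketch of that flow; the model/config paths, input size, and channel swap are placeholders that depend on the trained network — only the OpenCV calls themselves are real API:

```python
import cv2

# Hypothetical frozen TensorFlow graph plus its text config
net = cv2.dnn.readNetFromTensorflow('frozen_inference_graph.pb',
                                    'graph_config.pbtxt')

frame = cv2.imread('image1.png')  # e.g. the sample image used by the examples
# Pack the frame into a 4-D blob; size and swapRB depend on the model
blob = cv2.dnn.blobFromImage(frame, size=(300, 300), swapRB=True)
net.setInput(blob)
detections = net.forward()
print(detections.shape)
```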
JaiBaheti/WebAppFlask | https://github.com/JaiBaheti/WebAppFlask | d997ce8d682131fd74ed7692ded3557a34425100 | 7813c0e13b7af9d24020fe13ffd720c61ed67b4c | 728bd038bc1388ed7d250c8364fd1ec137ac7302 | refs/heads/master | 2022-11-28T16:04:00.718038 | 2020-08-08T13:55:43 | 2020-08-08T13:55:43 | 282,203,379 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7661290168762207,
"alphanum_fraction": 0.7661290168762207,
"avg_line_length": 23.799999237060547,
"blob_id": "74ee47754c84d06fa23cd72aba18f01a4a5922e8",
"content_id": "7a6436af35914866afc4b6e3acf83b9153e7e3fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 124,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 5,
"path": "/README.md",
"repo_name": "JaiBaheti/WebAppFlask",
"src_encoding": "UTF-8",
"text": "# WebApp Using Flask\nA simple WebApp which lets you add, update, delete reminders.\n\n# Command to run the code\npython app.py\n"
},
{
"alpha_fraction": 0.5635718703269958,
"alphanum_fraction": 0.5753991603851318,
"avg_line_length": 29.314815521240234,
"blob_id": "f57981ecb8e39dbea0e31cd390e857cbec382b62",
"content_id": "563a4b9290117848e2f4ac6b4706b329987054e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1691,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 54,
"path": "/app.py",
"repo_name": "JaiBaheti/WebAppFlask",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, url_for, redirect, request\r\nfrom flask_bootstrap import Bootstrap\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras.models import load_model\r\nimport numpy as np\r\nimport imutils\r\nimport os\r\nimport json\r\nimport cv2\r\nfrom keras import backend as K\r\n\r\napp = Flask(__name__)\r\nBootstrap(app)\r\n\r\nOUTPUT_DIR = 'static'\r\nSIZE = 28\r\ndata =[]\r\n\r\ndef get_prediction(image):\r\n image = cv2.imread(image)\r\n image = cv2.resize(image, (28, 28))\r\n #image = image.astype(\"float\") / 255.0\r\n image = img_to_array(image)\r\n image =np.array(image, dtype=\"float\")/255.0\r\n image = np.expand_dims(image, axis=0)\r\n\r\n model = load_model(\"santa_not_santa.model\")\r\n (notSanta, santa) = model.predict(image)[0] \r\n label = \"Santa\" if santa > notSanta else \"Not Santa\"\r\n K.clear_session()\r\n return label\r\n \r\n \r\n\r\[email protected]('/', methods=['GET', 'POST'])\r\ndef index():\r\n if request.method == 'POST':\r\n uploaded_file = request.files['file']\r\n if uploaded_file.filename != '':\r\n if uploaded_file.filename[-3:] in ['jpg', 'png']:\r\n image_path = os.path.join(OUTPUT_DIR, uploaded_file.filename)\r\n uploaded_file.save(image_path) \r\n class_name = get_prediction(image_path)\r\n result = {\r\n 'class_name': class_name,\r\n 'path_to_image': image_path,\r\n 'size': SIZE\r\n }\r\n print(result)\r\n return render_template('static.html', result=result)\r\n return render_template('index.html')\r\n\r\nif __name__ ==\"__main__\":\r\n app.run(debug=True)\r\n"
}
] | 2 |
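`app.py` above accepts a multipart upload under the form field `file` on `/` and renders the prediction. A sketch of exercising that endpoint from a client; the URL is Flask's default dev address and `test.jpg` is a placeholder file name:

```python
import requests

with open('test.jpg', 'rb') as fh:  # placeholder image on disk
    response = requests.post(
        'http://127.0.0.1:5000/',
        files={'file': ('test.jpg', fh, 'image/jpeg')})
print(response.status_code)
```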
SkueletorTlg/Deezer_bot | https://github.com/SkueletorTlg/Deezer_bot | 69948d32b28b8425829d7358f7b01e26af95f52a | 7b2029e569d9a2208f6682509a5850136adcf230 | b9775d00fd6d5258233e92bce55f5087937d750d | refs/heads/master | 2022-01-09T01:14:09.557805 | 2019-05-15T14:59:26 | 2019-05-15T14:59:26 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.66645747423172,
"alphanum_fraction": 0.6736742854118347,
"avg_line_length": 43.887325286865234,
"blob_id": "4ffc07104eaaa12291d3aefac4bd2ebadd6e5f8a",
"content_id": "7140c2012ed7064e7990a201d398f986ef577da9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3205,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 71,
"path": "/soundcloud/keyboards.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "from math import ceil\n\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\nfrom yarl import URL\n\nfrom utils import new_callback\n\n\ndef sc_search_results_keyboard(results, page, per_page=5):\n kb = InlineKeyboardMarkup(2)\n total_pages = ceil(len(results) / per_page)\n start = (page-1) * per_page\n stop = start + per_page\n last_page = page == total_pages\n for i, result in enumerate(results[start : stop], start=start):\n kb.insert(InlineKeyboardButton(\n f'{i+1}. {result.artist} - {result.title}',\n callback_data=new_callback('track_soundcloud', result.id, 'send')))\n kb.row()\n if page != 1:\n kb.insert(InlineKeyboardButton(\n '◀️', callback_data=new_callback('sc_page', page-1)))\n if not last_page:\n kb.insert(InlineKeyboardButton(\n '️️▶️', callback_data=new_callback('sc_page', page+1)))\n kb.row(\n InlineKeyboardButton(text='Deezer ☑️', callback_data=new_callback('page', 1)),\n InlineKeyboardButton(text='SoundCloud ✅', callback_data=new_callback('sc_page', 1)))\n return kb\n\n\ndef sc_artist_tracks_keyboard(tracks, artist_id):\n kb = InlineKeyboardMarkup(1)\n for i, track in enumerate(tracks[:97], start=1):\n kb.insert(InlineKeyboardButton(\n f'{i}. {track.title}',\n callback_data=new_callback('track_soundcloud', track.id, 'send')))\n kb.insert(InlineKeyboardButton('Get all tracks', callback_data=new_callback('sc_artist', artist_id, 'download')))\n kb.insert(InlineKeyboardButton('Go back', callback_data=new_callback('sc_artist', artist_id, 'main')))\n return kb\n\n\ndef sc_artist_playlists_keyboard(playlists, artist_id):\n kb = InlineKeyboardMarkup(1)\n for i, playlist in enumerate(playlists, start=1):\n kb.insert(InlineKeyboardButton(\n f'{i}. {playlist.title}',\n callback_data=new_callback('playlist_soundcloud', playlist.id, 'send')))\n kb.insert(InlineKeyboardButton('Go back', callback_data=new_callback('sc_artist', artist_id, 'main')))\n return kb\n\n\ndef sc_playlist_keyboard(playlist, post):\n kb = InlineKeyboardMarkup(1)\n for i, track in enumerate(playlist.tracks, start=1):\n kb.insert(InlineKeyboardButton(\n f'{i+1}. {track.artist} \\u2013 {track.title}',\n callback_data=new_callback('track_soundcloud', playlist.id, 'send')))\n kb.insert(InlineKeyboardButton('Get all tracks', callback_data=new_callback('playlist_soundcloud', playlist.id, 'download')))\n if post:\n kb.insert(InlineKeyboardButton('Post', callback_data=new_callback('playlist_soundcloud', playlist.id, 'post')))\n return kb\n\n\ndef sc_artist_keyboard(artist):\n kb = InlineKeyboardMarkup(2)\n kb.insert(InlineKeyboardButton('Tracks', callback_data=new_callback('sc_artist', artist.id, 'tracks')))\n kb.insert(InlineKeyboardButton('Playlists', callback_data=new_callback('sc_artist', artist.id, 'playlists')))\n kb.insert(InlineKeyboardButton('Likes', callback_data=new_callback('sc_artist', artist.id, 'likes')))\n kb.insert(InlineKeyboardButton('Search on Last.Fm', url=str(URL(f'https://www.last.fm/search?q={artist.username}'))))\n return kb\n"
},
{
"alpha_fraction": 0.6817593574523926,
"alphanum_fraction": 0.6849935054779053,
"avg_line_length": 27.10909080505371,
"blob_id": "c565555f35a8d0db28a2e7452ab7cb1065f7ae4e",
"content_id": "236f8041431e161dab68891e3f836e4386878644",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1546,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 55,
"path": "/db_utils.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "from var import var\n\n\nasync def get_track(track_id, quality='mp3'):\n\treturn await var.conn.execute('get', f'track:deezer:{track_id}:{quality}')\n\n\nasync def add_track(track_id, file_id, quality='mp3'):\n\tawait var.conn.execute('set', f'track:deezer:{track_id}:{quality}', file_id)\n\tawait var.conn.execute('incr', 'tracks:deezer:total')\n\tprint(f'dz:{track_id} - {file_id}')\n\n\nasync def get_sc_track(track_id):\n\treturn await var.conn.execute('get', f'track:soundcloud:{track_id}')\n\n\nasync def add_sc_track(track_id, file_id):\n\tawait var.conn.execute('set', f'track:soundcloud:{track_id}', file_id)\n\tawait var.conn.execute('incr', 'tracks:soundcloud:total')\n\tprint(f'sc:{track_id} - {file_id}')\n\n\nasync def get_quality_setting(user_id):\n\treturn await var.conn.execute('get', f'user:{user_id}:quality_setting') or 'mp3'\n\n\nasync def set_quality_setting(user_id, setting):\n\tawait var.conn.execute('set', f'user:{user_id}:quality_setting', setting)\n\n\ndef add_user(user, isadmin=False):\n\tuser_indb = var.db.select('USERS', 'USER_ID', user.id)\n\tprint(user_indb)\n\tif not len(user_indb):\n\t\tnew_user = (\n\t\t\tuser.id,\n\t\t\tuser.first_name,\n\t\t\tuser.last_name if user.last_name else 'NULL',\n\t\t\tuser.username if user.username else 'NULL',\n\t\t\t0 if not isadmin else 1,\n\t\t\tuser.language_code if user.language_code else 'NULL')\n\t\tvar.db.insert('USERS', new_user)\n\t\tvar.db.commit()\n\n\ndef get_users_count():\n\tvar.db.execute('SELECT * FROM USERS')\n\tallusers = var.db.fetchall()\n\treturn len(allusers)\n\n\nif __name__ == '__main__':\n\tvar.db.execute(\n\t\t\"\"\"CREATE TABLE STATS\"\"\")\n"
},
{
"alpha_fraction": 0.6668233275413513,
"alphanum_fraction": 0.6759868264198303,
"avg_line_length": 43.79999923706055,
"blob_id": "6e98274527ce7901ca5aecb2d6ddd07767427866",
"content_id": "8ea894afeaf1d76a9ac7ed0e80469bd59cc424c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4274,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 95,
"path": "/deezer/keyboards.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "from math import ceil\n\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\nfrom yarl import URL\n\nfrom utils import new_callback\n\n\ndef quality_settings_keyboard(current_setting):\n kb = InlineKeyboardMarkup(2)\n mp3 = ' (current)' if current_setting == 'mp3' else ''\n flac = ' (current)' if current_setting == 'flac' else ''\n kb.insert(InlineKeyboardButton(\n \"MP3 320\" + mp3,\n callback_data='quality:mp3'))\n kb.insert(InlineKeyboardButton(\n \"FLAC\" + flac,\n callback_data='quality:flac'))\n return kb\n\n\ndef search_results_keyboard(results, page, per_page=5):\n kb = InlineKeyboardMarkup(2)\n total_pages = ceil(len(results) / per_page)\n start = (page-1) * per_page\n stop = start + per_page\n last_page = page == total_pages\n for i, result in enumerate(results[start : stop], start=start):\n kb.insert(InlineKeyboardButton(\n f'{i+1}. {result.artist.name} - {result.title}',\n callback_data=new_callback('track_deezer', result.id, 'send')))\n kb.row()\n if page != 1:\n kb.insert(InlineKeyboardButton(\n '◀️', callback_data=new_callback('page', page-1)))\n if not last_page:\n kb.insert(InlineKeyboardButton(\n '️️▶️', callback_data=new_callback('page', page+1)))\n kb.row(\n InlineKeyboardButton(text='Deezer ✅', callback_data=new_callback('page', 1)),\n InlineKeyboardButton(text='SoundCloud ☑️', callback_data=new_callback('sc_page', 1)))\n return kb\n\n\ndef artist_keyboard(artist):\n kb = InlineKeyboardMarkup(2)\n kb.insert(InlineKeyboardButton('Top 5 Tracks', callback_data=new_callback('artist', artist.id, 'top5')))\n kb.insert(InlineKeyboardButton('Albums', callback_data=new_callback('artist', artist.id, 'albums')))\n kb.insert(InlineKeyboardButton('Related artists', callback_data=new_callback('artist', artist.id, 'related')))\n kb.insert(InlineKeyboardButton('Radio', callback_data=new_callback('artist', artist.id, 'top5')))\n kb.insert(InlineKeyboardButton('Wikipedia', callback_data=new_callback('artist', artist.id, 'wiki')))\n kb.insert(InlineKeyboardButton('Search on Last.Fm', url=str(URL(f'https://www.last.fm/search?q={artist.name}'))))\n return kb\n\n\ndef related_artists_keyboard(related, main_artist_id):\n kb = InlineKeyboardMarkup(1)\n for i, artist in enumerate(related[:10], start=1):\n kb.insert(InlineKeyboardButton(f'{i}. {artist.name}', callback_data=new_callback('artist', artist.id, 'send')))\n kb.insert(InlineKeyboardButton('Go back', callback_data=new_callback('artist', main_artist_id, 'main')))\n return kb\n\n\ndef artist_radio_keyboard(radio, artist_id):\n kb = InlineKeyboardMarkup(1)\n for i, track in enumerate(radio, start=1):\n kb.insert(InlineKeyboardButton(f'{i}. {track.artist.name} \\u2013 {track.title}', callback_data=new_callback('track', track.id, 'send')))\n kb.insert(InlineKeyboardButton('Go back', callback_data=new_callback('artist', artist_id, 'main')))\n return kb\n\ndef album_keyboard(album, tracks, post=False):\n kb = InlineKeyboardMarkup(1)\n for i, track in enumerate(tracks, start=1):\n kb.insert(InlineKeyboardButton(f'{i}. 
{track.title}', callback_data=new_callback('deezer_track', track.id, 'send')))\n kb.insert(InlineKeyboardButton('Get all tracks', callback_data=new_callback('album', album.id, 'download')))\n if post:\n kb.insert(InlineKeyboardButton('Post', callback_data=new_callback('album', album.id, 'post')))\n return kb\n\n\ndef albums_keyboard(artist, albums):\n kb = InlineKeyboardMarkup(1)\n for album in albums:\n year = album.release_date.split('-')[0]\n kb.insert(InlineKeyboardButton(f'{album.title} ({year})', callback_data=new_callback('album', album.id, 'send')))\n kb.insert(InlineKeyboardButton('Go back', callback_data=new_callback('artist', artist.id, 'main')))\n return kb\n\n\ndef top5_keyboard(artist, top):\n kb = InlineKeyboardMarkup(1)\n for i, track in enumerate(top, start=1):\n kb.insert(InlineKeyboardButton(f'{i}. {track.title}', callback_data=new_callback('track', track.id, 'send')))\n kb.insert(InlineKeyboardButton('Go back', callback_data=new_callback('artist', artist.id, 'main'))) \n return kb\n"
},
{
"alpha_fraction": 0.6728070378303528,
"alphanum_fraction": 0.6964912414550781,
"avg_line_length": 26.80487823486328,
"blob_id": "0035c3d6256d02fe8a007e4b4638c55abb387a5b",
"content_id": "c57175728183e42665841faa3f8b2c7fd1d220a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1140,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 41,
"path": "/userbot.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "import asyncio\nfrom time import time\nimport shutil\nfrom functools import partial\nfrom concurrent import futures\nfrom multiprocessing import Process\n\n\nimport pyrogram\n\nimport config\nimport db_utils\nfrom bot import bot\nfrom var import var\n\n\nloop = asyncio.get_event_loop()\n\n\nasync def start():\n global client\n client = pyrogram.Client(\n 'DeezerMusicBot', api_id=config.client_api_id,\n api_hash=config.client_api_hash, bot_token=config.bot_token)\n await client.start()\n\n\nasync def post_large_track(path, track, quality='mp3', provider='deezer'):\n if provider == 'deezer':\n msg = await client.send_audio(\n chat_id=-1001246220493, audio=path, duration=track.duration,\n title=track.title, performer=track.artist.name)\n await db_utils.add_track(track.id, msg.audio.file_id, quality)\n elif provider == 'soundcloud':\n msg = await client.send_audio(\n chat_id=-1001246220493, audio=path, duration=track.duration,\n title=track.title, performer=track.artist)\n await db_utils.add_sc_track(track.id, msg.audio.file_id)\n\n\nloop.run_until_complete(start())\n"
},
{
"alpha_fraction": 0.758400022983551,
"alphanum_fraction": 0.758400022983551,
"avg_line_length": 24.6849308013916,
"blob_id": "b45cc000bf955b0f655d28afb0181f44e24fb51c",
"content_id": "396e7c347d128eeaa8127cb0bf1d9ccb3d2e2da9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1875,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 73,
"path": "/filters.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "import re\n\nfrom aiogram.dispatcher.filters import BoundFilter\nfrom aiogram import types\n\n\n# class SpotifyFilter(BoundFilter):\n# \tkey = 'spotify'\n\nasync def SpotifyFilter(self, message: types.Message):\n\treturn 'open.spotify.com/track' in message.text\n\n\n# class SpotifyPlaylistFilter(BoundFilter):\n# \tkey = 'spotify_playlist'\n\nasync def SpotifyPlaylistFilter(self, message: types.Message):\n\treturn 'open.spotify.com/playlist' in message.text\n\n\n# class SpotifyAlbumFilter(BoundFilter):\n# \tkey = 'spotify_album'\n\nasync def SpotifyAlbumFilter(self, message: types.Message):\n\treturn 'open.spotify.com/album' in message.text\n\n\n# class SpotifyArtistFilter(BoundFilter):\n# \tkey = 'spotify_artist'\n\nasync def SpotifyArtistFilter(self, message: types.Message):\n\treturn 'open.spotify.com/artist' in message.text\n\n\n# class DeezerFilter(BoundFilter):\n# \tkey = 'deezer'\n\nasync def DeezerFilter(self, message: types.Message):\n\treturn re.match(r'.+deezer.com/??track/.+', message.text)\n\n\n# class DeezerPlaylistFilter(BoundFilter):\n# \tkey = 'deezer_playlist'\n\nasync def DeezerPlaylistFilter(self, message: types.Message):\n\treturn re.match(r'.+deezer.com/???playlist/.+', message.text)\n\n\n# class DeezerAlbumFilter(BoundFilter):\n# \tkey = 'deezer_album'\n\nasync def DeezerAlbumFilter(self, message: types.Message):\n\treturn re.match(r'.+deezer.com/???album/.+', message.text)\n\n\n# class DeezerArtistFilter(BoundFilter):\n# \tkey = 'deezer_artist'\n\nasync def DeezerArtistFilter(self, message: types.Message):\n\treturn re.match(r'.+deezer.com/???artist/.+', message.text)\n\n\n# class ShazamFilter(BoundFilter):\n# \tkey = 'shazam'\n\nasync def ShazamFilter(self, message: types.Message):\n\treturn 'shazam.com' in message.text\n\n\nfilters = (\n\tSpotifyFilter, SpotifyPlaylistFilter, SpotifyAlbumFilter,\n\tSpotifyArtistFilter, DeezerFilter, DeezerPlaylistFilter,\n\tDeezerAlbumFilter, DeezerArtistFilter, ShazamFilter)\n"
},
{
"alpha_fraction": 0.74643874168396,
"alphanum_fraction": 0.74643874168396,
"avg_line_length": 30.909090042114258,
"blob_id": "96a5753b9b9a8374aa727ebff6c5376d4572cd7b",
"content_id": "ae5f90c17226a76e1bffc2cf63ecd56c444dd9e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 11,
"path": "/middlewares.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "from asyncio import sleep\n\nfrom aiogram.dispatcher.middlewares import BaseMiddleware\n\nfrom var import var\nfrom logger import message_logger, format_name\n\nclass Middleware(BaseMiddleware):\n async def on_process_message(self, message, data):\n message_logger.info(\n f'[message from {format_name(message.from_user)}] {message.text}')\n"
},
{
"alpha_fraction": 0.7133620977401733,
"alphanum_fraction": 0.7214439511299133,
"avg_line_length": 28.460317611694336,
"blob_id": "d5d516b70815ff91f3ef09436da17205e5114507",
"content_id": "c545846e2cf95c1e975943fbb0d6e398892e4f81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1856,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 63,
"path": "/soundcloud/methods.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "import shutil\nimport os\n\n\nfrom aiogram import types, exceptions\n\nimport db_utils\nimport utils\nfrom bot import bot\nfrom userbot import post_large_track\nfrom . import keyboards\n\nasync def send_soundcloud_track(chat_id, track):\n\tfile_id = await db_utils.get_sc_track(track.id)\n\tif file_id:\n\t\treturn await bot.send_audio(chat_id, file_id)\n\tpath = await track.download()\n\tif (os.path.getsize(path) >> 20) > 50:\n\t\tawait post_large_track(path, track, provider='soundcloud')\n\t\tfile_id = await db_utils.get_sc_track(track.id)\n\t\treturn await bot.send_audio(chat_id, file_id)\n\n\tawait bot.send_chat_action(chat_id, 'upload_audio')\n\t# thumb = await get_file(track.thumb_url)\n\tmsg = await bot.send_audio(\n\t\tchat_id=chat_id,\n\t\taudio=types.InputFile(path),\n\t\tperformer=track.artist,\n\t\ttitle=track.title,\n\t\t# thumb=types.InputFile(thumb)\n\t\t)\n\tawait db_utils.add_sc_track(track.id, msg.audio.file_id)\n\tshutil.rmtree(path.rsplit('/', 1)[0])\n\n\nasync def send_soundcloud_artist(chat_id, artist):\n\tawait bot.send_photo(\n\t\tchat_id=chat_id,\n\t\tphoto=artist.avatar_url,\n\t\tcaption=f'[{artist.username}]({artist.permalink_url})',\n\t\tparse_mode='markdown',\n\t\treply_markup=keyboards.sc_artist_keyboard(artist))\n\n\nasync def send_soundcloud_playlist(chat_id, playlist, pic=True, send_all=False):\n\tif pic:\n\t\tif not send_all:\n\t\t\tmarkup = keyboards.sc_playlist_keyboard(\n\t\t\t\tplaylist, chat_id in config.admins)\n\t\telse:\n\t\t\tmarkup = None\n\t\ttry:\n\t\t\tawait bot.send_photo(\n\t\t\t\tchat_id, playlist.artwork_url, reply_markup=markup,\n\t\t\t\tcaption=f'{playlist.user.username} \\u2013 {playlist.title}')\n\t\texcept exceptions.BadRequest:\n\t\t\tawait bot.send_photo(\n\t\t\t\tchat_id, playlist.tracks[0].artwork_url, reply_markup=markup,\n\t\t\t\tcaption=f'{playlist.user.username} \\u2013 {playlist.title}')\n\tif send_all:\n\t\tfor track in playlist.tracks:\n\t\t\tprint(track.title)\n\t\t\tawait send_soundcloud_track(chat_id, track)\n"
},
{
"alpha_fraction": 0.7936072945594788,
"alphanum_fraction": 0.7963470220565796,
"avg_line_length": 41.11538314819336,
"blob_id": "69a1333e2fcd6c0db6ccc38fcb664972761ce370",
"content_id": "af91789e2020d8d7adbdc8435b5a078ce667bf57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1095,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 26,
"path": "/inline_keyboards.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "from math import ceil\n\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\nfrom yarl import URL\nfrom utils import new_callback\n\n\nfinish_download_keyboard = InlineKeyboardMarkup(1)\nfinish_download_keyboard.insert(InlineKeyboardButton(\n 'Loading full track, please wait...', callback_data='finish_download'))\n\nstart_keyboard = InlineKeyboardMarkup(1)\nstart_keyboard.insert(InlineKeyboardButton('Search', switch_inline_query_current_chat=''))\nstart_keyboard.insert(InlineKeyboardButton('Search albums', switch_inline_query_current_chat='.a '))\nstart_keyboard.insert(InlineKeyboardButton('Search artists', switch_inline_query_current_chat='.ar '))\n\nlarge_file_keyboard = InlineKeyboardMarkup(1)\nlarge_file_keyboard.insert(InlineKeyboardButton(\n \"File is too big, Telegram won't let to upload it\",\n callback_data='big_file'))\n\nstats_keyboard = InlineKeyboardMarkup()\nstats_keyboard.insert(InlineKeyboardButton('Update', callback_data='stats'))\n\ntoday_stats_keyboard = InlineKeyboardMarkup()\ntoday_stats_keyboard.insert(InlineKeyboardButton('Update', callback_data='today'))\n"
},
{
"alpha_fraction": 0.5946404337882996,
"alphanum_fraction": 0.603120744228363,
"avg_line_length": 26.551401138305664,
"blob_id": "5bdab7cce783a37e9290892b6dc906a8e252b4ca",
"content_id": "8e31f576cf37f9062b15c41b4471a52e9e06dea3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5896,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 214,
"path": "/utils.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport glob\nimport os\nimport random\nimport re\nimport string\nfrom asyncio import sleep\nfrom collections import namedtuple\nfrom concurrent.futures._base import TimeoutError\nfrom datetime import date\nfrom functools import wraps\nfrom time import time\n\nimport aiofiles\nimport aiohttp\nimport mutagen\nfrom aiogram import exceptions, types\nfrom eyed3 import id3\nfrom mutagen.easyid3 import EasyID3\nfrom mutagen.mp3 import MP3\nfrom yarl import URL\n\nfrom var import var\n\n\ndef new_callback(*args, sep=':'):\n return sep.join(str(arg) for arg in args)\n\n\ndef parse_callback(callback, sep=':'):\n return callback.split(sep)\n\n\ndef random_string(length=10):\n return ''.join(random.sample(string.ascii_letters, length))\n\n\ndef clear_link(message):\n for entity in message.entities:\n if entity.type == 'url':\n return entity.url \\\n or message.text[entity.offset: entity.offset + entity.length]\n\n\ndef split_string(text):\n result = []\n words = text.split('\\n')\n string = ''\n for i, word in enumerate(words):\n if (len(string + word) > 4096):\n result.append(string)\n string = ''\n string += word + '\\n'\n if i == len(words) - 1:\n result.append(string)\n string = ''\n return result\n\n\ndef already_downloading(track_id):\n status = var.downloading.get(track_id) # pylint: disable=no-member\n if status is None or int(time()) - status > 60:\n return False\n return True\n\n\ndef islink(text):\n return 'https://' in text or 'http://' in text\n\n\nStats = namedtuple('Stats', ('downloaded_tracks',\n 'sent_tracks', 'received_messages'))\n\n\ndef get_today_stats():\n datestr = date.today().isoformat()\n downloaded_tracks = 0\n sent_tracks = 0\n received_messages = 0\n for filename in glob.iglob(f'logs/{datestr}*file_downloads.log'):\n downloaded_tracks += sum(1 for line in open(filename))\n for filename in glob.iglob(f'logs/{datestr}*sent_messages.log'):\n sent_tracks += sum(1 for line in open(filename))\n for filename in glob.iglob(f'logs/{datestr}*messages.log'):\n received_messages += sum(1 for line in open(filename))\n return Stats(downloaded_tracks, sent_tracks, received_messages)\n\n\ndef encode_url(url, *args, **kwargs):\n data = {}\n for arg in args:\n if isinstance(arg, dict):\n data.update(arg)\n data.update(kwargs)\n url = URL(url).with_query(data)\n return str(url)\n\n\ndef calling_queue(size):\n def wrapper(coro):\n sem = asyncio.Semaphore(size)\n @wraps(coro)\n async def decorator(*args, **kwargs):\n async with sem:\n result = await coro(*args, **kwargs)\n return result\n return decorator\n return wrapper\n\n\n@calling_queue(10)\nasync def download_file(url, path):\n r = await request_get(url) # pylint: disable=no-member\n async with aiofiles.open(path, 'wb') as f:\n async for chunk in r.content.iter_chunked(2048):\n await f.write(chunk)\n\n\n@calling_queue(10)\nasync def get_file(url, total_size=None):\n r = await request_get(url) # pylint: disable=no-member\n return await r.content.read()\n\n\ndef add_tags(path, track, album, image, lyrics):\n try:\n genre = album['genres']['data'][0]['name']\n except (KeyError, IndexError):\n genre = ''\n\n tag = id3.Tag()\n tag.parse(path)\n tag.artist = track['artist']['name']\n tag.album = track['album']['title']\n tag.album_artist = album['artist']['name']\n try:\n tag.original_release_date = track['album']['release_date']\n tag.recording_date = int(track['album']['release_date'].split('-')[0])\n except:\n pass\n tag.title = track['title']\n tag.track_num = track['track_position']\n tag.disc_num = 
track['disk_number']\n tag.non_std_genre = genre\n tag.bpm = track['bpm']\n if lyrics:\n tag.lyrics.set(lyrics)\n tag.images.set(\n type_=3, img_data=image, mime_type='image/png')\n tag.save()\n\n\ndef sc_add_tags(path, track, image, lyrics=None):\n try:\n album_title = track['publisher_metadata']['album_title']\n except KeyError:\n album_title = ''\n\n tag = id3.Tag()\n tag.parse(path)\n tag.title = track.title\n tag.artist = track.artist\n tag.album = album_title\n tag.album_artist = track.artist if album_title else ''\n tag.original_release_date = track.created_at.split('T')[0].split(' ')[\n 0].replace('/', '-')\n tag.non_std_genre = track.get('genre', '')\n if lyrics:\n tag.lyrics.set(lyrics)\n if image:\n tag.images.set(\n type_=3, img_data=image, mime_type='image/png')\n tag.save()\n\n\nerrcount = {'count': 0}\n\n\nasync def request_get(url, *args, **kwargs):\n retries_count = 0\n while True:\n try:\n result = await var.session.get(url, *args, **kwargs)\n except TimeoutError:\n if errcount['count'] > 3:\n exit(1)\n await var.session.close()\n var.session = aiohttp.ClientSession()\n errcount['count'] += 1\n except Exception as err:\n retries_count += 1\n if retries_count > 3:\n raise ValueError('Number of retries exceeded') from err\n else:\n return result\n\n\nasync def request_post(url, *args, **kwargs):\n retries_count = 0\n while True:\n try:\n result = await var.session.post(url, *args, **kwargs)\n except TimeoutError:\n if errcount['count'] > 3:\n exit(1)\n await var.session.close()\n var.session = aiohttp.ClientSession()\n errcount['count'] += 1\n except Exception as err:\n retries_count += 1\n if retries_count > 3:\n raise ValueError('Number of retries exceeded') from err\n else:\n return result\n"
},
{
"alpha_fraction": 0.6413419842720032,
"alphanum_fraction": 0.6448051929473877,
"avg_line_length": 36.71428680419922,
"blob_id": "6d4742fdcb02e23886ec86a093213afccbdc1ecd",
"content_id": "72ae3861e97f0bdc7d7f75afcd212c829756af59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9240,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 245,
"path": "/callback_handlers.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "from contextlib import suppress\nimport traceback\nfrom asyncio import sleep\n\nfrom aiogram import exceptions, types\n\nfrom bot import bot\nimport utils\nimport db_utils\nimport methods\nimport inline_keyboards\nfrom deezer import deezer_api\nfrom soundcloud import soundcloud_api, methods as sc_methods\nimport soundcloud.keyboards as sc_keyboards\nfrom var import var\nfrom utils import parse_callback\n\n\nasync def soundcloud_handler(callback):\n await callback.answer()\n track_id = callback.data.split(':')[1]\n track = await soundcloud_api.get_track(track_id)\n await sc_methods.send_soundcloud_track(callback.message.chat.id, track)\n\n\nasync def finish_download_handler(data):\n await data.answer('please wait, downloading track...', show_alert=True)\n\n\nasync def large_file_handler(callback):\n await callback.answer('Track is too large, Telegram won\\'t let to upload it', show_alert=True)\n\n\nasync def quality_setting_hanlder(callback):\n _, setting = parse_callback(callback.data)\n await db_utils.set_quality_setting(callback.from_user.id, setting)\n await callback.answer(f'quality set to {setting}')\n with suppress(exceptions.MessageNotModified):\n await bot.edit_message_reply_markup(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n reply_markup=inline_keyboards.quality_settings_keyboard(setting))\n\n\nasync def pages_handler(callback):\n await callback.answer()\n mode, page = parse_callback(callback.data)\n q = callback.message.text[:-1]\n with suppress(exceptions.MessageNotModified):\n if mode == 'page':\n search_results = await deezer_api.search(q=q)\n await bot.edit_message_reply_markup(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n reply_markup=inline_keyboards.search_results_keyboard(search_results, int(page)))\n elif mode == 'sc_page':\n search_results = await soundcloud_api.search(q=q)\n await bot.edit_message_reply_markup(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n reply_markup=sc_keyboards.sc_search_results_keyboard(search_results, int(page)))\n\n\nasync def stats_callback_handler(callback):\n await callback.answer()\n sc_tracks_count = await var.conn.execute('get', 'tracks:soundcloud:total')\n dz_tracks_count = await var.conn.execute('get', 'tracks:deezer:total')\n all_users_count = db_utils.get_users_count()\n with suppress(exceptions.MessageNotModified):\n await bot.edit_message_text(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n text=f'users: {all_users_count}\\n\\n'\n f'Deezer tracks: {dz_tracks_count}\\n\\nSoundCloud tracks: {sc_tracks_count}',\n reply_markup=inline_keyboards.stats_keyboard)\n\n\nasync def today_stats_callback_handler(callback):\n await callback.answer()\n stats = utils.get_today_stats()\n message_text = (\n f'Downloaded tracks: {stats.downloaded_tracks}\\n\\n'\n f'Sent tracks: {stats.sent_tracks}\\n\\n'\n f'Received messages: {stats.received_messages}')\n with suppress(exceptions.MessageNotModified):\n await bot.edit_message_text(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n text=message_text,\n reply_markup=inline_keyboards.today_stats_keyboard)\n\n\nasync def sc_callback_handler(callback):\n print(callback.data)\n mode, obj_id, method = parse_callback(callback.data)\n keyboard = None\n\n if mode == 'playlist_soundcloud':\n await callback.answer()\n playlist = await soundcloud_api.get_playlist(obj_id)\n\n if method == 'send':\n return await 
sc_methods.send_soundcloud_playlist(\n callback.message.chat.id, playlist)\n\n elif method == 'download':\n return await sc_methods.send_soundcloud_playlist(\n callback.message.chat.id, playlist, pic=False, send_all=True)\n\n elif method == 'post':\n return await sc_methods.send_soundcloud_playlist(\n -1001171972924, playlist, send_all=True)\n\n\n elif mode == 'track_soundcloud':\n if utils.already_downloading(int(obj_id)):\n return await callback.answer('already downloading, please wait...')\n else:\n await callback.answer('downloading...')\n track = await soundcloud_api.get_track(obj_id)\n await sc_methods.send_soundcloud_track(callback.message.chat.id, track)\n\n\nasync def sc_artist_callback_handler(callback):\n print(callback.data)\n await callback.answer()\n _, obj_id, method = parse_callback(callback.data)\n artist = await soundcloud_api.get_artist(obj_id)\n\n if method == 'main':\n keyboard = sc_keyboards.sc_artist_keyboard(artist)\n\n elif method == 'tracks':\n tracks = await artist.get_tracks()\n keyboard = sc_keyboards.sc_artist_tracks_keyboard(tracks, artist.id)\n\n elif method == 'playlists':\n playlists = await artist.get_playlists()\n keyboard = sc_keyboards.sc_artist_playlists_keyboard(playlists, artist.id)\n\n elif method == 'download':\n tracks = await artist.get_tracks()\n for track in tracks:\n await methods.send_soundcloud_track(callback.message.chat.id, track)\n await sleep(.3)\n return\n\n return await bot.edit_message_reply_markup(\n callback.message.chat.id,\n callback.message.message_id,\n reply_markup=keyboard)\n\n\nasync def artist_callback_handler(callback):\n await callback.answer()\n print(callback.data)\n _, obj_id, method = parse_callback(callback.data)\n\n artist = await deezer_api.getartist(obj_id)\n if method == 'top5':\n top = await artist.top(5)\n return await bot.edit_message_reply_markup(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n reply_markup=inline_keyboards.top5_keyboard(artist, top))\n\n elif method == 'albums':\n albums = await artist.albums()\n return await bot.edit_message_reply_markup(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n reply_markup=inline_keyboards.albums_keyboard(artist, albums))\n\n elif method == 'related':\n related = await artist.related()\n return await bot.edit_message_reply_markup(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n reply_markup=inline_keyboards.related_artists_keyboard(\n related, artist.id))\n\n elif method == 'radio':\n radio = await artist.radio()\n return await bot.edit_message_reply_markup(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n reply_markup=inline_keyboards.artist_radio_keyboard(\n radio, artist.id))\n\n elif method == 'main':\n kboard = inline_keyboards.artist_keyboard(artist)\n return await bot.edit_message_reply_markup(\n chat_id=callback.message.chat.id,\n message_id=callback.message.message_id,\n reply_markup=kboard)\n\n elif method == 'send':\n return await bot.send_photo(\n chat_id=callback.message.chat.id,\n photo=artist.picture_xl,\n caption=f'[{artist.name}]({artist.share})',\n parse_mode='markdown',\n reply_markup=inline_keyboards.artist_keyboard(artist))\n\n elif method == 'wiki':\n artist = await deezer_api.getartist(obj_id)\n r = await bot.session.get(\n f'https://wikipedia.org/w/index.php?search={artist.name}')\n return await bot.send_message(\n callback.message.chat.id, r.url)\n\n\nasync def callback_handler(callback):\n print(callback.data)\n 
mode, obj_id, method = parse_callback(callback.data)\n\n if mode == 'album':\n if method == 'download':\n await callback.answer()\n album = await deezer_api.getalbum(obj_id)\n await bot.edit_message_reply_markup(\n callback.message.chat.id,\n callback.message.message_id,\n None)\n return await methods.send_album(\n album, callback.message.chat, pic=False, send_all=True)\n\n elif method == 'post':\n await callback.answer()\n album = await deezer_api.getalbum(obj_id)\n chat = await bot.get_chat(-1001171972924)\n await methods.send_album(album, chat, send_all=True)\n\n elif method == 'send':\n await callback.answer('downloading')\n album = await deezer_api.getalbum(obj_id)\n return await methods.send_album(album, callback.message.chat) \n\n elif mode == 'track_deezer':\n if utils.already_downloading(int(obj_id)):\n return await callback.answer('already downloading, please wait...')\n else:\n await callback.answer('downloading...')\n track = await deezer_api.gettrack(obj_id)\n await methods.send_track(track, callback.message.chat)\n"
},
{
"alpha_fraction": 0.6863157749176025,
"alphanum_fraction": 0.6863157749176025,
"avg_line_length": 29.645160675048828,
"blob_id": "d841596221bdb0f7e7c48b89fe0c70e0d7b76dca",
"content_id": "deda0e11267a48b3ad78243645c5eee802e2187a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 950,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 31,
"path": "/AttrDict.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "class AttrDict(dict):\n\t\"\"\" Nested Attribute Dictionary\n\n\tA class to convert a nested Dictionary into an object with key-values\n\taccessibly using attribute notation (AttrDict.attribute) in addition to\n\tkey notation (Dict[\"key\"]). This class recursively sets Dicts to objects,\n\tallowing you to recurse down nested dicts (like: AttrDict.attr.attr)\n\t\"\"\"\n\n\tdef __init__(self, mapping):\n\t\tsuper(AttrDict, self).__init__()\n\t\ttry:\n\t\t\tfor key, value in mapping.items():\n\t\t\t\tself.__setitem__(key, value)\n\t\texcept AttributeError as exc:\n\t\t\traise ValueError(mapping) from exc\n\n\tdef __setitem__(self, key, value):\n\t\tif isinstance(value, dict):\n\t\t\tvalue = AttrDict(value)\n\t\tsuper(AttrDict, self).__setitem__(key, value)\n\n\tdef __getattr__(self, item):\n\t\ttry:\n\t\t\tif isinstance(self.__getitem__(item), list):\n\t\t\t\treturn [self.__class__(x) for x in self.__getitem__(item)]\n\t\t\treturn self.__getitem__(item)\n\t\texcept KeyError:\n\t\t\treturn None\n\n\t__setattr__ = __setitem__\n"
},
{
"alpha_fraction": 0.517241358757019,
"alphanum_fraction": 0.517241358757019,
"avg_line_length": 15.571428298950195,
"blob_id": "df345496d195bd9c2d7ac603afd765014c1e8517",
"content_id": "16b55477964267e424793603089e2e37be89dffd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 116,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 7,
"path": "/var.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "class Var:\n __slots__ = [\n\t\t'conn', 'db', 'spot', 'downloading',\n\t\t'session', 'CSRFToken', 'loop']\n\n\nvar = Var()\n"
},
{
"alpha_fraction": 0.69852215051651,
"alphanum_fraction": 0.7091625332832336,
"avg_line_length": 28.33526039123535,
"blob_id": "9aa6ea2b03a9b19c5788ee6d7c379f5782c362a4",
"content_id": "8aea6c7a919b2e511c0590e1145bab12b6a99594",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5075,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 173,
"path": "/methods.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "import asyncio\nimport json\nimport os\nimport shutil\nfrom asyncio import sleep\nfrom time import time\n\nfrom aiogram import exceptions, types\n\nimport db_utils\nimport inline_keyboards\nimport deezer.keyboards as dz_keyboards\nimport config\nfrom bot import bot\nfrom logger import file_download_logger, format_name, sent_message_logger\nfrom utils import already_downloading, calling_queue, get_file\nfrom var import var\nfrom userbot import post_large_track\n\n\n@calling_queue(3)\nasync def upload_track(track, path, tries=0):\n\tif tries > 3:\n\t\traise RuntimeError('can\\'t upload track')\n\ttry:\n\t\tmsg = await bot.send_audio(\n\t\t\tchat_id=-1001246220493,\n\t\t\taudio=types.InputFile(path),\n\t\t\ttitle=track.title,\n\t\t\tperformer=track.artist.name,\n\t\t\tduration=track.duration)\n\texcept exceptions.RetryAfter as e:\n\t\tprint(f'flood control exceeded, sleeping for {e.timeout + 10} seconds')\n\t\tawait sleep(e.timeout + 10)\n\t\treturn await upload_track(track, path, tries + 1)\n\texcept exceptions.TelegramAPIError:\n\t\tawait sleep(5)\n\t\treturn await upload_track(track, path, tries + 1)\n\treturn msg\n\n\nasync def finish_download(track, inline_message_id, user):\n\tfile_id = await db_utils.get_track(track.id)\n\tif file_id:\n\t\treturn await bot.edit_message_media(\n\t\t\tmedia = types.InputMediaAudio(\n\t\t\t\tmedia=file_id,\n\t\t\t\ttitle=track.title,\n\t\t\t\tperformer=track.artist.name,\n\t\t\t\tduration=track.duration),\n\t\tinline_message_id=inline_message_id)\n\tpath = await track.download()\n\tif (os.path.getsize(path) >> 20) > 50:\n\t\tawait bot.edit_message_reply_markup(\n\t\t\tinline_message_id=inline_message_id,\n\t\t\treply_markup=inline_keyboards.large_file_keyboard)\n\t\tawait post_large_track(path, track)\n\t\tfile_id = await db_utils.get_track(track.id)\n\telse:\n\t\tmsg = await upload_track(track, path)\n\t\tawait db_utils.add_track(track.id, msg.audio.file_id)\n\t\tfile_id = msg.audio.file_id\n\n\ttry:\n\t\tawait bot.edit_message_media(\n\t\t\tmedia = types.InputMediaAudio(\n\t\t\t\tmedia=file_id,\n\t\t\t\ttitle=track.title,\n\t\t\t\tperformer=track.artist.name,\n\t\t\t\tduration=track.duration),\n\t\t\tinline_message_id=inline_message_id)\n\t\tshutil.rmtree(path.rsplit('/', 1)[0])\n\texcept exceptions.BadRequest:\n\t\ttry:\n\t\t\tawait bot.send_audio(user.id, file_id)\n\t\texcept:\n\t\t\tpass\n\t\n\tfile_download_logger.info(\n\t\tf'[downloaded track {track.id} (inline)] {track}')\n\tsent_message_logger.info(\n\t\tf'[send track {track.id} to {format_name(user)} (inline)] {track}')\n\n\nasync def send_track(track, chat, Redownload=False):\n\tquality = await db_utils.get_quality_setting(chat.id)\n\tif not already_downloading(track.id):\n\t\tvar.downloading[track.id] = int(time())\n\telse:\n\t\treturn\n\tif not Redownload:\n\t\tif (await check_and_forward(track, chat, quality)):\n\t\t\treturn\n\n\tif quality == 'mp3':\n\t\tpath = await track.download('MP3_320')\n\telif quality == 'flac':\n\t\tpath = await track.download('FLAC')\t\t\n\n\tawait bot.send_chat_action(chat.id, 'upload_audio')\n\n\tif (os.path.getsize(path) >> 20) > 50:\n\t\tmsg = await bot.send_message(\n\t\t\tchat_id=chat.id,\n\t\t\ttext='File is larger than 50 MB, uploading can take a while, please wait') \n\t\tawait post_large_track(path, track, quality)\n\t\tawait sleep(1)\n\t\tfile_id = await db_utils.get_track(track.id, quality)\n\t\tawait bot.send_audio(chat.id, file_id)\n\t\tawait msg.delete()\n\telse:\n\t\tmsg = await upload_track(track, path)\n\t\tawait 
msg.forward(chat.id)\n\t\tawait db_utils.add_track(track.id, msg.audio.file_id, quality)\n\tshutil.rmtree(path.rsplit('/', 1)[0])\n\tvar.downloading.pop(track.id)\n\tsent_message_logger.info(\n\t\tf'[send track {track.id} to {format_name(chat)}] {track}')\n\n\nasync def send_album(album, chat, pic=True, send_all=False):\n\tif pic:\n\t\tif not send_all:\n\t\t\ttracks = await album.get_tracks()\n\t\t\tmarkup = dz_keyboards.album_keyboard(\n\t\t\t\talbum, tracks, chat.id in config.admins)\n\t\telse:\n\t\t\tmarkup = None\n\t\tawait bot.send_photo(\n\t\t\tchat.id,\n\t\t\talbum.cover_xl,\n\t\t\tcaption=f'{album[\"artist\"][\"name\"]} \\u2013 {album.title}',\n\t\t\treply_markup=markup)\n\tif send_all:\n\t\tfor track in await album.get_tracks():\n\t\t\tprint(track.title)\n\t\t\tawait send_track(track, chat)\n\n\nasync def send_artist(artist, chat_id):\n\tawait bot.send_photo(\n\t\tchat_id=chat_id,\n\t\tphoto=artist.picture_xl,\n\t\tcaption=f'[{artist.name}]({artist.share})',\n\t\tparse_mode='markdown',\n\t\treply_markup=dz_keyboards.artist_keyboard(artist))\n\n\nasync def check_and_forward(track, chat, quality='mp3'):\n\tfile_id = await db_utils.get_track(track.id, quality)\n\tif not file_id:\n\t\treturn False\n\tawait bot.send_audio(\n\t\tchat_id=chat.id, audio=file_id, title=track.title,\n\t\tperformer=track.artist.name, duration=track.duration)\n\tsent_message_logger.info(\n\t\tf'[send track {track.id} to {format_name(chat)}] {track}')\n\treturn True\n\n\nasync def cache(track):\n\tfile_id = await db_utils.get_track(track.id)\n\tif not file_id:\n\t\tpath = await track.download()\n\t\tif (os.path.getsize(path) >> 20) > 50:\n\t\t\tawait post_large_track(path, track)\n\t\telse:\n\t\t\tmsg = await upload_track(track, path)\n\t\t\tawait db_utils.add_track(track.id, msg.audio.file_id)\n\t\tshutil.rmtree(path.rsplit('/', 1)[0])\n\t\tprint(f'cached track {track.artist.name} - {track.title}')\n\telse:\n\t\tprint(f'skipping track {track.artist.name} - {track.title} - {file_id}')\n"
},
{
"alpha_fraction": 0.726177453994751,
"alphanum_fraction": 0.7283680438995361,
"avg_line_length": 22.41025733947754,
"blob_id": "5430964bc3d0b14c12fbabacb9e158808512bbb6",
"content_id": "88008ec721dbd8f91351b1e65e5c69375eeeb09d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 913,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 39,
"path": "/start.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom contextlib import suppress\nimport asyncio\nimport shutil\nimport re\nfrom sys import argv\n\nfrom aiogram.utils import executor\nfrom aiogram import types\nfrom aiohttp import ClientSession\n\nfrom bot import bot, dp, register_handlers\nfrom var import var\nimport handlers\nimport inline_handlers\nimport callback_handlers\nimport filters\nfrom logger import update_logging_files\n\nloop = asyncio.get_event_loop()\n\n\nasync def close():\n var.db.commit()\n var.db.close()\n var.conn.close()\n logging.cancel()\n await var.session.close()\n\n\nif __name__ == '__main__':\n with suppress(FileNotFoundError):\n shutil.rmtree('downloads')\n register_handlers(dp, handlers, inline_handlers, callback_handlers)\n logging = asyncio.ensure_future(update_logging_files())\n executor.start_polling(dp, loop=loop)\n loop.run_until_complete(close())\n loop.close()\n"
},
{
"alpha_fraction": 0.579872190952301,
"alphanum_fraction": 0.5864902138710022,
"avg_line_length": 34.055999755859375,
"blob_id": "4182e14d708af981b6d0f91482e5f4f16c7085ca",
"content_id": "c30f305eb6d589878dfdd6b5f00fc6d94953e5ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4384,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 125,
"path": "/inline_handlers.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "from aiogram import types\n\nimport db_utils\nfrom deezer import deezer_api\nimport inline_keyboards\nimport methods\nimport utils\nfrom bot import bot\nfrom var import var\n\n\nasync def inline_handler(query):\n q = query.query.replace('.a', '').replace('.ar', '').strip()\n results = []\n if not q:\n return await bot.answer_inline_query(\n inline_query_id=query.id,\n results=results,\n switch_pm_text='Search',\n switch_pm_parameter='0')\n if query.offset == 'done':\n return await bot.answer_inline_query(\n inline_query_id=query.id,\n results=results)\n\n if q:\n search_results = await deezer_api.search(q=q)\n\n if not search_results:\n return await bot.answer_inline_query(\n inline_query_id=query.id,\n results=results,\n switch_pm_text='Nothing was found',\n switch_pm_parameter='0')\n\n offset = int(query.offset) if query.offset.isdecimal() else 0\n\n if '.a' in query.query:\n already_in_list = []\n for result in search_results[offset: offset + 5]:\n if result.album.id in already_in_list:\n continue\n already_in_list.append(result.album.id)\n results.append(types.InlineQueryResultArticle(\n id=result.link,\n title=result.album.title,\n description=result.artist.name,\n thumb_url=result.album.cover_small,\n thumb_width=56,\n thumb_height=56,\n input_message_content=types.InputTextMessageContent(\n f'https://deezer.com/album/{result.album.id}')\n ))\n else:\n for result in search_results[offset: offset + 5]:\n file_id = await db_utils.get_track(result.id)\n if file_id:\n results.append(types.InlineQueryResultCachedAudio(\n id='done:' + utils.random_string(), audio_file_id=file_id))\n elif result.preview:\n results.append(types.InlineQueryResultAudio(\n id=f'finish_download:{result.id}:{utils.random_string(4)}',\n audio_url=result.preview,\n title=f'⏳{result.title}',\n performer=result.artist.name,\n audio_duration=30,\n reply_markup=inline_keyboards.finish_download_keyboard))\n\n if offset + 6 < len(search_results):\n next_offset = str(offset + 5)\n else:\n next_offset = 'done'\n await bot.answer_inline_query(\n inline_query_id=query.id,\n results=results,\n next_offset=next_offset,\n cache_time=30)\n\n\nasync def artist_search_inline_handler(query):\n q = query.query.replace('.ar', '').strip()\n search_results = await deezer_api.search('artist', q)\n results = []\n if not q:\n return await bot.answer_inline_query(\n inline_query_id=query.id,\n results=results,\n switch_pm_text='Search',\n switch_pm_parameter='0')\n if query.offset == 'done':\n await bot.answer_inline_query(\n inline_query_id=query.id,\n results=results)\n offset = int(query.offset) if query.offset.isdecimal() else 0\n for result in search_results[offset: offset + 5]:\n results.append(types.InlineQueryResultArticle(\n id=result.link,\n title=result.name,\n thumb_url=result.picture_small,\n thumb_width=56,\n thumb_height=56,\n input_message_content=types.InputTextMessageContent(result.link)\n ))\n\n if offset + 6 < len(search_results):\n next_offset = str(offset + 5)\n else:\n next_offset = 'done'\n await bot.answer_inline_query(\n inline_query_id=query.id,\n results=results,\n next_offset=next_offset)\n\n\nasync def finish_download_handler(chosen_inline: types.ChosenInlineResult):\n if utils.islink(chosen_inline.result_id):\n return\n if chosen_inline.result_id.split(':')[0] == 'done':\n return\n try:\n track_id = int(chosen_inline.result_id.split(':')[1])\n except ValueError:\n track_id = int(chosen_inline.result_id.split(':')[1].split('/')[-1])\n track = await deezer_api.gettrack(track_id)\n await 
methods.finish_download(track, chosen_inline.inline_message_id, chosen_inline.from_user)\n"
},
{
"alpha_fraction": 0.647293746471405,
"alphanum_fraction": 0.6535995602607727,
"avg_line_length": 35.596153259277344,
"blob_id": "3a453a065a78a41d0f420c9633f4739f1821fd6d",
"content_id": "0716640fe79b8aa13a17c42c1f14c7ced6f4ed91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9515,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 260,
"path": "/handlers.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "import re\nfrom asyncio import sleep\nfrom glob import iglob\nfrom datetime import date\n\nfrom aiogram import types\nfrom aiogram.dispatcher.handler import SkipHandler\n\nfrom var import var\nfrom bot import bot\nimport utils\nimport db_utils\nimport inline_keyboards\nfrom deezer import deezer_api, keyboards as dz_keyboards\nfrom soundcloud import soundcloud_api, keyboards as cs_keyboards\nimport soundcloud.methods as sc_methods\nimport config\nimport methods\nfrom logger import error_logger\n\n\nasync def only_admin_handler(message: types.Message):\n if message.chat.id in config.admins:\n raise SkipHandler()\n\n\nasync def quality_setting_handler(message: types.Message):\n if message.chat.id in config.admins:\n current_setting = await db_utils.get_quality_setting(message.chat.id)\n return await bot.send_message(\n message.chat.id, 'Select quality',\n reply_markup=inline_keyboards.quality_settings_keyboard(current_setting))\n\n\nasync def soundcloud_link_handler(message: types.Message):\n url = utils.clear_link(message)\n result = await soundcloud_api.resolve(url)\n if result.kind == 'track':\n await sc_methods.send_soundcloud_track(message.chat.id, result)\n elif result.kind == 'user':\n await sc_methods.send_soundcloud_artist(message.chat.id, result)\n elif result.kind == 'playlist':\n await sc_methods.send_soundcloud_playlist(message.chat.id, result)\n\n\n\nasync def audio_file_handler(message: types.Message):\n if message.caption and message.chat.id in config.admins:\n await db_utils.add_track(int(message.caption), message.audio.file_id)\n else:\n print(message.audio.file_id)\n\n\nasync def start_command_handler(message: types.Message):\n db_utils.add_user(message.from_user)\n await bot.send_message(\n chat_id=message.chat.id,\n text=config.start_message,\n disable_web_page_preview=True,\n parse_mode=types.ParseMode.MARKDOWN,\n reply_markup=inline_keyboards.start_keyboard)\n\n\nasync def getstats_handler(message):\n sc_tracks_count = await var.conn.execute('get', 'tracks:soundcloud:total')\n dz_tracks_count = await var.conn.execute('get', 'tracks:deezer:total')\n all_users_count = db_utils.get_users_count()\n await bot.send_message(\n chat_id=message.chat.id,\n text=f'users: {all_users_count}\\n\\n'\n f'Deezer tracks: {dz_tracks_count}\\n\\nSoundCloud tracks: {sc_tracks_count}',\n reply_markup=inline_keyboards.stats_keyboard)\n\n\nasync def today_stats_handler(message):\n stats = utils.get_today_stats()\n await bot.send_message(\n chat_id=message.chat.id,\n text=f'Downloaded tracks: {stats.downloaded_tracks}\\n\\n'\n f'Sent tracks: {stats.sent_tracks}\\n\\n'\n f'Received messages: {stats.received_messages}',\n reply_markup=inline_keyboards.today_stats_keyboard)\n\n\nasync def redownload_handler(message: types.Message):\n if 'com/' in message.text:\n obj_type = message.text.split('/')[-2]\n obj_id = message.text.split('/')[-1]\n if obj_type == 'track':\n track = await deezer_api.gettrack(obj_id)\n await methods.send_track(track, message.chat, Redownload=True)\n else:\n album = await deezer_api.getalbum(obj_id)\n for track in await album.get_tracks():\n await methods.send_track(track, message.chat, Redownload=True)\n else:\n search = await deezer_api.search(q=message.text.strip('/re '))\n await methods.send_track(await deezer_api.gettrack(search[0].id), message.chat, Redownload=True)\n\n\nasync def diskography_handler(message: types.Message):\n if message.reply_to_message and message.reply_to_message.audio:\n artist_name = message.reply_to_message.audio.performer\n else:\n 
artist_name = message.text.strip('/d ').split('/')[-1]\n if artist_name.isdigit():\n artist = await deezer_api.getartist(artist_name)\n else:\n artist = (await deezer_api.search('artist', artist_name))[0]\n\n tracks = await artist.all_tracks()\n total, skipped = len(tracks), 0\n for track in tracks:\n if await db_utils.get_track(track.id):\n skipped += 1\n\n text = f'{artist.name}\\n\\nskipped ({skipped}/{total})'\n\n await bot.send_message(message.chat.id, text)\n\n for track in tracks:\n try:\n await methods.cache(track)\n await sleep(0)\n except Exception as e:\n print(e)\n await bot.send_message(message.chat.id, e)\n await bot.send_message(message.chat.id, f'{artist.name} done')\n\n for artist in (await artist.related())[:5]:\n try:\n await sleep(2)\n tracks = await artist.all_tracks()\n total, skipped = len(tracks), 0\n for i, track in enumerate(tracks, start=1):\n if await db_utils.get_track(track.id):\n skipped += 1\n if skipped == total:\n await sleep(3)\n continue\n text = f'{artist.name}\\n\\nskipped ({skipped}/{total})'\n await bot.send_message(message.chat.id, text)\n for track in tracks:\n await methods.cache(track)\n await bot.send_message(message.chat.id, f'{artist.name} done')\n\n except Exception as e:\n print(e)\n await bot.send_message(message.chat.id, f'{artist.name}\\n\\n{e}')\n\n\nasync def artist_search_handler(message):\n artist = (await deezer_api.search(\n 'artist', message.text.strip(message.get_command())))[0]\n await methods.send_artist(artist, message.chat.id)\n\n\nasync def post_to_channel_handler(message):\n album = await deezer_api.getalbum(message.text.split('/')[-1])\n chat = await bot.get_chat(-1001171972924)\n await methods.send_album(album, chat, send_all=True)\n await bot.send_message(140999479, 'done')\n\n\nasync def spotify_handler(message):\n spotify_song = await var.spot.get_track(message.text)\n search_query = '%s %s' % (\n spotify_song.artists[0].name,\n re.match(r'[^\\(\\[\\-]+', spotify_song.name).group(0))\n search_results = await deezer_api.search(q=search_query)\n if not search_results:\n return await bot.send_message(\n message.chat.id, 'Sorry, track is not found on Deezer')\n await methods.send_track(search_results[0], message.chat)\n\n\nasync def spotify_playlist_handler(message):\n spotify_playlist = await var.spot.get_playlist(message.text)\n for track in spotify_playlist:\n try:\n search_query = '{} {}'.format(\n track.artists[0].name, re.match(r'[^\\(\\[\\-]+', track.name).group(0))\n search_results = await deezer_api.search(q=search_query)\n if search_results:\n await methods.send_track(search_results[0], message.chat)\n else:\n await bot.send_message(\n chat_id=message.chat.id,\n text=f'Sorry, track {track.artists[0].name} - {track.name} is not found on Deezer')\n except Exception as e:\n print(e)\n await sleep(.5)\n\n\nasync def spotify_album_handler(message):\n spotify_album = await var.spot.get_album(message.text)\n search_results = await deezer_api.search(\n 'album', f'{spotify_album.artists[0].name} {spotify_album.name}')\n if not search_results:\n return await bot.send_message(\n chat_id=message.chat.id,\n text=f'Sorry, album {spotify_album.name} by {spotify_album.artists[0].name} is not found on Deezer')\n await methods.send_album(search_results[0], message.chat)\n\n\nasync def spotify_artist_handler(message):\n spotify_artist = await var.spot.get_artist(message.text)\n search_results = await deezer_api.search('artist', spotify_artist.name)\n await methods.send_artist(search_results[0], message.chat.id)\n\n\nasync def 
artist_handler(message):\n artist = await deezer_api.getartist(message.text.split('/')[-1])\n await methods.send_artist(artist, message.chat.id)\n\n\nasync def album_handler(message):\n album = await deezer_api.getalbum(message.text.split('/')[-1])\n await methods.send_album(album, message.chat)\n\n\nasync def playlist_handler(message):\n tracks = await deezer_api.getplaylist(message.text.split('/')[-1])\n\n for track in tracks:\n try:\n await methods.send_track(track, message.chat)\n await sleep(.02)\n except Exception as e:\n print(type(e), e)\n await bot.send_message(message.chat.id, 'playlist done')\n\n\nasync def cache_playlist(message):\n tracks = await deezer_api.getplaylist(message.text.split('/')[-1])\n for track in tracks:\n if not await db_utils.get_track(track.id):\n await methods.send_track(track, message.chat)\n await sleep(.01)\n await bot.send_message(message.chat.id, 'playlist cached')\n\n\nasync def track_handler(message):\n track = await deezer_api.gettrack(message.text.split('/')[-1])\n if utils.already_downloading(track.id):\n return\n await methods.send_track(track, message.chat)\n db_utils.add_user(message.from_user)\n\n\nasync def search_handler(message):\n search_results = await deezer_api.search(q=message.text)\n if not len(search_results):\n return await bot.send_message(message.chat.id, 'Nothing was found')\n\n await bot.send_message(\n chat_id=message.chat.id,\n text=message.text + ':',\n reply_markup=inline_keyboards.search_results_keyboard(search_results, 1))\n db_utils.add_user(message.from_user)\n"
},
{
"alpha_fraction": 0.648915708065033,
"alphanum_fraction": 0.6555036902427673,
"avg_line_length": 29.872880935668945,
"blob_id": "d6cc5ecf92192cc0511816ae994ea285c6ec7015",
"content_id": "b7ed55dbfe728e518937ea020e724149fa69b2a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3643,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 118,
"path": "/spotify.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "import requests\nimport base64\nimport re\nfrom time import time\n\nfrom AttrDict import AttrDict\nfrom utils import encode_url, request_get\nfrom var import var\n\nspotify_track = re.compile(r'open.spotify.com/track/[^? ]+')\nspotify_album = re.compile(r'open.spotify.com/album/[^? ]+')\nspotify_artist = re.compile(r'open.spotify.com/artist/[^? ]+')\nspotify_playlist = re.compile(r'open.spotify.com/.+/playlist/[^? ]+')\n\nclass Spotify_API:\n\tdef __init__(self, client, secret):\n\t\tself.client = client\n\t\tself.secret = secret\n\t\ts = self.client + ':' + self.secret\n\t\tself.auth = base64.urlsafe_b64encode(s.encode()).decode()\n\t\tr = requests.post(\n\t\t\t'https://accounts.spotify.com/api/token',\n\t\t\theaders={\n\t\t\t\t'Authorization': f'Basic {self.auth}',\n\t\t\t\t'Content-Type': 'application/x-www-form-urlencoded'},\n\t\t\tdata={'grant_type': 'client_credentials'})\n\t\tjson = r.json()\n\t\tself.token_type = json['token_type']\n\t\tself.token = json['access_token']\n\t\tself.expires_in = time() + json['expires_in']\n\n\tdef restart(self):\n\t\tself.__init__(self.client, self.secret)\n\n\tasync def search(self, query, obj_type='track', limit=5):\n\t\tif self.expires_in < time():\n\t\t\tself.restart()\n\t\tdata = {'type': obj_type, 'limit': limit, 'q': query}\n\t\theaders = {'Authorization': f'Bearer {self.token}'}\n\t\tr = await request_get(encode_url(\n\t\t\t'https://api.spotify.com/v1/search', data=data), headers=headers)\n\t\tjson = await r.json(content_type=None)\n\t\tresult = []\n\t\tif json['tracks']['total'] != 0:\n\t\t\tfor item in json['tracks']['items']:\n\t\t\t\tresult.append(AttrDict(item))\n\t\treturn result\n\n\tasync def get_track(self, url, retries=0):\n\t\tif self.expires_in < time():\n\t\t\tself.restart()\n\t\tif retries > 3:\n\t\t\traise ValueError('Cannot get track')\n\t\turl = re.findall(spotify_track, url)[0]\n\t\turl = 'https://' + url\n\t\ttrack_id = url.split('/')[-1]\n\t\tr = await request_get(\n\t\t\tf'https://api.spotify.com/v1/tracks/{track_id}',\n\t\t\theaders={'Authorization': f'Bearer {self.token}'})\n\t\tprint(r.url)\n\t\tjson = await r.json(content_type=None)\n\t\ttry:\n\t\t\tjson['error']\n\t\texcept KeyError:\n\t\t\treturn AttrDict(json)\n\t\telse:\n\t\t\tself.restart()\n\t\t\treturn await self.get_track(url, retries + 1)\n\n\tasync def get_playlist(self, url):\n\t\tif self.expires_in < time():\n\t\t\tself.restart()\n\t\turl = re.findall(spotify_playlist, url)[0]\n\t\tplaylist_id = url.split('/')[-1]\n\t\tr = await request_get(\n\t\t\tf'https://api.spotify.com/v1/playlists/{playlist_id}/tracks',\n\t\t\theaders={'Authorization': f'Bearer {self.token}'})\n\t\tprint(r.url)\n\t\tjson = await r.json(content_type=None)\n\t\tif not json.get('error'):\n\t\t\treturn [AttrDict(track['track']) for track in json['items']]\n\t\telse:\n\t\t\traise ValueError('Error getting playlist: ' + json.get('error'))\n\n\tasync def get_album(self, url):\n\t\tif self.expires_in < time():\n\t\t\tself.restart()\n\t\turl = re.findall(spotify_album, url)[0]\n\t\talbum_id = url.split('/')[-1]\n\t\tr = await request_get(\n\t\t\tf'https://api.spotify.com/v1/albums/{album_id}',\n\t\t\theaders={'Authorization': f'Bearer {self.token}'})\n\t\tprint(r.url)\n\t\tjson = await r.json(content_type=None)\n\t\tif not json.get('error'):\n\t\t\treturn AttrDict(json)\n\t\telse:\n\t\t\traise ValueError('Error getting albums: ' + json.get('error'))\n\n\t\t\n\tasync def get_artist(self, url):\n\t\tif self.expires_in < time():\n\t\t\tself.restart()\n\t\turl = 
re.findall(spotify_artist, url)[0]\n\t\tartist_id = url.split('/')[-1]\n\t\tr = await request_get(\n\t\t\tf'https://api.spotify.com/v1/artists/{artist_id}',\n\t\t\tjson={'Authorization': f'Bearer {self.token}'})\n\t\tprint(r.url)\n\t\tjson = await r.json(content_type=None)\n\t\tif not json.get('error'):\n\t\t\treturn AttrDict(json)\n\t\telse:\n\t\t\traise ValueError('Error getting artist: ' + json.get('error'))\n\n\nif __name__ == '__main__':\n\tpass\n"
},
{
"alpha_fraction": 0.6827479600906372,
"alphanum_fraction": 0.6840987801551819,
"avg_line_length": 37.38518524169922,
"blob_id": "87ed0b2ebcde806273e80360b84b59162c70a79d",
"content_id": "8849ff4233a17a6590010ab62c9a7f0587c5e1b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5182,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 135,
"path": "/bot.py",
"repo_name": "SkueletorTlg/Deezer_bot",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport asyncio\nimport re\nfrom sys import argv\n\nimport aiohttp\nimport aioredis\nfrom aiogram import Bot, types\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.dispatcher import Dispatcher\n\nimport config\nimport utils\nfrom filters import filters\nfrom middlewares import Middleware\nfrom sql import database\nfrom var import var\n\nloop = asyncio.get_event_loop()\n\n\ndef register_handlers(dp, handlers, inline_handlers, callback_handlers):\n spotify_playlist = re.compile(r'.+spotify.com/.+/playlist/.+')\n\n if '-a' in argv:\n dp.register_message_handler(handlers.only_admin_handler)\n dp.register_message_handler(\n handlers.audio_file_handler,\n content_types=[types.ContentType.AUDIO])\n dp.register_message_handler(\n handlers.soundcloud_link_handler,\n lambda m: 'soundcloud.com' in m.text)\n dp.register_callback_query_handler(\n callback_handlers.soundcloud_handler,\n lambda c: c.data.startswith('sc_track'))\n dp.register_message_handler(handlers.start_command_handler, commands=['start'])\n dp.register_message_handler(handlers.quality_setting_handler, commands=['quality'])\n dp.register_message_handler(\n handlers.getstats_handler, commands=['stats'])\t\n dp.register_message_handler(\n handlers.today_stats_handler, commands=['today'])\t\n dp.register_message_handler(\n handlers.redownload_handler, commands=['re', 'redownload'])\n dp.register_message_handler(\n handlers.post_to_channel_handler,\n lambda m: m.chat.id in config.admins, commands=['post'])\n dp.register_message_handler(\n handlers.artist_search_handler, commands=['a', 'artist'])\n dp.register_message_handler(handlers.diskography_handler, commands=['d'])\n dp.register_message_handler(\n handlers.spotify_album_handler,\n lambda m: 'open.spotify.com/album' in m.text)\n dp.register_message_handler(\n handlers.spotify_artist_handler, lambda m: 'open.spotify.com/artist' in m.text)\n dp.register_message_handler(\n handlers.spotify_playlist_handler,\n lambda m: re.match(spotify_playlist, m.text))\n dp.register_message_handler(\n handlers.spotify_handler,\n lambda m: 'open.spotify.com/track' in m.text)\n dp.register_message_handler(\n handlers.artist_handler,\n lambda m: '/artist/' in m.text)\n dp.register_message_handler(\n handlers.album_handler,\n lambda m: '/album/' in m.text)\n dp.register_message_handler(\n handlers.cache_playlist,\n lambda m: '/playlist/' in m.text and '/c ' in m.text)\n dp.register_message_handler(\n handlers.playlist_handler,\n lambda m: '/playlist/' in m.text)\n dp.register_message_handler(\n handlers.track_handler, \n lambda m: '/track/' in m.text)\n dp.register_message_handler(\n handlers.search_handler, lambda m: m.chat.type == 'private')\n dp.register_inline_handler(\n inline_handlers.artist_search_inline_handler,\n lambda q: '.ar' in q.query)\n dp.register_inline_handler(inline_handlers.inline_handler)\n dp.register_callback_query_handler(\n callback_handlers.quality_setting_hanlder,\n lambda d: d.data.startswith('quality'))\n dp.register_callback_query_handler(\n callback_handlers.finish_download_handler,\n lambda d: d.data == 'finish_download')\n dp.register_callback_query_handler(\n callback_handlers.large_file_handler,\n lambda d: 'big_file' in d.data)\n dp.register_callback_query_handler(\n callback_handlers.pages_handler,\n lambda d: 'page' in d.data)\n dp.register_callback_query_handler(\n callback_handlers.stats_callback_handler,\n lambda d: d.data == 'stats')\n dp.register_callback_query_handler(\n 
callback_handlers.today_stats_callback_handler,\n lambda d: d.data == 'today')\n dp.register_callback_query_handler(\n callback_handlers.sc_artist_callback_handler,\n lambda d: 'sc_artist' in d.data)\n dp.register_callback_query_handler(\n callback_handlers.sc_callback_handler,\n lambda d: 'soundcloud' in d.data)\n dp.register_callback_query_handler(\n callback_handlers.artist_callback_handler,\n lambda d: 'artist' in d.data)\n dp.register_callback_query_handler(callback_handlers.callback_handler)\n dp.register_chosen_inline_handler(inline_handlers.finish_download_handler)\n dp.middleware.setup(Middleware())\n\n\ntry:\n global session\n bot = Bot(token=config.bot_token, loop=loop)\n storage = MemoryStorage()\n dp = Dispatcher(bot, storage=storage)\n var.downloading = {}\n var.session = aiohttp.ClientSession(raise_for_status=True)\n print('created session')\n var.CSRFToken = None\n var.loop = loop\n\n from spotify import Spotify_API\n var.spot = Spotify_API(\n config.spotify_client, config.spotify_secret)\n var.db = database('db.sqlite')\n var.conn = loop.run_until_complete(aioredis.create_connection(\n ('localhost', 6379), encoding='utf-8', db=4, loop=loop))\n print('datebase connected')\n \nexcept Exception as e:\n print(e)\n"
}
] | 18 |
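For context, a minimal aiogram 2.x sketch of the registration pattern the bot file above relies on (the token and the handler body are placeholders, not taken from the repo):

import asyncio
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher

bot = Bot(token='PLACEHOLDER_TOKEN')  # hypothetical token
dp = Dispatcher(bot)

async def start_command_handler(message: types.Message):
    await message.answer('hello')  # placeholder reply

# handlers are attached to the dispatcher, matched by command filter
dp.register_message_handler(start_command_handler, commands=['start'])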
ag300g/leecode | https://github.com/ag300g/leecode | 48da8aeb57cdf0af260c66651515676828ed4bfb | 099432775d604d67b021ea0b1b1e5407fe95c3a0 | 9007e11ff17d8306ad7fa2d01120b849a6416145 | refs/heads/master | 2020-03-29T13:08:58.432511 | 2018-10-22T06:17:47 | 2018-10-22T06:17:47 | 149,942,855 | 0 | 0 | null | 2018-09-23T02:56:03 | 2018-09-07T02:37:02 | 2016-01-19T23:17:39 | null | [
{
"alpha_fraction": 0.5794297456741333,
"alphanum_fraction": 0.585539698600769,
"avg_line_length": 29.65625,
"blob_id": "7ffa3b3d13781ca748d3543dee22fab11ab09cf7",
"content_id": "9c739e7ddf35dfd431acfa5a845dd18b3d929eba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 982,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 32,
"path": "/zigzagConversion.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# The string \"PAYPALISHIRING\" is written in a zigzag pattern on a given number of rows like this: \n# (you may want to display this pattern in a fixed font for better legibility)\n\n# P A H N\n# A P L S I I G\n# Y I R\n# And then read line by line: \"PAHNAPLSIIGYIR\"\n# Write the code that will take a string and make this conversion given a number of rows:\n# string convert(string text, int nRows);\n# convert(\"PAYPALISHIRING\", 3) should return \"PAHNAPLSIIGYIR\".\n\nclass Solution:\n # @return a string\n def convert(self, s, nRows):\n if nRows == 1: return s\n zigzag = 2*nRows-2\n rows = ['']*nRows\n\n for i in range(len(s)):\n if i%zigzag < nRows:\n rows[i%zigzag] += s[i]\n else:\n rows[nRows-2-(i%zigzag)%nRows] += s[i]\n out = ''\n for i in rows: out += i\n return out\n\nif __name__ == '__main__':\n s = 'PAYPALISHIRING'\n test = Solution()\n out = test.convert(s, 3)\n print out\n\n"
},
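A quick self-check of the row-index mapping used in zigzagConversion.py above; zigzag_rows is a hypothetical helper, assuming nRows >= 2:

def zigzag_rows(s, nRows):
    cycle = 2 * nRows - 2          # one down-up period of the zigzag
    rows = [''] * nRows
    for i, ch in enumerate(s):
        r = i % cycle
        # rows 0..nRows-1 on the way down, then mirror back up
        rows[r if r < nRows else cycle - r] += ch
    return ''.join(rows)

assert zigzag_rows('PAYPALISHIRING', 3) == 'PAHNAPLSIIGYIR'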
{
"alpha_fraction": 0.4990958273410797,
"alphanum_fraction": 0.5027124881744385,
"avg_line_length": 22.65217399597168,
"blob_id": "6cf4ed214d4025f342826c97b5ab9c15e645dc9a",
"content_id": "dabc33309e7b4e1d373ab733b8ec58d9d0b38380",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 553,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 23,
"path": "/reverseList.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Reverse a singly linked list.\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param {ListNode} head\n # @return {ListNode}\n def reverseList(self, head):\n v = []\n while head != None:\n v.append(head.val)\n head = head.next\n \n head = ListNode(0)\n new = head\n for val in v[::-1]:\n head.next = ListNode(val)\n head = head.next\n return new.next\n "
},
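The solution above copies all values into a list first; a common O(1)-space alternative reverses the pointers in place. A minimal self-contained sketch (reverse_list_iterative is a hypothetical name):

class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def reverse_list_iterative(head):
    prev = None
    while head is not None:
        # re-point each node at its predecessor while advancing
        head.next, prev, head = prev, head, head.next
    return prev

# build 1 -> 2 -> 3, reverse, collect values
a, b, c = ListNode(1), ListNode(2), ListNode(3)
a.next, b.next = b, c
r = reverse_list_iterative(a)
vals = []
while r: vals.append(r.val); r = r.next
print(vals)  # [3, 2, 1]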
{
"alpha_fraction": 0.5719523429870605,
"alphanum_fraction": 0.5866177678108215,
"avg_line_length": 34.19355010986328,
"blob_id": "8af4ea6dd494aa21f59f1698312e2ef9eb9e5190",
"content_id": "c1530bd3b18e493a38ed5479807ddccd3bb90a57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1091,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 31,
"path": "/bestTimeIII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Say you have an array for which the ith element is the price of a given stock on day i.\n# Design an algorithm to find the maximum profit. \n# You may complete at most two transactions.\n\n# Note:\n# You may not engage in multiple transactions at the same time \n# (ie, you must sell the stock before you buy again).\n\nclass Solution:\n # @param prices, a list of integer\n # @return an integer\n def maxProfit(self, prices):\n if prices == []: return 0\n maxtol, leftmax, rightmax = 0, 0, 0\n left = [] \n low, high = prices[0], prices[-1]\n for i in range(len(prices)):\n leftmax = max(leftmax, prices[i]-low)\n left.append(leftmax)\n if prices[i] < low: low = prices[i]\n for j in range(len(prices)-1, -1, -1):\n rightmax = max(rightmax, high-prices[j])\n maxtol = max(maxtol, left[j]+rightmax)\n if prices[j] > high: high = prices[j]\n return maxtol\n \nif __name__ == '__main__':\n prices = [2,1,4,5,2,9,7]\n test = Solution()\n out = test.maxProfit(prices)\n print out\n"
},
{
"alpha_fraction": 0.5933660864830017,
"alphanum_fraction": 0.601965606212616,
"avg_line_length": 26.066667556762695,
"blob_id": "959d645736c806caefb63f12bebe81eea782fbb4",
"content_id": "2aa17ef9ae3b2bee9e968748636452af8d645435",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 814,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 30,
"path": "/minDepth.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree, find its minimum depth.\n\n# The minimum depth is the number of nodes along the shortest path \n# from the root node down to the farthest leaf node.\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return an integer\n def minDepth(self, root):\n if root == None: return 0\n left, right = 1, 1\n left += self.minDepth(root.left)\n right += self.minDepth(root.right)\n if left == 1: return right\n if right == 1: return left\n return min(left, right)\n\nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n test = Solution()\n out = test.minDepth(root)\n print out\n\n\n"
},
{
"alpha_fraction": 0.5653179287910461,
"alphanum_fraction": 0.5942196249961853,
"avg_line_length": 38.3636360168457,
"blob_id": "20a2c50042dbddb29711a604be0f9543552c8383",
"content_id": "3e3331c4abfc65f490e11a7d4f82cf415978bb6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 869,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 22,
"path": "/findPeakElement.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#A peak element is an element that is greater than its neighbors.\n#Given an input array where num[i] ≠ num[i+1], find a peak element and return its index.\n\n#The array may contain multiple peaks, in that case return the index to any one of the peaks is fine.\n#You may imagine that num[-1] = num[n] = -∞.\n\n#For example, in array [1, 2, 3, 1], 3 is a peak element and your function should return the index number 2.\n\nclass Solution:\n # @param nums, an integer[]\n # @return an integer\n def findPeakElement(self, nums):\n l = len(nums)\n if l == 0: return\n if l == 1: return 0\n if l == 2: return 0 if nums[0] > nums[1] else 1\n \n for i in range(1, l):\n if nums[i] > nums[i-1] and (i == l-1 or nums[i] > nums[i+1]):\n return i\n return 0 if nums[0] > nums[-1] else l-1"
},
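The linear scan above works; since num[i] != num[i+1] is guaranteed, a binary search finds a peak in O(log n). A minimal sketch (find_peak_binary is a hypothetical name):

def find_peak_binary(nums):
    lo, hi = 0, len(nums) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if nums[mid] < nums[mid + 1]:
            lo = mid + 1   # ascending here, so some peak lies to the right
        else:
            hi = mid       # descending, so mid (or something left) is a peak
    return lo

assert find_peak_binary([1, 2, 3, 1]) == 2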
{
"alpha_fraction": 0.5301120281219482,
"alphanum_fraction": 0.5420168042182922,
"avg_line_length": 34.42499923706055,
"blob_id": "e83777ee0618c0f424123cb557de32a32191b822",
"content_id": "6f5b2353a706d9cf76afdb05e8c7688c3534f92c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1428,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 40,
"path": "/mergsort.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# mergesort\n\n# the two sorted sublists to be merged are:\n# lt[left:mid] and lt[mid, right]\n\ndef merge(lt, left, mid, right):\n i, j = left, mid\n lt3 = [0] * len(lt) #temporarily store sorted sublist\n \n for k in range(left, right): # lt3[left:right] will store sorted results\n # if scanning lt[left:mid] hasn't over, \n # and the element in lt[left:mid] is smaller than that in lt[mid:right], \n # or lt[mid:right] has been scanned over\n # change lt[k] to the element in lt[left:mid]\n if i < mid and (j >= right or lt[i] < lt[j]):\n lt3[k] = lt[i]\n i += 1\n # else (i.e. lt[left:mid] has been scanned over,\n # or element in lt3[left:mid] is larger than lt3[mid:right]\n else:\n lt3[k] = lt[j]\n j += 1 \n \n # overwrite the sublist lt[left:right] to the sorted one \n lt[left:right] = lt3[left:right]\n \ndef mergesort(lt):\n # started with sublist of width 1\n width = 1\n \n # max of with is len(lt)\n while width <= len(lt):\n # merge sublists lt[i:i+width] and lt[i+width:i+2*width]\n for i in range(0, len(lt), 2*width):\n mid = min(i+width, len(lt))\n right = min(i+2*width,len(lt))\n if mid < right:\n merge(lt, i, mid ,right)\n # double size of the sublists to be merged in the next step\n width *= 2\n \n "
},
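A usage sketch for the bottom-up mergesort above, assuming the mergesort function from mergsort.py is in scope (it sorts the list in place):

data = [5, 2, 4, 6, 1, 3]
mergesort(data)
print(data)   # expected: [1, 2, 3, 4, 5, 6]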
{
"alpha_fraction": 0.556587815284729,
"alphanum_fraction": 0.5616554021835327,
"avg_line_length": 25.68181800842285,
"blob_id": "3efba2e914c4455b6ec5078c196458f0aff3ac6c",
"content_id": "8328f55359a5d93a3ad65b421e8ae94c0540c135",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1184,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 44,
"path": "/listToBST.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a singly linked list where elements are sorted in ascending order, \n# convert it to a height balanced BST.\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a list node\n # @return a tree node\n def sortedListToBST(self, head):\n heads = [] \n while head != None:\n heads.append(head)\n head = head.next\n \n return self.buildTree(heads)\n \n def buildTree(self, heads): \n if heads == []: return\n median = len(heads)//2 \n root = TreeNode(heads[median].val) \n root.left = self.buildTree(heads[:median])\n root.right = self.buildTree(heads[median+1:])\n \n return root\n \nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(4)\n head.next.next.next = ListNode(5)\n \n test = Solution()\n out = test.sortedListToBST(head) \n "
},
{
"alpha_fraction": 0.45466846227645874,
"alphanum_fraction": 0.47293639183044434,
"avg_line_length": 29.75,
"blob_id": "6fc6211f920cb98b89c4b089d5aba3c80a57d723",
"content_id": "6f171e88858808cece60e733eb805b94919c9f3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1478,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 48,
"path": "/uniqueBSTII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given n, generate all structurally unique BST's (binary search trees) \n# that store values 1...n.\n\n# For example,\n# Given n = 3, your program should return all 5 unique BST's shown below.\n\n# 1 3 3 2 1\n# \\ / / / \\ \\\n# 3 2 1 1 3 2\n# / / \\ \\\n# 2 1 2 3\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @return an integer\n def generateTrees(self, n):\n if n == 0: return [None]\n alltrees = []\n for i in range(1,n+1):\n alltrees += self.buildTrees(i, 0, n+1)\n return alltrees\n\n def buildTrees(self, root, small, large): \n if large-small == 2: return [TreeNode(root)]\n left, right = [], []\n for j in range(small+1, root): \n left += self.buildTrees(j, small, root)\n for k in range(root+1, large): \n right += self.buildTrees(k, root, large) \n new = []\n if left == []: left = [None]\n if right == []: right = [None]\n for m in left:\n for n in right:\n newtree = TreeNode(root)\n newtree.left, newtree.right = m, n\n new.append(newtree)\n return new\n \nif __name__ == '__main__':\n test = Solution()\n out = test.generateTrees(3)\n\n\n"
},
{
"alpha_fraction": 0.5248070359230042,
"alphanum_fraction": 0.5512679219245911,
"avg_line_length": 28.129032135009766,
"blob_id": "d5bccfe45177b0263513da2b830888359da99bab",
"content_id": "bbd4f00564149b2bfeb64d2cba06a4920c5a8bbc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 907,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 31,
"path": "/jumpGame.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array of non-negative integers, you are initially positioned at the first index of the array.\n# Each element in the array represents your maximum jump length at that position.\n# Determine if you are able to reach the last index.\n\n# For example:\n# A = [2,3,1,1,4], return true.\n# A = [3,2,1,0,4], return false.\n\nclass Solution:\n # @param A, a list of integers\n # @return a boolean\n def canJump(self, A):\n if A == [0]: return True\n for i in range(len(A)):\n if A[i] == 0: \n if i != len(A) -1 and self.decide(A, i) == False:\n return False\n return True\n\n def decide(self, A, loc): # A[loc] == 0\n for j in range(loc):\n if A[j] > loc - j: return True\n return False\n \nif __name__ == '__main__':\n A = [2,3,1,1,4]\n B = [3,2,1,0,4]\n\n test = Solution()\n out = test.canJump(B)\n print out\n \n"
},
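An O(n) greedy alternative to the zero-scanning approach above: track the farthest index reachable so far and fail as soon as the scan passes it. A minimal sketch (can_jump_greedy is a hypothetical name):

def can_jump_greedy(A):
    reach = 0
    for i, step in enumerate(A):
        if i > reach:               # this index can never be reached
            return False
        reach = max(reach, i + step)
    return True

assert can_jump_greedy([2, 3, 1, 1, 4]) == True
assert can_jump_greedy([3, 2, 1, 0, 4]) == False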
{
"alpha_fraction": 0.43628185987472534,
"alphanum_fraction": 0.48125937581062317,
"avg_line_length": 27.404254913330078,
"blob_id": "044f61303759f4ba889ebc273e7caa62decf6b7f",
"content_id": "01732acf80a7eb0adbe3e21e7d3ea06f38976811",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1334,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 47,
"path": "/isUglyII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Write a program to find the n-th ugly number.\n\n#Ugly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10 ugly numbers.\n\n#Note that 1 is typically treated as an ugly number.\n\n#This soluiton exceed time limit\nclass Solution(object):\n def nthUglyNumber(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n uglys = [1]\n count = 1\n while count < n:\n new1 = self.findMin(uglys, 2)\n new2 = self.findMin(uglys, 3)\n new3 = self.findMin(uglys, 5)\n uglys.append(min(new1, new2, new3))\n count += 1\n return uglys[-1]\n \n def findMin(self, uglys, factor):\n for i in uglys:\n if i*factor > uglys[-1]:\n return i*factor\n \n\n#Solution from Internet, very smart!\nclass Solution1:\n # @param {integer} n\n # @return {integer}\n def nthUglyNumber(self, n):\n q = [1]\n i2 = i3 = i5 = 0\n while len(q) < n:\n m2, m3, m5 = q[i2] * 2, q[i3] * 3, q[i5] * 5\n m = min(m2, m3, m5)\n if m == m2:\n i2 += 1\n if m == m3:\n i3 += 1\n if m == m5:\n i5 += 1\n q += m,\n return q[-1]"
},
{
"alpha_fraction": 0.49302324652671814,
"alphanum_fraction": 0.5069767236709595,
"avg_line_length": 28.340909957885742,
"blob_id": "71e9ce5c1fda115325e900f59c36895bd70167f8",
"content_id": "ce89e5d233966615ecd4bd5c6796b9d963d7ae99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1304,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 44,
"path": "/matchRegExpr.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Implement regular expression matching with support for '.' and '*'.\n\n# '.' Matches any single character.\n# '*' Matches zero or more of the preceding element.\n\n# The matching should cover the entire input string (not partial).\n\n# The function prototype should be:\n# bool isMatch(const char *s, const char *p)\n\n# Some examples:\n# isMatch(\"aa\",\"a\") → false\n# isMatch(\"aa\",\"aa\") → true\n# isMatch(\"aaa\",\"aa\") → false\n# isMatch(\"aa\", \"a*\") → true\n# isMatch(\"aa\", \".*\") → true\n# isMatch(\"ab\", \".*\") → true \n# isMatch(\"aab\", \"c*a*b\") → true \n\n# This version exceeds time limit for large data set\nclass Solution:\n # @return a boolean\n def isMatch(self, s, p):\n ls, lp = len(s), len(p) \n if lp == 0: return ls == 0\n \n if lp == 1 or p[1] != '*':\n if ls == 0 or p[0] != '.' and s[0] != p[0]:\n return False\n return self.isMatch(s[1:], p[1:])\n else:\n i = -1\n while i < ls and (i < 0 or p[0] == '.' or p[0] == s[i]):\n if self.isMatch(s[i+1:], p[2:]):\n return True\n i += 1\n return False\n\nif __name__ == '__main__':\n s, p = \"aaaaaaaaaaaaab\", \"a*a*a*a*a*a*a*a*a*a*c\"\n\n test = Solution()\n out = test.isMatch(s, p)"
},
{
"alpha_fraction": 0.4945705831050873,
"alphanum_fraction": 0.49753206968307495,
"avg_line_length": 22.372093200683594,
"blob_id": "8cc50e89c8ad415ddfb64c7868614dcf9bb52ba8",
"content_id": "7efb2755edf9250c46bb3989f295d32745553c0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1013,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 43,
"path": "/MinStack.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.\n\n# push(x) -- Push element x onto stack.\n# pop() -- Removes the element on top of the stack.\n# top() -- Get the top element.\n# getMin() -- Retrieve the minimum element in the stack.\n\nclass MinStack(object):\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.stack = []\n self.min = []\n\n def push(self, x):\n \"\"\"\n :type x: int\n :rtype: nothing\n \"\"\"\n self.stack.append(x)\n if self.min == []: self.min.append(x)\n else: self.min.append(min(self.min[-1], x))\n\n def pop(self):\n \"\"\"\n :rtype: nothing\n \"\"\"\n self.stack.pop()\n self.min.pop()\n\n def top(self):\n \"\"\"\n :rtype: int\n \"\"\"\n return self.stack[-1]\n\n def getMin(self):\n \"\"\"\n :rtype: int\n \"\"\"\n if self.min == []: return\n return self.min[-1]\n "
},
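A usage sketch for the MinStack above, showing the parallel-min invariant (each element of self.min holds the minimum of the stack up to that depth):

s = MinStack()
for x in [3, 1, 2]:
    s.push(x)
print(s.getMin())  # 1
s.pop()            # removes 2
s.pop()            # removes 1
print(s.getMin())  # 3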
{
"alpha_fraction": 0.49041712284088135,
"alphanum_fraction": 0.5253664255142212,
"avg_line_length": 24.941177368164062,
"blob_id": "7e8e8960cc1536a2ec79b1bcbf4bbd82e5e208b1",
"content_id": "c86139ba803c82d6eeb48eb8ad42ff7f5e5edd2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 887,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 34,
"path": "/mergetwolists.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Merge two sorted singly linked lists\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param two ListNodes\n # @return a ListNode\n def mergeTwoLists(self, l1, l2):\n new = ListNode(0)\n back = new\n\n while l1 != None or l2 != None:\n if l1 == None: new.next, l2 = l2, l2.next\n elif l2 == None: new.next, l1 = l1, l1.next\n elif l1.val > l2.val:\n new.next, l2 = l2, l2.next\n else:\n new.next, l1 = l1, l1.next\n new = new.next\n # the first node is set by hand, so drop it\n return back.next\n\nif __name__ == '__main__':\n l1 = ListNode(1)\n l1.next = ListNode(3)\n l2 = ListNode(2)\n # l2.next = ListNode(4)\n \n sol = Solution()\n pp = sol.mergeTwoLists(l1, l2)\n \n"
},
{
"alpha_fraction": 0.5575757622718811,
"alphanum_fraction": 0.581818163394928,
"avg_line_length": 23.44444465637207,
"blob_id": "61c3ff1e6b3641c69fbe06aa38c4e0ea45dd7c03",
"content_id": "17ba69597d0d7e75e1d0f435e6796a5ed354356a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 660,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 27,
"path": "/max_height.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "\nclass Treenode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \ndef maxHeight(tree):\n if tree == None:\n return 0\n \n lmax = maxHeight(tree.left)\n rmax = maxHeight(tree.right)\n \n return max(lmax, rmax) + 1\n\nif __name__ == '__main__':\n tree = Treenode(3)\n tree.left = Treenode(9)\n tree.right = Treenode(0)\n tree.right.left = Treenode(15)\n tree.right.right = Treenode(7)\n tree.left.right = Treenode(20)\n tree.left.right.right = Treenode(25)\n tree.left.right.left = Treenode(30)\n tree.left.right.left.right = Treenode(13)\n \n print maxHeight(tree)"
},
{
"alpha_fraction": 0.5672316551208496,
"alphanum_fraction": 0.5875706076622009,
"avg_line_length": 30.64285659790039,
"blob_id": "b062e14343fe7c9dc935c85bf3e510fbc600be15",
"content_id": "e329594dc675c1dbc296f35714b42fe6f24890c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 885,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 28,
"path": "/productExceptSelf.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Given an array of n integers where n > 1, nums, return an array output such that output[i] is equal to the product of all the elements of nums except nums[i].\n\n#Solve it without division and in O(n).\n\n#For example, given [1,2,3,4], return [24,12,8,6].\n\n#Follow up:\n#Could you solve it with constant space complexity? (Note: The output array does not count as extra space for the purpose of space complexity analysis.)\n\nclass Solution:\n # @param {integer[]} nums\n # @return {integer[]}\n def productExceptSelf(self, nums):\n l = len(nums)\n head, tail = 1, 1\n heads, tails = [1],[1] \n out = []\n\n for i in range(l-1):\n head *= nums[i]\n tail *= nums[l-1-i]\n heads.append(head)\n tails.append(tail)\n \n for i in range(l):\n out.append(heads[i]*tails[l-1-i])\n \n return out"
},
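The heads/tails version above keeps two full prefix/suffix arrays; the constant-extra-space variant folds the suffix pass directly into the output array. A minimal sketch (product_except_self_o1 is a hypothetical name):

def product_except_self_o1(nums):
    n = len(nums)
    out = [1] * n
    for i in range(1, n):             # out[i] = product of nums[0..i-1]
        out[i] = out[i-1] * nums[i-1]
    suffix = 1
    for i in range(n - 1, -1, -1):    # multiply in product of nums[i+1..]
        out[i] *= suffix
        suffix *= nums[i]
    return out

assert product_except_self_o1([1, 2, 3, 4]) == [24, 12, 8, 6]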
{
"alpha_fraction": 0.5019727945327759,
"alphanum_fraction": 0.508987307548523,
"avg_line_length": 27.873416900634766,
"blob_id": "b5bc390bd399b88cfcb261f91b0418881e24913d",
"content_id": "594f4673d3381cf4b227f3e6b7e3a2fb8f948cd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2281,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 79,
"path": "/wordLadderII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given two words (start and end), and a dictionary, \n# find the length of shortest transformation sequence from start to end, \n# such that:\n# 1.Only one letter can be changed at a time\n# 2.Each intermediate word must exist in the dictionary\n\n# For example,\n# Given:\n# start = \"hit\"\n# end = \"cog\"\n# dict = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\"]\n# As one shortest transformation is \"hit\" -> \"hot\" -> \"dot\" -> \"dog\" -> \"cog\",\n# return its length 5.\n\n# Note:\n# Return 0 if there is no such transformation sequence.\n# All words have the same length.\n# All words contain only lowercase alphabetic characters.\n\n# @param start, a string\n# @param end, a string\n# @param d, a set of string\n# @return an integer\n\n# time limit exceed \nclass Solution:\n # @param start, a string\n # @param end, a string\n # @param dict, a set of string\n # @return an integer\n def ladderLength(self, start, end, d):\n curr = [start]\n track = {start : [], end: []}\n for i in d: track[i] = []\n find = False\n\n while curr != [] and find == False:\n temp = []\n for i in curr:\n if self.distance(i, end):\n find = True\n track[end].append(i)\n continue\n for j in d:\n if j not in temp and self.distance(i,j):\n track[j].append(i)\n temp.append(j)\n for k in temp: d.remove(k) \n curr = temp\n\n if find == False: return []\n out, now = [], [[end]]\n while now[0][-1] != start:\n new = []\n for i in now:\n temp = []\n for j in track[i[-1]]:\n temp.append(i+[j]) # temp is list of lists\n new += temp\n now = new\n reorder = []\n for i in now: reorder.append(i[::-1])\n return reorder\n\n def distance(self, s1, s2):\n count = 0\n for i in range(len(s1)):\n if s1[i] != s2[i]: count += 1\n if count > 1: return False\n return True\n\nif __name__ == '__main__':\n start = \"red\"\n end = \"tax\"\n dict = set([\"ted\",\"tex\",\"red\",\"tax\",\"tad\",\"den\",\"rex\",\"pee\"])\n\n test = Solution()\n out = test.ladderLength(start, end, dict)\n print out\n"
},
{
"alpha_fraction": 0.40416666865348816,
"alphanum_fraction": 0.4277777671813965,
"avg_line_length": 28.625,
"blob_id": "04e8449610d0b139fb44d1b539c02c2149243269",
"content_id": "19e353710e0821d62c7050a4940603a15dda71af",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 720,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 24,
"path": "/twosum.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# two sum\n'''\n# this worked but too slow\nclass Solution:\n def twoSum(self, array, target):\n for i in range(len(array)):\n for j in range(i, len(array)):\n if array[i] + array[j] == target: \n print 'index1=', i+1, 'index2=', j+1\n''' \nclass Solution:\n def twoSum(self, num, target):\n dict = {}\n for i in range(len(num)): \n if dict.get(target-num[i]) == None:\n dict[num[i]] = i \n else:\n return (dict[target-num[i]]+1, i+1)\n \n \nif __name__ == '__main__':\n lt = [1,3,2,4,4,5,6,7,8,0]\n out = Solution()\n print out.twoSum(lt, 9)\n \n "
},
{
"alpha_fraction": 0.5128205418586731,
"alphanum_fraction": 0.5294871926307678,
"avg_line_length": 31.65217399597168,
"blob_id": "347983ad05f00d1b8ae0fa8c8c7dc24534078bce",
"content_id": "daa38d5a1df34f7a7065566f2ae77f27c393f7f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 782,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 23,
"path": "/minSubArrayLen.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Given an array of n positive integers and a positive integer s, \n# find the minimal length of a subarray of which the sum ≥ s. If there isn't one, return 0 instead.\n\n# For example, given the array [2,3,1,2,4,3] and s = 7,\n# the subarray [4,3] has the minimal length under the problem constraint.\n\nclass Solution(object):\n def minSubArrayLen(self, s, nums):\n \"\"\"\n :type s: int\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if sum(nums) < s: return 0\n l = len(nums)\n bucket = []\n for i in range(len(nums)):\n bucket.append(nums[i])\n while sum(bucket) >= s:\n l = min(l, len(bucket))\n bucket.pop(0)\n return l \n \n "
},
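The bucket version above recomputes sum() inside the loop, which makes it O(n^2) overall; keeping a running total gives the usual O(n) sliding window. A minimal sketch (min_sub_array_len is a hypothetical name):

def min_sub_array_len(s, nums):
    total, left, best = 0, 0, float('inf')
    for right in range(len(nums)):
        total += nums[right]
        while total >= s:                      # shrink while still valid
            best = min(best, right - left + 1)
            total -= nums[left]
            left += 1
    return 0 if best == float('inf') else best

assert min_sub_array_len(7, [2, 3, 1, 2, 4, 3]) == 2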
{
"alpha_fraction": 0.5567010045051575,
"alphanum_fraction": 0.5694360136985779,
"avg_line_length": 30.132076263427734,
"blob_id": "4dfbd99f63c98f3be76c164bce0428f574c9ca5f",
"content_id": "92c9ee572ece17227d4516da1a4eb12803103b42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1649,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 53,
"path": "/MedianFinder.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Median is the middle value in an ordered integer list. \n# If the size of the list is even, there is no middle value. So the median is the mean of the two middle value.\n\n# Examples: \n# [2,3,4] , the median is 3\n\n# [2,3], the median is (2 + 3) / 2 = 2.5\n\n# Design a data structure that supports the following two operations:\n\n# void addNum(int num) - Add a integer number from the data stream to the data structure.\n# double findMedian() - Return the median of all elements so far.\n\nimport heapq as hp\n\nclass MedianFinder:\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\" \n self.left, self.right = [], []\n\n def addNum(self, num):\n \"\"\"\n Adds a num into the data structure.\n :type num: int\n :rtype: void\n \"\"\"\n if len(self.left) == 0 or num < -self.left[0]:\n hp.heappush(self.left, -num) \n else: \n hp.heappush(self.right, num)\n \n if len(self.right) > len(self.left): \n hp.heappush(self.left, -hp.heappop(self.right))\n elif len(self.right) < len(self.left)-1:\n hp.heappush(self.right, -hp.heappop(self.left))\n \n def findMedian(self):\n \"\"\"\n Returns the median of current data stream\n :rtype: float\n \"\"\"\n if len(self.left) == 0: return\n if len(self.right) == len(self.left):\n return (self.right[0]-self.left[0])/2.0\n else: \n return -self.left[0]\n \n# Your MedianFinder object will be instantiated and called as such:\n# mf = MedianFinder()\n# mf.addNum(1)\n# mf.findMedian()"
},
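A usage sketch for the two-heap MedianFinder above, assuming the class from MedianFinder.py is in scope:

mf = MedianFinder()
for x in [2, 3, 4]:
    mf.addNum(x)
print(mf.findMedian())  # 3
mf.addNum(5)
print(mf.findMedian())  # 3.5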
{
"alpha_fraction": 0.5525113940238953,
"alphanum_fraction": 0.5639269351959229,
"avg_line_length": 24.294116973876953,
"blob_id": "17c319ba5f76e67ff22277cab2e9d0fd4f4d2e3c",
"content_id": "0b54608c7ad5a0340b6fce69ab0c2b5ece7a47aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 438,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 17,
"path": "/trailingZeros.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Given an integer n, return the number of trailing zeroes in n!.\n\n#Note: Your solution should be in logarithmic time complexity.\n\nclass Solution:\n # @param {integer} n\n # @return {integer}\n def trailingZeroes(self, n):\n res, count = n, 0 \n while res > 0:\n res = res/5\n count += res\n return count\n\nif __name__ == '__main__':\n x = Solution()\n print x.trailingZeroes(30)\n "
},
{
"alpha_fraction": 0.4699169993400574,
"alphanum_fraction": 0.4802904427051544,
"avg_line_length": 27.382352828979492,
"blob_id": "e366a141e4a93eb100281f23a35f3c978d6219ba",
"content_id": "214233b0bbe24db21e38ac103dfe8d3fba27f4f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 964,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 34,
"path": "/NqueensII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Follow up for N-Queens problem.\n\n# Now, instead outputting board configurations, \n#return the total number of distinct solutions.\n\nclass Solution:\n # @return a list of lists of string\n def totalNQueens(self, n):\n self.solution = []\n for i in range(n):\n self.add(n, [[0],[i]]) \n return len(self.solution) \n \n def add(self, n, loc): \n if len(loc[0]) == n:\n self.solution.append(loc) \n return \n for j in range(n):\n if self.check(n, loc, j):\n self.add(n, [loc[0]+[len(loc[0])], loc[1]+[j]]) \n return \n \n def check(self, n, loc, j): \n if j in loc[1]: \n return False\n for k in range(len(loc[0])):\n if len(loc[0]) - k == abs(loc[1][k] - j): \n return False\n return True\n \n \nif __name__ == '__main__':\n test = Solution()\n out = test.totalNQueens(4)"
},
{
"alpha_fraction": 0.45118483901023865,
"alphanum_fraction": 0.48056870698928833,
"avg_line_length": 29.171428680419922,
"blob_id": "194e9af52c376eb5b54efc0375b3c4082fa9830c",
"content_id": "119c2fa3c3f0dcb3db3c03ba17722c83b475141b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1055,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 35,
"path": "/subsets.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a set of distinct integers, S, return all possible subsets.\n\n# Note:\n# Elements in a subset must be in non-descending order.\n# The solution set must not contain duplicate subsets.\n# For example,\n# If S = [1,2,3], a solution is:\n# [[3], [1], [2], [1,2,3], [1,3], [2,3], [1,2], []]\nclass Solution:\n # @param S, a list of integer\n # @return a list of lists of integer\n def subsets(self, S):\n out = []\n for i in range(len(S)+1):\n out += self.combine(S, i)\n return out\n \n def combine(self, sets, k): # find subsets of length k\n c0 = [sorted(sets[0:k])]\n c1 = []\n \n for i in range(len(sets) - k):\n c1 = []\n for item in c0:\n for j in range(len(item)):\n new = sorted(item[0:j] + [sets[k+i]] + item[j+1:])\n if new not in c1:\n c1.append(new)\n c0 += c1 \n \n return c0\n \nif __name__ == '__main__':\n test = Solution()\n out = test.subsets([4,1,0])"
},
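An alternative to the combine() recursion above: enumerate all subsets with an n-bit mask, where bit j of i selects S[j]. A minimal sketch (subsets_bitmask is a hypothetical name):

def subsets_bitmask(S):
    n = len(S)
    # each i in [0, 2^n) encodes one subset
    return [[S[j] for j in range(n) if (i >> j) & 1]
            for i in range(1 << n)]

assert len(subsets_bitmask([1, 2, 3])) == 8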
{
"alpha_fraction": 0.3811320662498474,
"alphanum_fraction": 0.4000000059604645,
"avg_line_length": 17.64285659790039,
"blob_id": "fb44243c25a7b6bed7aaba1bcc5496ab3c270483",
"content_id": "80e4beb96b25523a068e68b905b42883b688793b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 265,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 14,
"path": "/MaxSub.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "def maxsub(lt):\n maxi, sumi = lt[0], lt[0]\n for i in lt[1:]:\n if sumi <= 0:\n sumi = i\n else:\n sumi += i\n maxi = max(maxi, sumi)\n \n return maxi\n\nif __name__ == '__main__':\n lt = [-2]\n print maxsub(lt)\n "
},
{
"alpha_fraction": 0.5256270170211792,
"alphanum_fraction": 0.5496183037757874,
"avg_line_length": 29.566667556762695,
"blob_id": "ba1a38f27da1cf0bb96ea9e774e7dda432c4de68",
"content_id": "d50e10ada0d623835ab3cd8a94a3f30743dcee6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 917,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 30,
"path": "/largestRec.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given n non-negative integers representing the histogram's bar height \n# where the width of each bar is 1, \n# find the area of largest rectangle in the histogram.\n# For example,\n# Given height = [2,1,5,6,2,3],\n# return 10.\n\nclass Solution:\n # @param height, a list of integer\n # @return an integer\n def largestRectangleArea(self, height):\n height.append(0)\n i, l = 0, len(height)\n maxarea = 0\n index = []\n while i < l:\n if index == [] or height[i] >= height[index[-1]]:\n index.append(i)\n i += 1\n else: \n curr = index.pop()\n area = height[curr]*i if index == [] else height[curr]*(i-index[-1]-1)\n maxarea = max(maxarea, area)\n return maxarea\n\nif __name__ == '__main__':\n height = [4,2,0,3,2,5]\n test = Solution()\n out = test.largestRectangleArea(height)\n print out\n"
},
{
"alpha_fraction": 0.49819493293762207,
"alphanum_fraction": 0.505415141582489,
"avg_line_length": 26.700000762939453,
"blob_id": "c9d3f9f5c1669684bcb91ce40bdef315a881c88d",
"content_id": "d6e9ac8d1714f864981277a236cabb3eb5bb95fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 10,
"path": "/singleNum.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A, a list of integer\n # @return an integer\n def singleNumber(self, A):\n if A == []: return\n record = {}\n for i in A:\n if i in record: record.pop(i)\n else: record[i] = 1\n return record.keys()[0]\n"
},
{
"alpha_fraction": 0.4805583357810974,
"alphanum_fraction": 0.5214356780052185,
"avg_line_length": 29.42424201965332,
"blob_id": "1c3a817c01878086737a3582c7b57f8eed8a1b4d",
"content_id": "06b870a604ef990080888de4f0f4e38a943af987",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1003,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 33,
"path": "/minPathSum.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a m x n grid filled with non-negative numbers, \n# find a path from top left to bottom right \n# which minimizes the sum of all numbers along its path.\n\n# Note: You can only move either down or right at any point in time.\n\n# mini(i,j) returns the minimum sum ending at grid[i][j]\n# mini(i,j) = min(mini(i-1,j) + mini(i,j-1)) + grid[i][j]\n\ndef minPathSum(grid):\n m = len(grid) # row number\n n = len(grid[0]) # col number\n if m == 0 or n == 0:\n return 0\n \n mini = [0]* m\n for i in range(m): mini[i] = [0]*n \n \n mini[0][0] = grid[0][0]\n for i in range(1, m):\n mini[i][0] = mini[i-1][0] + grid[i][0]\n for j in range(1, n):\n mini[0][j] = mini[0][j-1] + grid[0][j]\n \n for i in range(1, m):\n for j in range(1, n):\n mini[i][j] = min(mini[i][j-1], mini[i-1][j]) + grid[i][j] \n return mini[m-1][n-1]\n \nif __name__ == '__main__':\n grid = [[1,2,3],[4,5,6],[7,8,9]]\n grid2 = [[1],[2],[3]]\n out = minPathSum(grid)"
},
{
"alpha_fraction": 0.4716692268848419,
"alphanum_fraction": 0.5022971034049988,
"avg_line_length": 23.185184478759766,
"blob_id": "33aec1d623c42d5f2743e3bf884516036b3a5845",
"content_id": "c309252ddfe35905ddefdad9c4706c8434e8a69a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 653,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 27,
"path": "/rmDuplicates.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Follow up for \"Remove Duplicates\":\n# What if duplicates are allowed at most twice?\n\n# For example,\n# Given sorted array A = [1,1,1,2,2,3],\n\n# Your function should return length = 5, and A is now [1,1,2,2,3].\n\nclass Solution:\n # @param a list of integers\n # @return an integer\n def removeDuplicates(self, A):\n sz, i = 0, 0\n while i < len(A):\n A[sz] = A[i]\n sz += 1 \n i += 1\n while i < len(A) and A[i-1] == A[i]:\n i += 1\n A = A[:sz]\n return sz\n \nif __name__ == '__main__':\n A = [1,1]\n test = Solution()\n out = test.removeDuplicates(A)\n print out\n"
},
{
"alpha_fraction": 0.44668588042259216,
"alphanum_fraction": 0.46685880422592163,
"avg_line_length": 17.289474487304688,
"blob_id": "97c0c5516ddb15fa7291f7185d9bf6ffadbe27eb",
"content_id": "e68e8f19f6e527a98f9254facb44645e1d6679d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 694,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 38,
"path": "/invertTree.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Invert a binary tree.\n\n# 4\n# / \\\n# 2 7\n# / \\ / \\\n#1 3 6 9\n#to\n# 4\n# / \\\n# 7 2\n# / \\ / \\\n#9 6 3 1\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def invertTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: TreeNode\n \"\"\"\n self.DFS(root)\n return root\n \n def DFS(self, root):\n if root == None: return\n temp = root.left\n root.left = root.right\n root.right = temp\n self.DFS(root.left)\n self.DFS(root.right)\n return"
},
{
"alpha_fraction": 0.5467836260795593,
"alphanum_fraction": 0.556530237197876,
"avg_line_length": 30.090909957885742,
"blob_id": "e973fdbc70ba644f5837e5d1c2bb5301a7f720ec",
"content_id": "fc325c2894d819f593cf6db6b602feef5fe62b62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1026,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 33,
"path": "/longestSubStr.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n\n# Given a string,\n# find the length of the longest substring without repeating characters. \n# For example, \n# the longest substring without repeating letters for \"abcabcbb\" is \"abc\", \n# which the length is 3. \n# For \"bbbbb\" the longest substring is \"b\", with the length of 1.\n\nclass Solution:\n # @return an integer\n def lengthOfLongestSubstring(self, s):\n if s == '': return 0\n i, l, maxi = 0, 0, 0\n record = {}\n while i < len(s):\n while i < len(s) and (s[i] not in record or record[s[i]] < i - l):\n record[s[i]] = i\n i += 1\n l += 1\n maxi = max(maxi, l) \n if i < len(s): \n l = i - record[s[i]] \n record[s[i]] = i \n i += 1\n return maxi\n\nif __name__ == '__main__':\n s = \"wlrbbmqbhcdarzowkkyhiddqscdxrjmowfrxsjybldbefsarcbynecdyggxxpklorellnmpapqfwkhopkmco\"\n test = Solution()\n out = test.lengthOfLongestSubstring(s)\n print(out)\n"
},
{
"alpha_fraction": 0.5558035969734192,
"alphanum_fraction": 0.59375,
"avg_line_length": 30.964284896850586,
"blob_id": "9f84d564551ef21c0a2cc8d4b01e55add00c00af",
"content_id": "823b00323f30e7cdd652cf27a37fcd95eaeb8007",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 896,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 28,
"path": "/isHappy.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Write an algorithm to determine if a number is \"happy\".\n\n# A happy number is a number defined by the following process: Starting with any positive integer, \n# replace the number by the sum of the squares of its digits, and repeat the process until the number equals 1 \n# (where it will stay), or it loops endlessly in a cycle which does not include 1. \n# Those numbers for which this process ends in 1 are happy numbers.\n\n# Example: 19 is a happy number\n\n# 12 + 92 = 82\n# 82 + 22 = 68\n# 62 + 82 = 100\n# 12 + 02 + 02 = 1\n\nclass Solution(object):\n def isHappy(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n hist = {n}\n while 1 > 0:\n digits = sum([int(i)*int(i) for i in str(n)])\n if digits == 1: return True\n elif digits in hist: return False\n else:\n hist.add(digits)\n n = digits\n "
},
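The set-based cycle check above stores every value seen; Floyd's tortoise-and-hare detects the cycle in O(1) space instead. A minimal sketch (is_happy_floyd is a hypothetical name):

def is_happy_floyd(n):
    def step(m):
        return sum(int(d) ** 2 for d in str(m))
    slow, fast = n, step(n)
    while fast != 1 and slow != fast:
        slow = step(slow)         # one step
        fast = step(step(fast))   # two steps
    return fast == 1

assert is_happy_floyd(19) == True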
{
"alpha_fraction": 0.40227705240249634,
"alphanum_fraction": 0.43453511595726013,
"avg_line_length": 23,
"blob_id": "df62e84e0f16b43167e3db7f907968e42b25a173",
"content_id": "c89c77f32d0288b38611ad10209ca57baed9c8df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 527,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 22,
"path": "/rmDuplicaites.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Remove duplicates from sorted array in place\n\nclass Solution:\n # @param a list of integers\n # @return an integer\n def removeDuplicates(self, A):\n if len(A) == 0:\n return\n \n sz = 1\n i = 0\n for i in range(len(A)-1):\n if A[i] != A[i+1]:\n A[sz] = A[i+1]\n sz += 1\n A = A[:sz]\n return sz\n \nif __name__ == '__main__':\n A = [1,2,3,4,5,6,6,7,7,8]\n out = Solution()\n print out.removeDuplicates(A)"
},
{
"alpha_fraction": 0.5710207223892212,
"alphanum_fraction": 0.581013560295105,
"avg_line_length": 30.840909957885742,
"blob_id": "074310bc53eff9d8ad83cb4293469e181dead60c",
"content_id": "936944bb6e62b2aa311a8cfe62c7aa17e0c8b463",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1401,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 44,
"path": "/pathSumII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree and a sum, \n# determine if the tree has a root-to-leaf path \n# such that adding up all the values along the path equals the given sum.\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution:\n # @param root, a tree node\n # @param sum, an integer\n # @return a list of lists of integers\n def pathSum(self, root, sumi):\n if root == None: return []\n if root.left == None and root.right == None:\n if sumi == root.val: return [[root.val]]\n else: return []\n \n leftsum = self.pathSum(root.left, sumi - root.val)\n for i in range(len(leftsum)):\n leftsum[i] = [root.val] + leftsum[i]\n \n rightsum = self.pathSum(root.right, sumi - root.val)\n for i in range(len(rightsum)):\n rightsum[i] = [root.val] + rightsum[i]\n \n return leftsum + rightsum\n \nif __name__ == '__main__':\n root = TreeNode(5)\n root.left = TreeNode(4)\n root.left.left = TreeNode(11)\n root.left.left.left = TreeNode(7)\n root.left.left.right = TreeNode(2)\n root.right = TreeNode(8)\n root.right.left = TreeNode(13)\n root.right.right = TreeNode(4)\n root.right.right.right = TreeNode(1)\n root.right.right.left = TreeNode(5)\n \n test = Solution()\n out = test.pathSum(root, 22) \n print out\n"
},
{
"alpha_fraction": 0.4693295359611511,
"alphanum_fraction": 0.47931525111198425,
"avg_line_length": 25.339622497558594,
"blob_id": "46a1fb5698839eb80e334ad068a4d27f1461fbdd",
"content_id": "41f9489e74ad0789f43cfcda01b85967668915e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1402,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 53,
"path": "/LevelOrder.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Binary tree level order\nclass Treenode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \nclass Solution:\n def levelOrder(self, tree):\n if tree is None: \n return []\n \n tol = [[tree.val]] \n parents = [tree]\n \n while parents != []:\n current = self.CurrentLevel(parents)\n if current == []:\n break\n \n level = [] \n for i in current:\n level.append(i.val) \n tol.append(level)\n parents = current\n \n return tol\n \n def CurrentLevel(self, parents):\n current = []\n for node in parents:\n if node.left != None:\n current.append(node.left)\n \n if node.right != None:\n current.append(node.right)\n \n return current\n \nif __name__ == '__main__':\n tree = Treenode(3)\n tree.left = Treenode(9)\n tree.right = Treenode(0)\n tree.right.left = Treenode(15)\n tree.right.right = Treenode(7)\n tree.left.right = Treenode(20)\n tree.left.right.right = Treenode(25)\n tree.left.right.left = Treenode(30)\n tree.left.right.left.right = Treenode(13)\n \n out = Solution()\n #print [tree], level\n print out.levelOrder(tree)\n "
},
{
"alpha_fraction": 0.4596977233886719,
"alphanum_fraction": 0.4748110771179199,
"avg_line_length": 28.924528121948242,
"blob_id": "56764ce77051bb0f539b59bc99dafe639f418830",
"content_id": "262d9465237589e5eefff82a3bf4cbe2caa7b8be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1588,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 53,
"path": "/BasicCalculator.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Implement a basic calculator to evaluate a simple expression string.\n\n#The expression string may contain open ( and closing parentheses ), the plus + or minus sign -, non-negative integers and empty spaces .\n\n#You may assume that the given expression is always valid.\n\n#Some examples:\n\n#\"1 + 1\" = 2\n#\" 2-1 + 2 \" = 3\n#\"(1+(4+5+2)-3)+(6+8)\" = 23\n\n#Note: Do not use the eval built-in library function. \n\nclass Solution:\n # @param {string} s\n # @return {integer}\n def calculate(self, s):\n self.stack = []\n i = 0\n while i < len(s):\n if s[i] == ' ':\n i += 1\n elif s[i] in ['+', '_', '(']:\n self.stack.append(s[i])\n i += 1\n elif s[i].isdigit():\n j = i+1\n while j < len(s) and s[j].isdigit():\n j += 1\n self.operation(self.stack, s[i:j])\n i = j\n else:\n string = self.stack.pop()\n self.operation(self.stack, string)\n i += 1\n return self.stack[0]\n \n def operation(self, stack, string):\n if self.stack == []: \n self.stack.append(int(string))\n return\n elif self.stack[-1] == '(':\n self.stack.pop()\n self.stack.append(int(string))\n return\n val = int(string)\n sign = self.stack.pop()\n if sign == '+':\n new_val = int(self.stack.pop())+val\n else:\n new_val = int(self.stack.pop())-val\n self.stack.append(new_val) "
},
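A usage sketch for the Solution class above, exercising the examples from the problem statement:

calc = Solution()
print(calc.calculate('1 + 1'))                  # 2
print(calc.calculate(' 2-1 + 2 '))              # 3
print(calc.calculate('(1+(4+5+2)-3)+(6+8)'))    # 23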
{
"alpha_fraction": 0.6160635352134705,
"alphanum_fraction": 0.6328331828117371,
"avg_line_length": 41,
"blob_id": "236052c83d3f27088c2e622c055c0f82d6143041",
"content_id": "f10694f4b3ebf964d7233b086521e27cc048563f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1135,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 27,
"path": "/hIndex.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Given an array of citations (each citation is a non-negative integer) of a researcher, write a function to compute the researcher's h-index.\n\n# According to the definition of h-index on Wikipedia: \"A scientist has index h if h of his/her N papers have at least h citations each, \n# and the other N − h papers have no more than h citations each.\"\n\n# For example, given citations = [3, 0, 6, 1, 5], \n# which means the researcher has 5 papers in total and each of them had received 3, 0, 6, 1, 5 citations respectively. \n# Since the researcher has 3 papers with at least 3 citations each and the remaining two with no more than 3 citations each, his h-index is 3.\n\n# Note: If there are several possible values for h, the maximum one is taken as the h-index. \nclass Solution(object):\n def hIndex(self, citations):\n \"\"\"\n :type citations: List[int]\n :rtype: int\n \"\"\"\n tol = len(citations)\n citations.sort()\n i = 0\n \n while i < tol:\n if i < citations[tol-i-1]:\n i += 1\n else: break\n \n return i"
},
{
"alpha_fraction": 0.6136101484298706,
"alphanum_fraction": 0.6251441836357117,
"avg_line_length": 35.08333206176758,
"blob_id": "cec02309e8d06d35f42047a81c2eea43b6e64a0d",
"content_id": "2f62b2ff666b57b43e72923a3dc2dd15cedd08f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 867,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 24,
"path": "/bestTimeII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Say you have an array for which the ith element is the price of a given stock on day i.\n# Design an algorithm to find the maximum profit. \n# You may complete as many transactions as you like (ie, buy one and sell one share of the stock multiple times). \n# However, you may not engage in multiple transactions at the same time \n# (ie, you must sell the stock before you buy again).\n\nclass Solution:\n # @param prices, a list of integer\n # @return an integer\n def maxProfit(self, prices):\n if prices == []: return 0\n lowest = prices[0]\n maxpro = 0\n for i in range(len(prices)):\n if prices[i] > lowest: \n maxpro += prices[i]-lowest\n lowest = prices[i]\n return maxpro\n\nif __name__ == '__main__':\n prices = [10,3,5,6,10]\n test = Solution()\n out = test.maxProfit(prices)\n print out\n\n"
},
{
"alpha_fraction": 0.3944353461265564,
"alphanum_fraction": 0.4402618706226349,
"avg_line_length": 23.75,
"blob_id": "a84fc8a3a0d2a2730082a11235efc84f727d6a44",
"content_id": "ee9eb73e1f3ce687cb01b0f8167927d55acb3ba8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 611,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 24,
"path": "/romanToInt.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a roman numeral, convert it to an integer.\n# Input is guaranteed to be within the range from 1 to 3999.\n\ndef romanToInt(s):\n table = {'I':1, 'V':5, 'X':10, 'L':50, 'C':100, 'D':500, 'M':1000}\n Int = 0\n i = 0\n \n while i < len(s)-1:\n if table[s[i]] <table[s[i+1]]:\n Int += table[s[i+1]] - table[s[i]]\n i += 2\n else:\n Int += table[s[i]]\n i += 1\n \n if i == len(s):\n return Int\n else:\n return Int + table[s[i]]\n \nif __name__ == '__main__':\n s = 'MDCCCLXXXIV'\n out = romanToInt('MM')\n \n "
},
{
"alpha_fraction": 0.5302981734275818,
"alphanum_fraction": 0.5376723408699036,
"avg_line_length": 26.584070205688477,
"blob_id": "f92eec6e7118a8932356008ad2608c675f34a977",
"content_id": "48838e2436f0805be0c8bf075dc741542635cc5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3119,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 113,
"path": "/scrambleStrII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string s1, we may represent it as a binary tree \n# by partitioning it to two non-empty substrings recursively.\n\n# Below is one possible representation of s1 = \"great\":\n# great\n# / \\\n# gr eat\n# / \\ / \\\n# g r e at\n# / \\\n# a t\n\n# To scramble the string, we may choose any non-leaf node and swap its two children.\n\n# For example, if we choose the node \"gr\" and swap its two children, \n# it produces a scrambled string \"rgeat\".\n\n# rgeat\n# / \\\n# rg eat\n# / \\ / \\\n# r g e at\n# / \\\n# a t\n# We say that \"rgeat\" is a scrambled string of \"great\".\n\n# Similarly, if we continue to swap the children of nodes \"eat\" and \"at\", \n# it produces a scrambled string \"rgtae\".\n\n# rgtae\n# / \\\n# rg tae\n# / \\ / \\\n# r g ta e\n# / \\\n# t a\n# We say that \"rgtae\" is a scrambled string of \"great\".\n\n# Given two strings s1 and s2 of the same length, \n# determine if s2 is a scrambled string of s1.\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# This version find all scrambles of s1, exceed time limit\nclass Solution:\n # @return a boolean\n def isScramble(self, s1, s2):\n if s1 == '' and s2 == '': return True\n if len(s1) != len(s2): return False\n trees = [TreeNode(s1[0])]\n allscramble = []\n\n # all possible binary trees of s1\n for s in s1[1:]:\n temp = []\n for t in trees:\n newroot = TreeNode(t.val+s)\n newroot.left = t\n newroot.right = TreeNode(s)\n temp.append(newroot)\n temp.append(self.addOneLt(t, s))\n trees = temp\n\n for t in trees:\n allscramble += self.scramble(t)\n for i in allscramble:\n if s2 == i.val: return True\n return False \n\n def addOneLt(self, root, s):\n if root == None: return\n newroot = TreeNode(root.val+s)\n if root.right == None:\n newroot.left = TreeNode(root.val)\n newroot.right = TreeNode(s)\n else: \n newroot.right = self.addOneLt(root.right, s)\n newroot.left = root.left\n return newroot\n\n def buildTree(self, left, right):\n root = TreeNode(left.val+right.val)\n root.left, root.right = left, right\n return root\n\n def scramble(self, root):\n newroots = []\n if root.left == None: return [root]\n newlefts = self.scramble(root.left)\n newrights = self.scramble(root.right)\n for nl in newlefts:\n for nr in newrights:\n newroots.append(self.buildTree(nl, nr))\n\n exchange = []\n for nr in newroots:\n temp = TreeNode(nr.right.val+nr.left.val)\n temp.left, temp.right = nr.right, nr.left\n exchange.append(temp)\n newroots += exchange\n return newroots\n\nif __name__ == '__main__':\n s1 = \"abcdefghij\"\n s2 = \"efghijcadb\"\n\n test = Solution()\n out = test.isScramble(s1, s2)\n print out\n\n\n"
},
{
"alpha_fraction": 0.5096322298049927,
"alphanum_fraction": 0.5253940224647522,
"avg_line_length": 24.399999618530273,
"blob_id": "1307c1dbbfc75e7608f74b8d1b0b242639fbf9da",
"content_id": "f24b3fddc09f1fb0895f652d1f47c0c3d40246b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1142,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 45,
"path": "/mergeKlist.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Merge k sorted linked lists and return it as one sorted list. \n# Analyze and describe its complexity.\nimport heapq \n \nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n \ndef mergeKLists(lists):\n new = ListNode(0)\n out = new\n recorder = {}\n heap = []\n for i in range(len(lists)):\n if lists[i] != None:\n recorder[lists[i].val] = i\n heapq.heappush(heap, lists[i].val)\n \n while recorder != {}: \n index = recorder[heap[0]] \n new.next = lists[index]\n del recorder[heap[0]]\n heapq.heappop(heap)\n \n if lists[index].next != None: \n lists[index] = lists[index].next\n recorder[lists[index].val] = index \n heapq.heappush(heap, lists[index].val) \n new = new.next \n return out.next\n\nif __name__ == '__main__':\n l1 = ListNode(1)\n l1.next = ListNode(6)\n \n l2 = ListNode(2)\n l2.next = ListNode(4)\n l2.next.next = ListNode(5)\n \n l3 = ListNode(3)\n l3.next = ListNode(7)\n \n lists = [None, l1] \n out = mergeKLists(lists)"
},
{
"alpha_fraction": 0.5499265789985657,
"alphanum_fraction": 0.5499265789985657,
"avg_line_length": 34.91666793823242,
"blob_id": "7428dad93519edb1de8aa17a7963bb59cddc50af",
"content_id": "e1d9c959602ce0580cb5cb3f47df51d6a4fb14de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1362,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 36,
"path": "/wordPattern.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a pattern and a string str, find if str follows the same pattern.\n\n# Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.\n\n# Examples:\n\n# pattern = \"abba\", str = \"dog cat cat dog\" should return true.\n# pattern = \"abba\", str = \"dog cat cat fish\" should return false.\n# pattern = \"aaaa\", str = \"dog cat cat dog\" should return false.\n# pattern = \"abba\", str = \"dog dog dog dog\" should return false.\n\n# Notes:\n# You may assume pattern contains only lowercase letters, and str contains lowercase letters separated by a single space. \n\nclass Solution(object):\n def wordPattern(self, pattern, s):\n \"\"\"\n :type pattern: str\n :type str: str\n :rtype: bool\n \"\"\"\n s = s.split(' ')\n if len(s) != len(pattern):\n return False\n \n patndict = {}\n strdict = {}\n for i in range(len(pattern)):\n if pattern[i] not in patndict and s[i] not in strdict:\n patndict[pattern[i]] = s[i]\n strdict[s[i]] = pattern[i]\n elif pattern[i] in patndict and s[i] in strdict:\n if pattern[i] != strdict[s[i]]:\n return False\n else: return False\n return True\n \n\n \n \n \n "
},
{
"alpha_fraction": 0.4970513880252838,
"alphanum_fraction": 0.5164279937744141,
"avg_line_length": 30.236841201782227,
"blob_id": "3b13169dec785759a1ea6d2d9fc4cd3b09d4e3ef",
"content_id": "fc20776327651499a1ae202dcf969d2f69a176ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1187,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 38,
"path": "/maxPoints.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given n points on a 2D plane, \n# find the maximum number of points that lie on the same straight line.\n\n# Definition for a point\nclass Point:\n def __init__(self, a=0, b=0):\n self.x = a\n self.y = b\n\nclass Solution:\n # @param points, a list of Points\n # @return an integer\n def maxPoints(self, points):\n if len(points) <=2: return len(points)\n coeffs = {}\n tol = 2\n for end in range(len(points)):\n for start in range(end):\n new = self.line(points[start], points[end])\n if new in coeffs:\n if start not in coeffs[new]: coeffs[new].append(start)\n if end not in coeffs[new]: coeffs[new].append(end)\n else: coeffs[new] = [start, end]\n tol = max(tol, len(coeffs[new]))\n return tol\n \n def line(self, pt1, pt2):\n a = float('inf') if pt1.x == pt2.x else\\\n 1.0*(pt1.y-pt2.y)/(pt1.x-pt2.x)\n b = pt1.y - a*pt1.x\n return (a,b)\n\nif __name__ == '__main__':\n \n points = [Point(1,1), Point(1,1), Point(0,0)]\n test = Solution()\n out = test.maxPoints(points)\n print out\n"
},
{
"alpha_fraction": 0.47421732544898987,
"alphanum_fraction": 0.49355432391166687,
"avg_line_length": 29.13888931274414,
"blob_id": "d2362f623df0d30df2c776792292f2e07a55c6bc",
"content_id": "15eb82645982b3a22ce95d250f576eacb8ca0876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1086,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 36,
"path": "/longestPalindromicSubStrIII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string S, find the longest palindromic substring in S. \n# You may assume that the maximum length of S is 1000, \n# and there exists one unique longest palindromic substring.\n\nclass Solution:\n # @return a string\n def longestPalindrome(self, s):\n if len(s) <= 1: return s\n \n T = '#'\n for i in s: T += i+'#'\n \n longestCenter, center, right = 0, 0, 0\n maxlen = 1\n l = len(T)\n longestP = T[0]\n P = [0]*l\n for i in range(l):\n i_mirror = 2*center - i\n P[i] = 0 if i > right else min(P[i_mirror], right-i)\n while i-1-P[i] > -1 and i+1+P[i] < l \\\n and T[i+1+P[i]] == T[i-1-P[i]]:\n P[i] += 1\n if P[i] + i > right: center, right = i, i + P[i]\n if P[i] > maxlen:\n maxlen = P[i] \n longestCenter = i \n return s[(longestCenter-maxlen)/2: (longestCenter+maxlen)/2]\n\n\nif __name__ == '__main__':\n s = 'aaaaaaaaa'\n test = Solution()\n out = test.longestPalindrome(s)\n print s\n print out\n\n"
},
{
"alpha_fraction": 0.4321839213371277,
"alphanum_fraction": 0.4333333373069763,
"avg_line_length": 28.517240524291992,
"blob_id": "98349fd8e17e510f8ada3be14ba0d0a2ab82f1ca",
"content_id": "2ce2f84927cca1c12daa369b7ffd6602f81317df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 870,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 29,
"path": "/simplifyPath.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an absolute path for a file (Unix-style), simplify it.\n\n# For example,\n# path = \"/home/\", => \"/home\"\n# path = \"/a/./b/../../c/\", => \"/c\"\n\nclass Solution:\n # @param path, a string\n # @return a string\n def simplifyPath(self, path):\n if path == '': return ''\n sep = path.split('/')\n newpath = []\n for i in range(len(sep)):\n if sep[i] == '..' and newpath != []:\n newpath.pop()\n elif sep[i] != '' and sep[i] != '.' and sep[i] != '..': \n newpath.append(sep[i])\n if newpath == [] and i == len(sep)-1: \n newpath.append('')\n out = ''\n for i in newpath: out += '/'+i\n return out\n\nif __name__ == '__main__':\n path = \"/home/\"\n test = Solution()\n out = test.simplifyPath(path)\n print out \n\n\n\n\n"
},
{
"alpha_fraction": 0.42553192377090454,
"alphanum_fraction": 0.48811012506484985,
"avg_line_length": 26.101694107055664,
"blob_id": "5d36454792995bc86502c1c7154928d7c6a158bb",
"content_id": "015511a743dcf886109ed588583f924adaf50ca4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1598,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 59,
"path": "/numIslands.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Number of Islands Total Accepted: 4863 Total Submissions: 23172\n\n#Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. \n#An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.\n\n#Example 1:\n\n#11110\n#11010\n#11000\n#00000\n\n#Answer: 1\n\n#Example 2:\n\n#11000\n#11000\n#00100\n#00011\n\n#Answer: 3\n\nclass Solution:\n # @param grid, a list of list of characters\n # @return an integer\n def numIslands(self, grid):\n if grid == []: return 0\n count = 0\n self.m, self.n = len(grid), len(grid[0])\n self.grid = grid\n for i in range(self.m):\n for j in range(self.n):\n if self.grid[i][j] == '1':\n count += 1\n self.DFS(i, j) \n return count\n \n def DFS(self, i, j):\n if i < self.m-1 and self.grid[i+1][j] == '1':\n self.grid[i+1][j] = '2'\n self.DFS(i+1,j)\n if i > 0 and self.grid[i-1][j] == '1':\n self.grid[i-1][j] = '2'\n self.DFS(i-1,j)\n if j < self.n-1 and self.grid[i][j+1] == '1':\n self.grid[i][j+1] = '2'\n self.DFS(i, j+1) \n if j > 0 and self.grid[i][j-1] == '1':\n self.grid[i][j-1] = '2' \n self.DFS(i, j-1) \n return \n\nif __name__ == '__main__':\n grid = [['1','0','1','1','1'],\\\n ['1','0','1','0','1'],\\\n ['1','1','1','0','1']]\n x = Solution()\n print x.numIslands(grid)"
},
{
"alpha_fraction": 0.48638132214546204,
"alphanum_fraction": 0.49935150146484375,
"avg_line_length": 28.038461685180664,
"blob_id": "3c69c0a2c13e50bbadee74ad97cb0cf395a0ec19",
"content_id": "34125ed20074225759e34f331195aaf9b2b7e335",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1542,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 52,
"path": "/ArraytoBST.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Convert sorted array to height balanced BST\n# Not optimal, need improvement\n\nclass Treenode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \nclass Solution:\n # @param num, a list of integers\n # @return a tree node\n def sortedArrayToBST(self, num): \n if len(num) == 0: return\n # find the median of the array, take it as the root\n median = int(len(num)/2)\n root = Treenode(num[median]) \n \n self.buildTree(num, root) \n \n return root\n \n def buildTree(self, num, root): \n if len(num) <= 1: # no children\n return \n \n median = num.index(root.val) \n \n # if len(num) == 2, only one children\n if median == len(num)-1: # only left children\n root.left = Treenode(num[0])\n return \n if median == 0: # only right children\n root.right = Treenode(num[1])\n return\n \n left = num[0:median]\n right = num[median+1:len(num)]\n \n median_left = int(len(left)/2)\n median_right = int(len(right)/2)\n \n root.left = Treenode(left[median_left])\n root.right = Treenode(right[median_right])\n \n self.buildTree(left, root.left)\n self.buildTree(right, root.right)\n\nif __name__ == '__main__':\n lt = [2,3,4,5,6,7,8,9]\n out = Solution()\n tree = out.sortedArrayToBST(lt) \n \n \n "
},
{
"alpha_fraction": 0.5620723366737366,
"alphanum_fraction": 0.5738025307655334,
"avg_line_length": 26.54054069519043,
"blob_id": "4a6213b7ef9210b9d72e06caf7002dcb1d10fbbd",
"content_id": "cee5fa26389d24d66571810ce87d0b49bc8c3fe1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1023,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 37,
"path": "/swapNodes.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a linked list, \n# swap every two adjacent nodes and return its head.\n# For example, Given 1->2->3->4, you should return the list as 2->1->4->3.\n\n# Your algorithm should use only constant space. \n# You may not modify the values in the list, only nodes itself can be changed.\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\ndef swapNodes(head):\n if head == None or head.next == None:\n return head\n else:\n out = head.next\n \n while head != None and head.next!= None: \n newhead = head.next \n nextpair = head.next.next\n if nextpair != None and nextpair.next != None: \n head.next = nextpair.next\n else:\n head.next = nextpair\n newhead.next = head\n head = nextpair\n \n return out \n \nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(2)\n #head.next.next = ListNode(3)\n #head.next.next.next = ListNode(4)\n \n out = swapNodes(head)\n "
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5099999904632568,
"avg_line_length": 30.133333206176758,
"blob_id": "3310fe5cd2f03f547a675a186e6e9c3836d29b8c",
"content_id": "9200e5fc6a1dd1a1c7ea4ab78a7e462d539990b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1401,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 45,
"path": "/Nqueens.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# The n-queens puzzle is the problem of placing n queens \n# on an n×n chessboard such that no two queens attack each other.\n\n# Given an integer n, return all distinct solutions to the n-queens puzzle.\n\n# Each solution contains a distinct board configuration of the n-queens' placement, \n# where 'Q' and '.' both indicate a queen and an empty space respectively.\n\nclass Solution:\n # @return a list of lists of string\n def solveNQueens(self, n):\n self.solution = []\n for i in range(n):\n self.add(n, [[0],[i]])\n \n final = [] \n for item in self.solution:\n out = []\n for i in range(n):\n out.append('.'*item[1][i]+'Q'+'.'*(n-item[1][i]-1))\n final.append(out)\n return final\n \n def add(self, n, loc): \n if len(loc[0]) == n:\n self.solution.append(loc) \n return \n for j in range(n):\n if self.check(n, loc, j):\n self.add(n, [loc[0]+[len(loc[0])], loc[1]+[j]]) \n return \n \n def check(self, n, loc, j): \n if j in loc[1]: \n return False\n for k in range(len(loc[0])):\n if len(loc[0]) - k == abs(loc[1][k] - j): \n return False\n return True\n \n \nif __name__ == '__main__':\n test = Solution()\n out = test.solveNQueens(4)"
},
{
"alpha_fraction": 0.41025641560554504,
"alphanum_fraction": 0.4711538553237915,
"avg_line_length": 15.473684310913086,
"blob_id": "3d4b1eaa36a7d09bcdfac8d3a49a9708936126f0",
"content_id": "603343d424bf08e9795816ad9ad0480a2dd94734",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 19,
"path": "/climbingStairs.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Climbing Stairs\n# Recursion f(n) = f(n-1) + f(n-2)\ndef climbStairs(n):\n if n == 1: return 1\n if n == 2: return 2\n \n fn = 0\n fn1 = 2\n fn2 = 1\n \n for i in range(2, n):\n fn = fn1 + fn2\n fn2 = fn1\n fn1 = fn\n \n return fn\n \nif __name__ == '__main__':\n print climbStairs(10)"
},
{
"alpha_fraction": 0.5945454835891724,
"alphanum_fraction": 0.5945454835891724,
"avg_line_length": 35.733333587646484,
"blob_id": "965db1d1f79aadbbe14d1e9c118988d4ccdcdd4a",
"content_id": "cdebc8f30421250545acccddc3829ad4542af1e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 550,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 15,
"path": "/containsNearbyDuplicate.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Given an array of integers and an integer k, \n#find out whether there there are two distinct indices i and j in the array such that nums[i] = nums[j] \n#and the difference between i and j is at most k. \n\nclass Solution:\n # @param {integer[]} nums\n # @param {integer} k\n # @return {boolean}\n def containsDuplicate(self, nums, k):\n had = {}\n for i in range(len(nums)):\n if nums[i] not in had: had[nums[i]]=i\n elif i - had[nums[i]] <= k: return True\n else: had[nums[i]] = i\n return False"
},
{
"alpha_fraction": 0.37773722410202026,
"alphanum_fraction": 0.41605839133262634,
"avg_line_length": 23.954545974731445,
"blob_id": "ba35a3154393402dd8d4dbffaa08be338406e00c",
"content_id": "a92743b5d3403390186b2c88738122b3ea469550",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 548,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 22,
"path": "/heap.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Heap algorithm\n\nclass Solution():\n \n def heap(self, N, array):\n if N == 1:\n return array\n if N > len(array):\n return 'exceed array size' \n \n for i in range(N):\n self.heap(N-1, array)\n if i < N-1: \n array[0 if N%2 == 1 else i], array[N-1] =\\\n array[N-1], array[0 if N%2 == 1 else i]\n \n print array\n \nif __name__ == '__main__':\n A = [1,2,3,4,5,6,7,8,9]\n solution = Solution()\n solution.heap(3, A)"
},
{
"alpha_fraction": 0.5497254133224487,
"alphanum_fraction": 0.5808419585227966,
"avg_line_length": 30.44230842590332,
"blob_id": "bf25b369013033971ebd4a8b4931e6a902381120",
"content_id": "95682a6f52009386fe6c7f1bed4f80355b416ba2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1639,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 52,
"path": "/combinationSumII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a collection of candidate numbers (C) and a target number (T), \n# find all unique combinations in C where the candidate numbers sums to T.\n\n# Each number in C may only be used once in the combination.\n\n# Note:\n# All numbers (including target) will be positive integers.\n# Elements in a combination must be in non-descending order. \n# The solution set must not contain duplicate combinations.\n# For example, given candidate set 10,1,2,7,6,1,5 and target 8, \n# A solution set is: \n# [1, 7] \n# [1, 2, 5] \n# [2, 6] \n# [1, 1, 6]\n\nclass Solution:\n # @param candidates, a list of integers\n # @param target, integer\n # @return a list of lists of integers\n def combinationSum2(self, candidates, target):\n candidates.sort()\n self.collection = []\n temp = []\n self.dfs(candidates, 0, target, 0, temp)\n return self.collection\n \n def dfs(self, candidates, now, target, start, temp):\n if now == target:\n self.collection.append(temp[:])\n return\n if now > target: \n return\n l = len(candidates)\n i = start\n while i < l:\n count = 0\n while i+count<l-1 and candidates[i+count] == candidates[i+count+1]: \n count += 1 \n temp.append(candidates[i])\n self.dfs(candidates, now+candidates[i], target, i+1, temp)\n temp.pop()\n i += count + 1\n return\n \nif __name__ == '__main__':\n candidates = [10, 1, 2, 7, 6, 1, 5]\n candy = [2,2,2,1]\n can = [4,4,2,1,4,2,2,1,3]\n test = Solution()\n out = test.combinationSum2(candidates, 8)\n print out\n\n \n"
},
{
"alpha_fraction": 0.4328097701072693,
"alphanum_fraction": 0.45375218987464905,
"avg_line_length": 27.41666603088379,
"blob_id": "9bbd4cbce7fa0d757df8c687932e0181a6445271",
"content_id": "0892b837cee253a28c885c097ccc6e6e10af15ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1719,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 60,
"path": "/numSquares.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a positive integer n, \n# find the least number of perfect square numbers (for example, 1, 4, 9, 16, ...) \n# which sum to n.\n\n# For example, given n = 12, return 3 because 12 = 4 + 4 + 4; \n# given n = 13, return 2 because 13 = 4 + 9.\n\n# DFS, time limit exceeded/maximum recursion depth exceeded\nclass Solution1(object):\n def numSquares(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n self.counter = n\n self.good = {} \n self.bad = set() \n self.DFS(n, 1, 0) \n return self.counter\n \n def DFS(self, n, start, counter):\n print n\n if n == 0: \n self.counter = min(self.counter, counter)\n return counter\n if n in self.good:\n counter += self.good[n]\n self.counter = min(self.counter, counter)\n return counter\n if n in self.bad or counter >= self.counter: return \n \n while start*start <= n:\n tol = self.DFS(n-start*start, start, counter+1)\n if tol != None:\n self.good[n] = tol - counter \n else: self.bad.add(n)\n start += 1\n return \n\n# DP, time limit exceeded \nclass Solution2(object):\n def numSquares(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n dp = {}\n i = 1\n while i*i <= n:\n dp[i*i] = 1\n i += 1\n \n for j in range(1, n+1):\n if j not in dp: dp[j] = j\n i = 1\n while j + i*i <= n:\n if j+i*i not in dp or dp[j+i*i] > dp[j]+1:\n dp[j+i*i] = dp[j]+1\n i += 1\n return dp[n]\n \n \n"
},
{
"alpha_fraction": 0.5493934154510498,
"alphanum_fraction": 0.5649913549423218,
"avg_line_length": 28.3389835357666,
"blob_id": "6135d734c5f6cf1128a53d4f2744d7fa4956a2d9",
"content_id": "d57a8d7cba1e06fe459027e4f7b59da5a710a1b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1731,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 59,
"path": "/reverseKGroup.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a linked list, reverse the nodes of a linked list k at a time and return its modified list.\n# If the number of nodes is not a multiple of k then left-out nodes in the end should remain as it is.\n# You may not alter the values in the nodes, only nodes itself may be changed.\n# Only constant memory is allowed.\n\n# For example,\n# Given this linked list: 1->2->3->4->5\n# For k = 2, you should return: 2->1->4->3->5\n# For k = 3, you should return: 3->2->1->4->5\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @param k, an integer\n # @return a ListNode\n def reverseKGroup(self, head, k):\n [new, tail] = self.reverseK(head, k)\n while tail != None:\n temp = tail\n [head, tail] = self.reverseK(tail.next, k)\n temp.next = head\n return new\n\n def reverseK(self, head, k):\n if head == None: return [None, None]\n count = 1\n check = head\n while count < k:\n if check.next == None: return [head, None]\n check = check.next\n count += 1\n\n count = 1 \n new, temp = head, head\n while count < k:\n new = head.next\n head.next = new.next\n new.next = temp\n temp = new \n count += 1\n return [new, head]\n\nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(4)\n head.next.next.next.next = ListNode(5)\n\n test = Solution()\n out = test.reverseKGroup(head, 3)\n while out != None:\n print out.val\n out = out.next\n"
},
{
"alpha_fraction": 0.4398406445980072,
"alphanum_fraction": 0.4892430305480957,
"avg_line_length": 29.609756469726562,
"blob_id": "7b553b5f478e6d694db2c6553b3bbb9c38df2cae",
"content_id": "6a25831975f94f502ebfd4480603a1736bd9f157",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1255,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 41,
"path": "/addOperators.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string that contains only digits 0-9 and a target value, \n# return all possibilities to add binary operators (not unary) +, -, or * between the digits \n# so they evaluate to the target value.\n\n# Examples: \n# \"123\", 6 -> [\"1+2+3\", \"1*2*3\"] \n# \"232\", 8 -> [\"2*3+2\", \"2+3*2\"]\n# \"105\", 5 -> [\"1*0+5\",\"10-5\"]\n# \"00\", 0 -> [\"0+0\", \"0-0\", \"0*0\"]\n# \"3456237490\", 9191 -> []\n\nclass Solution(object):\n def addOperators(self, num, target):\n \"\"\"\n :type num: str\n :type target: int\n :rtype: List[str]\n \"\"\"\n self.nums = [int(i) for i in num]\n self.target = target\n self.l = len(self.nums)\n self.collect = []\n self.DFS(self.nums[0], [], 0)\n \n return self.collect\n \n def DFS(self, curr, ops, i):\n if i >= self.l-1:\n if curr == self.target:\n return ops\n return\n nt = self.nums[i+1]\n multiply = curr*nt\n plus = curr+nt\n minus = curr-nt\n multiply = self.DFS(curr*nt, ops+['*'], i+1)\n plus = self.DFS(curr+nt, ops+['+'], i+1)\n minus = self.DFS(curr-nt, ops+['-'], i+1)\n for i in [multiply, plus, minus]:\n if i != None:\n self.collect.append(i)\n"
},
{
"alpha_fraction": 0.37005162239074707,
"alphanum_fraction": 0.42685025930404663,
"avg_line_length": 22.280000686645508,
"blob_id": "42f1c2a71be75cd01500e796327ff3433d6ab655",
"content_id": "20f9e792250e1b91dc4898de298bcc3244b6dec2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 581,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 25,
"path": "/sqrt(x).py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Implement int sqrt(int x).\n# Compute and return the square root of x.\n\nclass Solution:\n # @param x, an integer\n # @return an integer\n \n # Newton's method\n # y = sqrt(x)\n # f(y) = y^2 - x\n # f'(y) = 2y\n # f(y0) = (y0-y1)*f'(y0) => y1 = y0 - f(y0)/f'(y0)\n # y1 = y0 - (y0^2 - x)/(2y0) \n def sqrt(self, x):\n y0 = 0 \n y1 = 1\n \n while int(y0) != int(y1):\n y0 = y1\n y1 = y0 - (y0**2 - x)/(2.0*y0) \n return int(y1)\n \nif __name__ == '__main__':\n test = Solution()\n out = test.sqrt(1)"
},
{
"alpha_fraction": 0.6058871746063232,
"alphanum_fraction": 0.6181520819664001,
"avg_line_length": 29.475000381469727,
"blob_id": "3160dd182fe047c04456c5c96c18d2ea324dcf2c",
"content_id": "3d7451a99b7aeddade196a1529f65b92d2a50189",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1223,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 40,
"path": "/LRUCache.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Design and implement a data structure for Least Recently Used (LRU) cache. \n# It should support the following operations: get and set.\n\n# get(key) - Get the value (will always be positive) of the key \n# if the key exists in the cache, otherwise return -1.\n# set(key, value) - Set or insert the value if the key is not already present. \n# When the cache reached its capacity, \n# it should invalidate the least recently used item before inserting a new item.\n\nclass LRUCache:\n\n # @param capacity, an integer\n def __init__(self, capacity):\n self.cap = capacity \n self.start = 0\n self.cache = {}\n self.index = {}\n\n # @return an integer\n def get(self, key):\n if key in self.cache: return self.cache[key]\n else: return -1\n\n # @param key, an integer\n # @param value, an integer\n # @return nothing\n def set(self, key, value):\n if len(self.cache) == self.cap:\n self.cache.pop(self.index[self.start])\n self.start += 1\n self.cache[key] = value\n self.index[len(self.index)] = key\n\nif __name__ == '__main__':\n cache = LRUCache(3)\n cache.set(1,1)\n cache.set(2,2)\n cache.set(3,3)\n cache.set(4,4) \n cache.set(5,5) \n \n"
},
{
"alpha_fraction": 0.32364341616630554,
"alphanum_fraction": 0.3478682041168213,
"avg_line_length": 23.023256301879883,
"blob_id": "169cd0815e7dc866c1e5036b55493a82609b0e2f",
"content_id": "da18ff8c8a79ef4f24ee689694d2e045e931ea14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1032,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 43,
"path": "/MedianTwoArray.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# median of two sorted array\n\nclass Solution:\n \n def findMedian(self, A, B):\n la = len(A)\n lb = len(B)\n \n if (la + lb)%2 == 0:\n return (self.findK(A,B,(la+lb)/2) +\\\n self.findK(A,B,(la+lb)/2+1))/2.0\n else:\n return self.findK(A,B,(la+lb+1)/2)\n \n def findK(self, A, B, K): \n la, lb = len(A), len(B) \n ta = min(la, K/2)\n tb = min(lb, K - ta) \n \n if la == 0:\n return B[K-1]\n \n if lb == 0:\n return A[K-1]\n \n if K == 1:\n return min(A[0], B[0])\n \n if A[ta - 1] < B[tb - 1]: \n return self.findK(A[ta:], B, K - ta)\n \n elif A[ta-1] > B[tb-1]:\n return self.findK(A, B[tb:], K - tb)\n \n else: \n return A[ta - 1] \n \nif __name__ == '__main__':\n A = [0]\n B = [1,2]\n\n out = Solution()\n print out.findMedian(A,B)"
},
{
"alpha_fraction": 0.4794759750366211,
"alphanum_fraction": 0.5039301514625549,
"avg_line_length": 26.926828384399414,
"blob_id": "d8729155b582cebc3b09f0e7f5435e4536629b72",
"content_id": "c702e705ba27caf9e415bd8cb332ab957e2ddf1d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1145,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 41,
"path": "/subsetsII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a collection of integers that might contain duplicates, S, \n# return all possible subsets.\n\n# Note:\n# Elements in a subset must be in non-descending order.\n# The solution set must not contain duplicate subsets.\n\n# For example,\n# If S = [1,2,2], a solution is:\n# [[2],\n# [1],\n# [1,2,2],\n# [2,2],\n# [1,2],\n# []]\n\n# DP!\nclass Solution:\n # @param S, a list of integer\n # @return a list of lists of integer\n def subsetsWithDup(self, S):\n out = [[]]\n S.sort()\n for i in range(len(S)):\n temp = []\n for item in out:\n add = item+[S[i]]\n if add not in out and add not in temp: temp.append(add)\n for j in range(1,len(item)):\n if j > 0 and item[j] == item[j-1]:continue\n if item[j] == S[i]: continue\n rep = item[0:j] + item[j+1:] + [S[i]]\n if rep not in out and add not in temp: temp.append(rep)\n out += temp\n return out\n \nif __name__ == '__main__':\n S = [1,2,3,4,5,6,7,8,10,0]\n test = Solution()\n out = test.subsetsWithDup(S)\n print out\n"
},
{
"alpha_fraction": 0.4611307382583618,
"alphanum_fraction": 0.5035335421562195,
"avg_line_length": 30.47222137451172,
"blob_id": "8449ca5d6513c848295c14d9eabc024a4b906da9",
"content_id": "c2e6e488001741835d26927946c41e53adc6d7e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1132,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 36,
"path": "/editDist.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given two words word1 and word2, \n# find the minimum number of steps required to convert word1 to word2. \n# (each operation is counted as 1 step.)\n\n# You have the following 3 operations permitted on a word:\n\n# a) Insert a character\n# b) Delete a character\n# c) Replace a character\n\nclass Solution:\n # @return an integer\n def minDistance(self, word1, word2):\n l1, l2 = len(word1), len(word2)\n if l1 == 0: return l2\n if l2 == 0: return l1\n \n # dist[i][0] = i, dist[0][j] = j\n dist = [[0]*(l2+1) for i in range(l1+1)]\n for i in range (l1+1): dist[i][0] = i \n for j in range(l2+1): dist[0][j] = j\n \n for i in range(l1):\n for j in range(l2):\n if word1[i] == word2[j]:\n dist[i+1][j+1] = dist[i][j]\n else:\n dist[i+1][j+1] = min(dist[i][j+1], dist[i+1][j], dist[i][j]) + 1\n \n return dist[l1][l2]\n \n \nif __name__ == '__main__':\n word1, word2 = \"distance\", \"springbok\" \n test = Solution()\n out = test.minDistance(word1, word2)"
},
{
"alpha_fraction": 0.5199063420295715,
"alphanum_fraction": 0.5448868274688721,
"avg_line_length": 29.5,
"blob_id": "19397a2848d53f296d746dcda6cd0d6be2bf7f80",
"content_id": "58ffe2b4702dade565d518553e78ce8125149be9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1281,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 42,
"path": "/candy.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# There are N children standing in a line. \n# Each child is assigned a rating value.\n\n# You are giving candies to these children \n# subjected to the following requirements:\n\n# Each child must have at least one candy.\n# Children with a higher rating get more candies than their neighbors.\n# What is the minimum candies you must give?\n\n# Do not understand why it expects 4:\n# Input:\t[1,2,2]\n# Output:\t5\n# Expected:\t4\n\nclass Solution:\n # @param ratings, a list of integer\n # @return an integer\n def candy(self, ratings):\n lr = len(ratings)\n peak, bottom = 0, 0\n tol, indiv = 0, 0\n for i in range(lr):\n if i == 0 or ratings[i] > ratings[i-1]:\n indiv += 1\n elif ratings[i] < ratings[i-1]:\n indiv -= 1\n if (i == 0 or ratings[i] < ratings[i-1]) and\\\n (i == lr-1 or ratings[i] < ratings[i+1]):\n tol += (1-indiv)*(i-peak) #correction\n indiv = 1\n elif (i == 0 or ratings[i] > ratings[i-1]) and\\\n (i == lr-1 or ratings[i] > ratings[i+1]):\n peak = i\n tol += indiv\n return tol\n\nif __name__ == '__main__':\n ratings = [1,3,5,7,9,5,0]\n test = Solution()\n out = test.candy(ratings)\n print out\n"
},
{
"alpha_fraction": 0.6005788445472717,
"alphanum_fraction": 0.6121562719345093,
"avg_line_length": 41.625,
"blob_id": "ec7408d6aedc1abbdc10b101ea5f99b883762c73",
"content_id": "09bcb60ce9f930565fcf2fe693f9c834866f9584",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 691,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 16,
"path": "/containMostWater.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given n non-negative integers a1, a2, ..., an, where each represents a point at coordinate (i, ai). \n# n vertical lines are drawn such that the two endpoints of line i is at (i, ai) and (i, 0). \n# Find two lines, which together with x-axis forms a container, \n# such that the container contains the most water.\n\nclass Solution:\n # @return an integer\n def maxArea(self, height):\n maxArea = 0\n left, right = 0, len(height)-1\n while left < right:\n area = min(height[left], height[right])*(right-left)\n if height[left] < height[right]: left += 1\n else: right -= 1\n maxArea = max(maxArea, area)\n return maxArea\n \n"
},
{
"alpha_fraction": 0.5123893618583679,
"alphanum_fraction": 0.5407079458236694,
"avg_line_length": 26.512195587158203,
"blob_id": "21d0da892eda96f1f64164b3b7182feb39feb6d5",
"content_id": "295aedc033e321c2493bd778af9eda387983c125",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1130,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 41,
"path": "/grayGode.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# The gray code is a binary numeral system \n# where two successive values differ in only one bit.\n\n# Given a non-negative integer n representing the total number of bits in the code, \n# print the sequence of gray code. A gray code sequence must begin with 0.\n\n# For example, given n = 2, return [0,1,3,2]. Its gray code sequence is:\n# 00 - 0\n# 01 - 1\n# 11 - 3\n# 10 - 2\n\n# DP, hahahahaha\nclass Solution:\n # @return a list of integers\n def grayCode(self, n):\n if n == 0: return [0]\n bins = [[0],[1]]\n for i in range(1, n):\n temp = []\n for j in range(len(bins)):\n temp.append([0]+bins[j])\n for j in range(len(bins)-1, -1, -1):\n temp.append([1]+bins[j])\n bins = temp\n\n out = []\n for num in bins: out.append(self.binaryToDecimal(num))\n return out\n \n def binaryToDecimal(self, binary):\n num = 0\n l = len(binary)\n for i in range(l):\n num += 2**i * binary[l-1-i]\n return num\n\nif __name__ == '__main__':\n test = Solution()\n out = test.grayCode(2)\n print out\n\n\n"
},
{
"alpha_fraction": 0.5603112578392029,
"alphanum_fraction": 0.5719844102859497,
"avg_line_length": 28.71014404296875,
"blob_id": "a8190e2dc109a9dbb9eacf4380fd1fd6bc4ec846",
"content_id": "fbd45ecd787a87f8aedaa2928f101b71b7306b92",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2056,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 69,
"path": "/connectTreeII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Follow up for problem \"Populating Next Right Pointers in Each Node\".\n# What if the given tree could be any binary tree? \n# Would your previous solution still work?\n\n# Note:\n# You may only use constant extra space.\n# For example,\n# Given the following binary tree,\n# 1\n# / \\\n# 2 3\n# / \\ \\\n# 4 5 7\n# After calling your function, the tree should look like:\n# 1 -> NULL\n# / \\\n# 2 -> 3 -> NULL\n# / \\ \\\n# 4-> 5 -> 7 -> NULL\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n self.next = None\n\nclass Solution:\n def connect(self, root):\n if root == None or (root.left == None and root.right == None):\n return\n if root.left != None and root.right == None:\n root.left.next = self.nextRight(root)\n elif root.left == None and root.right != None:\n root.right.next = self.nextRight(root)\n else:\n root.left.next = root.right\n root.right.next = self.nextRight(root)\n # must connect from right, then left !!!\n self.connect(root.right)\n self.connect(root.left)\n \n def nextRight(self, root):\n if root.next == None:\n return\n if root.next.left == None and root.next.right == None: \n return self.nextRight(root.next)\n if root.next.left != None:\n return root.next.left\n elif root.next.right != None:\n return root.next.right\n\nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n root.left.left = TreeNode(0)\n root.left.right = TreeNode(7)\n root.right.left = TreeNode(9)\n root.right.right = TreeNode(1)\n root.left.left.left = TreeNode(2)\n root.left.right.left = TreeNode(1)\n root.left.right.right = TreeNode(0)\n root.right.right.left = TreeNode(8)\n root.right.right.right = TreeNode(8)\n\n test = Solution()\n out = test.connect(root)\n\n \n"
},
{
"alpha_fraction": 0.49520766735076904,
"alphanum_fraction": 0.49520766735076904,
"avg_line_length": 21.285715103149414,
"blob_id": "832b6a45c116dd9a5f92deb8be3fb642221b8868",
"content_id": "25a815783f8d954125169c816645a2354171e448",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 14,
"path": "/reverse.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "class ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def reverse(self, head):\n tail = head\n while tail.next != None:\n new = tail.next\n tail.next = new.next\n new.next = head\n head = new\n return head\n\n"
},
{
"alpha_fraction": 0.5101186037063599,
"alphanum_fraction": 0.5415213108062744,
"avg_line_length": 33.975608825683594,
"blob_id": "8eaaf6acd87317e0108d13f7a2de382754f2cc1b",
"content_id": "98b4b3e49aacbc4e1cfb0e93c537c00699659d3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1433,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 41,
"path": "/searchRange.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a sorted array of integers, \n# find the starting and ending position of a given target value.\n\n# Your algorithm's runtime complexity must be in the order of O(log n).\n# If the target is not found in the array, return [-1, -1].\n\n# For example,\n# Given [5, 7, 7, 8, 8, 10] and target value 8,\n# return [3, 4].\n\nclass Solution:\n # @param A, a list of integers\n # @param target, an integer to be searched\n # @return a list of length 2, [index1, index2]\n def searchRange(self, A, target):\n l = len(A)\n median = l//2\n if l == 0: return [-1, -1]\n \n if target > A[median]:\n shift = self.searchRange(A[median+1:], target)\n [lower, upper] = [-1, -1] if -1 in shift \\\n else [shift[0]+median+1, shift[1]+median+1]\n \n elif target < A[median]: \n shift = self.searchRange(A[:median], target)\n [lower, upper] = [-1, -1] if -1 in shift else shift \n \n else:\n shift_low = self.searchRange(A[:median], target)[0]\n shift_up = self.searchRange(A[median+1:], target)[1]\n lower = median if shift_low == -1 else shift_low\n upper = median if shift_up == -1 else median + shift_up + 1\n \n return [lower, upper]\n \nif __name__ == '__main__':\n A = [5,7,7,8,8,10]\n target = 10\n test = Solution()\n out = test.searchRange(A, target)"
},
{
"alpha_fraction": 0.36867088079452515,
"alphanum_fraction": 0.40189874172210693,
"avg_line_length": 25.16666603088379,
"blob_id": "8296dd8b515c6d66e2c5b57e1069f26ea60ebd98",
"content_id": "8b1711335134003cbc01e2afe80ad1fa142f9b07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 632,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 24,
"path": "/BigPlusOne.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# big number plus one\n\nclass Solution:\n # @param digits, a list of integer digits\n # @return a list of integer digits\n def plusOne(self, digits):\n l = len(digits) \n digits[l-1] += 1\n \n for i in range(l-1, -1, -1):\n if digits[i] == 10:\n if i == 0:\n digits = [1] + [0]*l\n else:\n digits[i] = 0\n digits[i-1] += 1\n else: \n break\n \n return digits \n \nif __name__ == '__main__':\n digits = [1,0,6,5,8,3,9,9]\n out = Solution().plusOne(digits)\n "
},
{
"alpha_fraction": 0.6045627593994141,
"alphanum_fraction": 0.6108998656272888,
"avg_line_length": 33.34782791137695,
"blob_id": "0ecea3ac6dcb287528058d2b91b894bbe8857ded",
"content_id": "74bf3e025017ebedcfccbc4016ac78bc9fbbe415",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 23,
"path": "/quicksort.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# quick sort, follow:\n# http://www.youtube.com/watch?v=aQiWF4E8flQ\n# which is super clear and easy!\n\ndef partition(lt, start, end): \n pivot = lt[end]\n wall = start # wall to the left of the start element\n \n for current in range(start, end):\n if lt[current] < pivot:\n # exchange current element with the element right next to the wall\n lt[current], lt[wall] = lt[wall], lt[current]\n # move the wall to the right of the element smaller the pivot\n wall += 1\n # put the pivot to the position of the wall\n lt[wall], lt[end] = lt[end], lt[wall]\n return wall\n\ndef quicksort(lt, start, end):\n if start < end:\n wall = partition(lt, start, end)\n quicksort(lt, start, wall-1)\n quicksort(lt, wall+1, end)"
},
{
"alpha_fraction": 0.5660980939865112,
"alphanum_fraction": 0.5788912773132324,
"avg_line_length": 21.80487823486328,
"blob_id": "36134f653d4f9411c4d8211672f28ebd5181dd62",
"content_id": "3d3a0c96976e2cadbae4ea31eb6c114615372c53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 938,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 41,
"path": "/postOrderTraversal.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree, return the postorder traversal of its nodes' values.\n\n# For example:\n# Given binary tree {1,#,2,3},\n# 1\n# \\\n# 2\n# /\n# 3\n# return [3,2,1].\n\n# Note: Recursive solution is trivial, could you do it iteratively?\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of integers\n def postorderTraversal(self, root):\n self.out = []\n self.dfs(root)\n return self.out\n\n def dfs(self, root):\n if root == None: return \n self.dfs(root.left)\n self.dfs(root.right)\n self.out.append(root.val)\n \nif __name__ == '__main__':\n root = TreeNode(1)\n root.right = TreeNode(2)\n root.right.left = TreeNode(3)\n test = Solution()\n out = test.postorderTraversal(root)\n print out\n\n\n\n"
},
{
"alpha_fraction": 0.5572916865348816,
"alphanum_fraction": 0.5729166865348816,
"avg_line_length": 28.461538314819336,
"blob_id": "947edbdfb1082b78e23e64c95f85b91d6d908e40",
"content_id": "aaa274f26f3606921a33fd41d6dd724a971416f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 768,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 26,
"path": "/bestTime.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Say you have an array i\n# for which the ith element is the price of a given stock on day i.\n\n# If you were only permitted to complete at most one transaction \n# (ie, buy one and sell one share of the stock), \n# design an algorithm to find the maximum profit.\n\nclass Solution:\n # @param prices, a list of integer\n # @return an integer\n def maxProfit(self, prices):\n if prices == []: return 0\n maxpro = 0\n buy = [prices[0]]\n lowest = prices[0]\n for i in prices[1:]:\n if i < lowest:\n lowest = i\n maxpro = max(maxpro, i - lowest)\n return maxpro\n \nif __name__ == '__main__':\n prices = [10,3,5,6,10]\n test = Solution()\n out = test.maxProfit(prices) \n print out\n\n\n"
},
{
"alpha_fraction": 0.492946058511734,
"alphanum_fraction": 0.515352725982666,
"avg_line_length": 26.930233001708984,
"blob_id": "72084fac383ed93e8a1c4cb6df2daf0bbad3e176",
"content_id": "448cb2a100b241accf73964bc66b3e30460c3619",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1205,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 43,
"path": "/rmDuplicates_listII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a sorted linked list, delete all nodes that have duplicate numbers, \n# leaving only distinct numbers from the original list.\n\n# For example,\n# Given 1->2->3->3->4->4->5, return 1->2->5.\n# Given 1->1->1->2->3, return 2->3.\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n \nclass Solution:\n # @param head, a ListNode\n # @return a ListNode\n def deleteDuplicates(self, head): \n if head == None or head.next == None: return head \n out = ListNode(0)\n record = out\n count = 1\n \n while head != None:\n if head.next != None and head.val == head.next.val:\n count += 1\n elif count == 1:\n out.next = head\n out = out.next\n else: count = 1 \n head = head.next \n out.next = None\n \n return record.next\n \n \nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(3)\n head.next.next = ListNode(3)\n #head.next.next.next = ListNode(4)\n #head.next.next.next.next = ListNode(4)\n \n test = Solution()\n out = test.deleteDuplicates(head)\n "
},
{
"alpha_fraction": 0.3971962630748749,
"alphanum_fraction": 0.4357476532459259,
"avg_line_length": 27.566667556762695,
"blob_id": "1804d9aeb279470d518921fe250f9790a4739a28",
"content_id": "2ac0db04ed20e0cf68988f9ab43593704be15103",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 856,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 30,
"path": "/combinations.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given two integers n and k, \n# return all possible combinations of k numbers out of 1 ... n.\n\n# For example,\n# If n = 4 and k = 2, a solution is:\n# [[2,4], [3,4], [2,3], [1,2], [1,3], [1,4]]\n\nclass Solution:\n # @return a list of lists of integers\n \n # comb(n, k) = comb(n-1, k) * (n-1)\n def combine(self, n, k): # find subsets of length k\n sets = range(1, n+1)\n c0 = [sets[0:k]]\n c1 = []\n \n for i in range(len(sets) - k):\n c1 = []\n for item in c0:\n for j in range(len(item)):\n new = sorted(item[0:j] + [sets[k+i]] + item[j+1:])\n if new not in c1:\n c1.append(new)\n c0 += c1 \n \n return c0\n \nif __name__ == '__main__':\n test = Solution()\n out = test.combine(5,2)"
},
{
"alpha_fraction": 0.4983801245689392,
"alphanum_fraction": 0.5194384455680847,
"avg_line_length": 28.725807189941406,
"blob_id": "5abbbb760ad5e07625f724b4bea148f3afcd16ef",
"content_id": "84c69cc027960d6bb207ef30c0c1e493af2c6499",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1852,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 62,
"path": "/wordSearch.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a 2D board and a word, find if the word exists in the grid.\n\n# The word can be constructed from letters of sequentially adjacent cell, \n# where \"adjacent\" cells are those horizontally or vertically neighboring. \n# The same letter cell may not be used more than once.\n\n# For example,\n# Given board =\n# [\"ABCE\",\n# \"SFCS\",\n# \"ADEE\"]\n# word = \"ABCCED\", -> returns true,\n# word = \"SEE\", -> returns true,\n# word = \"ABCB\", -> returns false.\n\ndef exist(board, word):\n pointer = 1\n \n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == word[0]:\n if len(word) == 1:\n return True\n if search(board, word, [i,j], pointer, [[i,j]]):\n return True\n return False\n\ndef search(board, word, loc, pointer, tracker): \n out = False\n directions = [[1,0], [-1,0], [0,1], [0,-1]] \n \n # boundary conditions\n if loc[0] == 0:\n directions.remove([-1,0])\n if loc[0] == len(board)-1:\n directions.remove([1,0])\n if loc[1] == 0:\n directions.remove([0,-1])\n if loc[1] == len(board[0]) - 1:\n directions.remove([0,1])\n \n for [dx, dy] in directions: \n newtracker = tracker\n newloc = [loc[0]+dx, loc[1]+dy] \n if newloc not in tracker and board[newloc[0]][newloc[1]] == word[pointer]: \n newtracker = tracker + [newloc]\n newpointer = pointer + 1 \n if newpointer == len(word):\n return True \n out = search(board, word, newloc, newpointer, newtracker) \n if out == True:\n break \n \n return out\n \nif __name__ == '__main__':\n board =[\"ABCE\",\"SFCS\",\"ADEE\"]\n word1 = \"ABCCED\"\n word2 = \"SEE\"\n word3 = \"ABCB\"\n \n out = exist(board, word3)\n \n "
},
{
"alpha_fraction": 0.4541284441947937,
"alphanum_fraction": 0.5,
"avg_line_length": 23.69811248779297,
"blob_id": "831face8ebbc56469bd1c0fc8d4d8afc2260af4e",
"content_id": "6956d5b6f80f239db69823dcf53ebced232a2efe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1308,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 53,
"path": "/addTwoNumbers.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# You are given two linked lists representing two non-negative numbers. \n# The digits are stored in reverse order \n# and each of their nodes contain a single digit. \n# Add the two numbers and return it as a linked list.\n\n# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n# Output: 7 -> 0 -> 8\n\n#Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\ndef addTwoNumbers(l1, l2):\n l3 = ListNode(0)\n out = l3\n add = 0\n \n while l1 != None or l2 != None:\n if l1 == None:\n sumi = add + l2.val\n l2 = l2.next\n elif l2 == None:\n sumi = add + l1.val\n l1 = l1.next\n else: \n sumi = add + l1.val + l2.val\n l1, l2 = l1.next, l2.next\n \n if sumi >= 10:\n l3.next = ListNode(sumi%10)\n add = 1 \n else:\n l3.next = ListNode(sumi)\n add = 0\n l3 = l3.next\n \n if l1 == None and l2 == None and add == 1:\n l3.next = ListNode(1)\n \n return out.next\n \nif __name__ == '__main__':\n l1 = ListNode(9)\n l1.next = ListNode(4)\n l1.next.next = ListNode(3)\n \n l2 = ListNode(9)\n l2.next = ListNode(6)\n l2.next.next = ListNode(4)\n \n out = addTwoNumbers(l1, l2)"
},
{
"alpha_fraction": 0.5762355327606201,
"alphanum_fraction": 0.594111442565918,
"avg_line_length": 24.052631378173828,
"blob_id": "60a3d264cee99847c3839625481d43ad0b2a1c95",
"content_id": "6a4c1b9535917eabb0c4ed8676c404ca2ba537f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 951,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 38,
"path": "/sumNumbers.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree containing digits from 0-9 only, \n# each root-to-leaf path could represent a number.\n# An example is the root-to-leaf path 1->2->3 which represents the number 123.\n# Find the total sum of all root-to-leaf numbers.\n\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \ndef sumNumbers(root): \n path, sumi = 0, 0\n return sumPath(root, sumi, path)\n \ndef sumPath(root, sumi, path): \n if root == None:\n return 0\n \n path = path*10 + root.val\n \n if root.left == None and root.right == None:\n return path\n \n sumleft = sumPath(root.left, sumi, path) \n sumright = sumPath(root.right, sumi, path)\n \n return sumleft + sumright\n \nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(5)\n root.right = TreeNode(1)\n root.right.right = TreeNode(6)\n \nprint sumNumbers(root)"
},
{
"alpha_fraction": 0.5797356963157654,
"alphanum_fraction": 0.5947136282920837,
"avg_line_length": 26.0238094329834,
"blob_id": "cf3c505689119a17f0f2b2adc19c7ca83d29a8ce",
"content_id": "b1e3b21d8c78cb98adde6067f4c94ed5b5ff8402",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1135,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 42,
"path": "/maxPathSumII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree, find the maximum root-leaf path sum.\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return an integer\n def maxPathSum(self, root):\n if root == None: return 0\n self.max = 0\n self.dfs(root, 0)\n return self.max\n\n def dfs(self, root, sumi): \n if root == None: return 0\n sumi += root.val\n self.max = max(self.max, sumi)\n self.dfs(root.left, sumi)\n self.dfs(root.right, sumi)\n\nif __name__ == \"__main__\":\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n root.left.left = TreeNode(0)\n root.left.right = TreeNode(7)\n root.right.left = TreeNode(10)\n root.right.right = TreeNode(1)\n root.left.left.left = TreeNode(2)\n root.left.right.left = TreeNode(1)\n root.left.right.right = TreeNode(0)\n root.right.right.left = TreeNode(8)\n root.right.right.right = TreeNode(8)\n\n test = Solution()\n out = test.maxPathSum(root)\n print out\n"
},
{
"alpha_fraction": 0.528896689414978,
"alphanum_fraction": 0.5481611490249634,
"avg_line_length": 26.047618865966797,
"blob_id": "d9b3d9d4ddc409af775b9420a6dd4039b5486972",
"content_id": "402069d012328e9be30610c14530b8601e5e90a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 21,
"path": "/PascalTriangleII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an index k, return the kth row of the Pascal's triangle.\n# For example, given k = 3,\n# Return [1,3,3,1].\n# Note:\n# Could you optimize your algorithm to use only O(k) extra space?\n\nclass Solution:\n # @return a list of lists of integers\n def getRow(self, rowIndex):\n prev = []\n for i in range(rowIndex+1):\n curr = [1]*(i+1)\n for j in range(1,i):\n curr[j] = prev[j-1]+prev[j]\n prev = curr\n return curr\n\nif __name__ == '__main__':\n test = Solution()\n out = test.getRow(3)\n print out\n\n\n\n"
},
{
"alpha_fraction": 0.585539698600769,
"alphanum_fraction": 0.5865580439567566,
"avg_line_length": 29.15625,
"blob_id": "d09a7b6116811221681e5b3924246ce92113bfa2",
"content_id": "ecbd89622270434ea71926204f8e1e02f55cf3a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 982,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 32,
"path": "/rightSideView.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree, imagine yourself standing on the right side of it, \n# return the values of the nodes you can see ordered from top to bottom.\n\n# For example:\n# Given the following binary tree,\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of integers\n def rightSideView(self, root):\n if root == None: return []\n self.out = []\n self.find_right([root])\n return self.out\n def children(self, roots):\n children = []\n for r in roots:\n if r.left != None: children.append(r.left)\n if r.right != None: children.append(r.right)\n return children\n def find_right(self, roots):\n if roots == []: return\n self.out.append(roots[-1].val)\n roots = self.children(roots)\n self.find_right(roots)\n \n "
},
{
"alpha_fraction": 0.5389049053192139,
"alphanum_fraction": 0.5513929128646851,
"avg_line_length": 31.375,
"blob_id": "b6838f600a1709eda20589980554085e98b8e524",
"content_id": "6f077c8b251ebe7395a404fbe53f1bffcda7ca85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1041,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 32,
"path": "/copyListWithRandomPt.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# A linked list is given such that each node contains an additional random pointer \n# which could point to any node in the list or null.\n\n# Return a deep copy of the list.\n# Definition for singly-linked list with a random pointer.\nclass RandomListNode:\n def __init__(self, x):\n self.label = x\n self.next = None\n self.random = None\n\nclass Solution:\n # @param head, a RandomListNode\n # @return a RandomListNode\n def copyRandomList(self, head):\n index, pt = {}, {}\n i, j = 0, 0\n new = RandomListNode(0)\n anchor1, anchor2 = new, new\n new, anchor1 = new.next, new.next\n while head != None:\n new = RandomListNode(head.label)\n index[i] = head.random\n pt[head] = i\n newpt[i] = new \n new, head = new.next, head.next\n i += 1\n while anchor1 != None:\n anchor1.random = newpt[pt[index[j]]]\n anchor1 = anchor1.next\n j += 1\n return anchor2.next\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.45975443720817566,
"alphanum_fraction": 0.5259208679199219,
"avg_line_length": 32.69767379760742,
"blob_id": "87f3e4c8cbda2065c2c997e2fa3545782ba19da2",
"content_id": "477da24f6f5f6a0a0b98808c545f72c752bacb31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1466,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 43,
"path": "/NumMatrix2D.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a 2D matrix matrix, \n# find the sum of the elements inside the rectangle defined by its upper left corner (row1, col1) and lower right corner (row2, col2).\n\n# Range Sum Query 2D\n# The above rectangle (with the red border) is defined by (row1, col1) = (2, 1) and (row2, col2) = (4, 3), which contains sum = 8.\n\n# Example:\n# Given matrix = [\n# [3, 0, 1, 4, 2],\n# [5, 6, 3, 2, 1],\n# [1, 2, 0, 1, 5],\n# [4, 1, 0, 1, 7],\n# [1, 0, 3, 0, 5]\n# ]\n\n# sumRegion(2, 1, 4, 3) -> 8\n# sumRegion(1, 1, 2, 2) -> 11\n# sumRegion(1, 2, 2, 4) -> 12\n\nclass NumMatrix(object):\n def __init__(self, matrix):\n \"\"\"\n initialize your data structure here.\n :type matrix: List[List[int]]\n \"\"\"\n if len(matrix) == 0: m, n = 0, 0\n else: m, n = len(matrix), len(matrix[0])\n self.sums = [[0]*(n+1) for k in range(m+1)]\n for i in range(m):\n self.sums[i+1][1] = self.sums[i][1]+matrix[i][0]\n for j in range(1, n):\n self.sums[i+1][j+1] = self.sums[i+1][j]+self.sums[i][j+1]-self.sums[i][j]+matrix[i][j]\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"\n sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.\n :type row1: int\n :type col1: int\n :type row2: int\n :type col2: int\n :rtype: int\n \"\"\"\n return self.sums[row2+1][col2+1]-self.sums[row1][col2+1]-self.sums[row2+1][col1]+self.sums[row1][col1]\n \n "
},
{
"alpha_fraction": 0.38786280155181885,
"alphanum_fraction": 0.4459103047847748,
"avg_line_length": 19.33333396911621,
"blob_id": "b8f1f646d649711df28897a58a7ad07c4ae4f556",
"content_id": "30fadb5f643df10a1e3c9a8dc3c08409bee22b73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 379,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 18,
"path": "/largest_K.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "import heapq\n\ndef heapSearch(lt, k):\n heap = []\n \n for i in lt:\n if len(heap) < k:\n heapq.heappush(heap, i)\n \n if heap[0] < i:\n heapq.heappop(heap)\n heapq.heappush(heap, i)\n \n return heap\n \nif __name__ == '__main__':\n lt = [1,3,2,4,6,4,7,8,56,23,54,32,11,99]\n print heapSearch(lt, 4)\n \n "
},
{
"alpha_fraction": 0.5222727060317993,
"alphanum_fraction": 0.5304545164108276,
"avg_line_length": 27.05128288269043,
"blob_id": "fab8100c6e5935dabede61c245441094fb2dbd01",
"content_id": "ef83affb84484426b9aae588ce08054d405f410f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2200,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 78,
"path": "/countNodes.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a complete binary tree, count the number of nodes.\n\n# Definition of a complete binary tree from Wikipedia:\n# In a complete binary tree every level, except possibly the last, \n# is completely filled, and all nodes in the last level are as far left as possible. \n# It can have between 1 and 2h nodes inclusive at the last level h.\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# BFS, time limit exceed\nclass Solution1(object):\n def countNodes(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if root == None: return 0\n children = [root] \n level = 0\n nodes = 0\n \n while len(children) == 2**level:\n level += 1\n nodes += len(children)\n children = self.BFS(children)\n nodes += len(children)\n return nodes\n \n def BFS(self, roots):\n children = []\n for i in roots:\n if i.left != None:\n children.append(i.left)\n if i.right != None:\n children.append(i.right)\n return children\n\n# DFS, time limit exceed \nclass Solution2(object):\n def countNodes(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.count = 0\n self.DFS(root)\n return self.count\n \n def DFS(self, root):\n if root == None: return\n self.count += 1\n self.DFS(root.left)\n self.DFS(root.right)\n\nclass Solution3(object):\n def countNodes(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if root == None: return 0\n left = self.getHeight(root, 'L')\n right = self.getHeight(root, 'R')\n if left == right: return 2**left -1 \n else: return self.countNodes(root.left)+ self.countNodes(root.right) +1\n \n def getHeight(self, root, direction):\n h = 0\n while root != None:\n if direction == 'L': root = root.left\n else: root = root.right\n h += 1 \n return h\n "
},
{
"alpha_fraction": 0.5971074104309082,
"alphanum_fraction": 0.6177685856819153,
"avg_line_length": 31.266666412353516,
"blob_id": "271f1a11e2578c3aeccbcfe908b348af7adf0bb0",
"content_id": "ff7d40b56a63639fe4564a8fb9168f170ca92416",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 484,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 15,
"path": "/maxProduct.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Find the contiguous subarray within an array (containing at least one number) \n# which has the largest product.\n\n# For example, given the array [2,3,-2,4],\n# the contiguous subarray [2,3] has the largest product = 6.\n\nclass Solution:\n # @param A, a list of integers\n # @return an integer\n def maxProduct(self, A):\n product = A[0]\n maxproduct = A[0]\n for i in range(1, len(A)):\n product *= A[i]]\n maxproduct = max(maxproduct, new)\n"
},
{
"alpha_fraction": 0.30434781312942505,
"alphanum_fraction": 0.35731226205825806,
"avg_line_length": 26.55555534362793,
"blob_id": "57f760b4c3c134f6907442552b8e2cf36111b6b7",
"content_id": "ca055e28d1ac0436e600cd61d6e9a2ae45c412ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1265,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 45,
"path": "/spiral_print_matrix.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "def spiral(A):\n if A == []:\n return A\n \n row = len(A)\n col = len(A[0]) \n i, j, k = 0, 0, 0\n out = [] \n dx, dy = 0, 1 # direction vector\n \n for counter in range(row*col):\n \n if row - 2*k == 1:\n dx, dy = 0, 1\n elif col - 2*k == 1:\n dx, dy = 1, 0 \n # topright corner\n if i == k and j == col - 1 - k:\n dx, dy = 1, 0 \n # bottomright corner\n elif i == row - 1 - k and j == col -1 - k:\n dx, dy = 0, -1 \n # bottomleft corner\n elif i == row - 1 - k and j == k:\n dx, dy = -1, 0 \n # topleft corner, ATTENTION: different from other three!\n # Because it's not a circal but a spiral, not closed!\n elif i == k + 1 and j == k:\n dx, dy = 0, 1\n k = k+1\n \n print A[i][j] \n print 'direction', dx, dy\n out.append(A[i][j])\n i += dx\n j += dy\n \n \n return out\n \nif __name__ == '__main__':\n A = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]\n B= [[1],[2],[3]]\n C = [[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15]]\n print spiral(B)\n \n \n "
},
{
"alpha_fraction": 0.498694509267807,
"alphanum_fraction": 0.5169712901115417,
"avg_line_length": 19.210525512695312,
"blob_id": "d531e3c777edd00fc1067a69081f1dcff701aa0e",
"content_id": "0ffea08660e703ebbf790d7e923b4b46383434d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 383,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 19,
"path": "/Palindrome.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "def isPalindrome(s):\n table = 'abcdefghijklmnopqrstuvwxyz'\n\n lt = []\n for i in s.lower() :\n if i in table or i.isdigit():\n lt.append(i)\n \n if lt == lt[::-1]:\n return True\n else:\n return False\n \nif __name__ == '__main__':\n s1 = \"A man, a plan, a canal: Panama\"\n s2 = \"1a2\"\n \n print isPalindrome(s1)\n print isPalindrome(s2)"
},
{
"alpha_fraction": 0.48181816935539246,
"alphanum_fraction": 0.4939393997192383,
"avg_line_length": 29,
"blob_id": "8b2f552f69e7b98942dd07177fd3314c7db1834f",
"content_id": "63e5aacd53b111d22542dd1bbaf3c31746c182fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 330,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 11,
"path": "/singleNumII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A, a list of integer\n # @return an integer\n def singleNumber(self, A):\n if A == []: return\n record = {}\n for i in A:\n if i not in record: record[i] = 1\n elif record[i] < 2: record[i] += 1\n else: record.pop(i)\n return record.keys()[0]\n"
},
{
"alpha_fraction": 0.6013745665550232,
"alphanum_fraction": 0.6036655306816101,
"avg_line_length": 27.933332443237305,
"blob_id": "bdf977be3429e0bbab085d596dd412106ad3c175",
"content_id": "567f419cec41fc8f62b30ba5cd53c553d48c67c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 873,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 30,
"path": "/sameTree.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given two binary trees, write a function to check if they are equal or not.\n\n# Two binary trees are considered equal \n# if they are structurally identical and the nodes have the same value.\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param p, a tree node\n # @param q, a tree node\n # @return a boolean\n def isSameTree(self, p, q):\n if p == None and q == None: return True\n if p == None or q == None: return False\n if p.val != q.val: return False\n left = self.isSameTree(p.left, q.left)\n right = self.isSameTree(p.right, q.right)\n return left and right\n\nif __name__ == '__main__':\n p = TreeNode(0)\n q = TreeNode(0)\n test = Solution()\n out = test.isSameTree(p, q)\n print out \n\n\n\n\n"
},
{
"alpha_fraction": 0.5149863958358765,
"alphanum_fraction": 0.5299727320671082,
"avg_line_length": 28,
"blob_id": "b8ca5bd0f77e07ed3f18c386be8c4e91c42127eb",
"content_id": "79fe9656c24383e798b53712b6ccf21573c358bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 738,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 25,
"path": "/majorityElement.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#Given an array of size n, find the majority element. \n#The majority element is the element that appears more than ⌊ n/2 ⌋ times.\n\n#You may assume that the array is non-empty and the majority element always exist in the array.\n\nclass Solution:\n # @param {integer[]} nums\n # @return {integer}\n def majorityElement(self, nums):\n dic = {}\n rec = 0\n out = None\n for i in nums:\n if i not in dic:\n dic[i] = 1\n else: dic[i] += 1\n if dic[i] > rec: out = i\n rec = max(rec, dic[i])\n return out\n \nif __name__ == '__main__':\n nums = [1,2,1,2,4,2]\n x = Solution()\n print x.majorityElement(nums)\n \n "
},
{
"alpha_fraction": 0.47104519605636597,
"alphanum_fraction": 0.49858757853507996,
"avg_line_length": 34.275001525878906,
"blob_id": "a0eb5ccfdb4ccac888a141f5956db76009f45748",
"content_id": "4a9939938481da72c63801d4fbafde1b5341b0b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1416,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 40,
"path": "/searchRotatedArray.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Suppose a sorted array is rotated at some pivot unknown to you beforehand.\n# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).\n\n# You are given a target value to search. \n# If found in the array return its index, otherwise return -1.\n\n# You may assume no duplicate exists in the array.\n\nclass Solution:\n # @param A, a list of integers\n # @param target, an integer to be searched\n # @return an integer\n def search(self, A, target): \n l = len(A)\n median = l//2\n \n if l == 0: return -1 \n if target == A[0]: return 0 \n if target == A[median]: return median\n \n if target > A[0]: # in the large part \n if target < A[median] or A[0] > A[median]:\n index = self.search(A[:median], target)\n else: \n shift = self.search(A[median+1:], target)\n index = -1 if shift == -1 else median + shift + 1 \n else: # in the small part\n if target > A[median] or A[-1] < A[median]:\n shift = self.search(A[median+1:], target)\n index = -1 if shift == -1 else median + shift + 1\n else:\n index = self.search(A[:median], target)\n \n return index\n \n \nif __name__ == '__main__':\n A = [4,5,6,8,0,1,2]\n test = Solution()\n out = test.search(A, 7) \n "
},
{
"alpha_fraction": 0.4444444477558136,
"alphanum_fraction": 0.4915824830532074,
"avg_line_length": 26,
"blob_id": "b7a538ddfbce1f96b1a309b738995267eecfdeec",
"content_id": "cdd472aca44b606e0123ffb097db92e12dfd97e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 891,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 33,
"path": "/countAndSay.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# The count-and-say sequence is the sequence of integers beginning as follows:\n# 1, 11, 21, 1211, 111221, ...\n\n# 1 is read off as \"one 1\" or 11.\n# 11 is read off as \"two 1s\" or 21.\n# 21 is read off as \"one 2, then one 1\" or 1211.\n# Given an integer n, generate the nth sequence.\n\n# Note: The sequence of integers will be represented as a string.\n\nclass Solution:\n # @return a string\n def countAndSay(self, n):\n now = '1'\n for k in range(1, n):\n out = ''\n i = 0\n while i < len(now):\n say = now[i]\n count = 1\n while i < len(now) -1 and now[i] == now[i+1]:\n count += 1\n i += 1\n out += str(count)+say\n i += 1\n now = out\n\n return now\n\nif __name__ == '__main__':\n n = 5\n test = Solution()\n out = test.countAndSay(n)\n"
},
{
"alpha_fraction": 0.6396760940551758,
"alphanum_fraction": 0.6558704376220703,
"avg_line_length": 32,
"blob_id": "866dc43a6b37e809a06dcb5a708da17b26261e5b",
"content_id": "e19ddc6cd9afb67d5fa67cba154e01ea0464b41b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 15,
"path": "/missingNumber.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Given an array containing n distinct numbers taken from 0, 1, 2, ..., n, find the one that is missing from the array.\n\n#For example,\n#Given nums = [0, 1, 3] return 2.\n\n#Note:\n#Your algorithm should run in linear runtime complexity. Could you implement it using only constant extra space complexity?\n\nclass Solution(object):\n def missingNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n return list(set(range(len(nums)+1))-set(nums)).pop()"
},
{
"alpha_fraction": 0.4153284728527069,
"alphanum_fraction": 0.48686131834983826,
"avg_line_length": 28.782608032226562,
"blob_id": "8624a26d040dc83d2f3688f25f55edf0b54d4eb8",
"content_id": "dd4ee9f345b9198c32e2b5dc22e147643707aec9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1370,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 46,
"path": "/interLeave.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given s1, s2, s3, \n# find whether s3 is formed by the interleaving of s1 and s2.\n\n# For example,\n# Given:\n# s1 = \"aabcc\",\n# s2 = \"dbbca\",\n \n# When s3 = \"aadbbcbcac\", return true.\n# When s3 = \"aadbbbaccc\", return false.\n\n# results[k][i][j]:\n# k,i,j are the length of substr (start from 0) of s3, s1, s2\n\nclass Solution:\n # @return a boolean\n def isInterleave(self, s1, s2, s3):\n l1, l2, l3 = len(s1), len(s2), len(s3)\n if l3 != l1+l2: return False\n if l3 == 0: return True\n if l1 == 0: return s3 == s2\n if l2 == 0: return s3 == s1\n \n results = [[[False]*(l2+1) for i in range(l1+1)] for k in range(l3+1)] \n results[0][0][0] = True\n results[1][1][0], results[1][0][1] = s3[0]==s1[0], s3[0]==s2[0]\n\n for k in range(2, l3+1):\n for i in range(max(0, k-l2), min(l1+1, k+1)):\n j = k - i \n results[k][i][j] = (i==0 and s3[:j]==s2[:j]) or (results[k-1][i-1][j] and s3[k-1]==s1[i-1]) or\\\n (j==0 and s3[:i]==s1[:i]) or (results[k-1][i][j-1] and s3[k-1]==s2[j-1])\n return results[l3][l1][l2]\n\nif __name__ == '__main__':\n s1 = \"aabcc\"\n s2 = \"dbbca\"\n s3 = \"aadbbcbcac\"\n s4 = 'aadbbbaccc'\n\n t1 = 'a'\n t2 = 'b'\n t3 = 'aa'\n test = Solution()\n out = test.isInterleave(s1, s2, s4)\n print out\n"
},
{
"alpha_fraction": 0.4468384087085724,
"alphanum_fraction": 0.4533957839012146,
"avg_line_length": 35.186439514160156,
"blob_id": "874641ec7672d6f7bb7aa32c0486554caf1aada1",
"content_id": "0504d65a43911a82d319e6d3b4240bd176204fff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2135,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 59,
"path": "/substrWithCat.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# You are given a string, S, and a list of words, L, that are all of the same length. \n# Find all starting indices of substring(s) in S that is a concatenation of each word in L \n# exactly once and without any intervening characters.\n\n# For example, given:\n# S: \"barfoothefoobarman\"\n# L: [\"foo\", \"bar\"]\n# You should return the indices: [0,9].\n# (order does not matter).\n\n# exceed time limit\nclass Solution:\n # @param S, a string\n # @param L, a list of string\n # @return a list of integer\n def findSubstring(self, S, L):\n if L == [] or len(S) < len(L[0])*len(L): return []\n hashmap = {}\n for i in L:\n if i not in hashmap: hashmap[i] = 1\n else: hashmap[i] += 1\n unit = len(L[0])\n index = []\n for begin in range(unit):\n i = begin\n while i < len(S):\n match = {}\n start = i\n found = 0\n while i+unit-1<len(S) and S[i:i+unit] in hashmap:\n if S[i:i+unit] not in match:\n match[S[i:i+unit]] = [i]\n else:\n match[S[i:i+unit]].append(i)\n if len(match[S[i:i+unit]]) == hashmap[S[i:i+unit]]:\n found += 1 \n elif len(match[S[i:i+unit]]) > hashmap[S[i:i+unit]]:\n newstart = match[S[i:i+unit]][0]+unit\n for j in range(start, newstart, unit):\n if len(match[S[j:j+unit]]) == hashmap[S[j:j+unit]]:\n found -= 1 \n match[S[j:j+unit]].pop(0)\n start = newstart\n if found == len(hashmap): \n index.append(start)\n match[S[start:start+unit]].pop(0)\n found -= 1\n start += unit \n i += unit\n i += unit\n return sorted(index)\n\nif __name__ == '__main__':\n S = \"barfoothefoobarman\"\n L = [\"foo\", \"bar\"]\n\n test = Solution()\n out = test.findSubstring('abababab',['a','b','a'])\n print out\n"
},
{
"alpha_fraction": 0.5042158365249634,
"alphanum_fraction": 0.5092748999595642,
"avg_line_length": 22.625,
"blob_id": "a7f1fb24c549ebd0571b6d2a3041bbbddf067361",
"content_id": "278d1b3123b50f1e5b1586326a9b2f66f573b99f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 593,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 24,
"path": "/hashtable.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# simplest hashtable, cannot deal with collisions\n\nclass KeyValue:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n \n\nclass HashTable:\n size = 20\n def __init__(self):\n self.list = [0] * self.size\n \n def hash(self, key):\n index = key % self.size\n return index\n \n def set(self, key, value):\n index = self.hash(key) \n self.list[index] = KeyValue(key, value)\n \n def get(self, key):\n index = self.hash(key)\n return self.list[index].value\n \n \n "
},
{
"alpha_fraction": 0.37685060501098633,
"alphanum_fraction": 0.41722744703292847,
"avg_line_length": 25.464284896850586,
"blob_id": "853f8ae7e00ff2755cd07eddc423f7a8be1dbc12",
"content_id": "3c246f3123e8c0a75d6131162f8ad67195059092",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 743,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 28,
"path": "/uniqueBST.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given n, how many structurally unique BST's (binary search trees) \n# that store values 1...n?\n\n# For example,\n# Given n = 3, there are a total of 5 unique BST's.\n\n# 1 3 3 2 1\n# \\ / / / \\ \\\n# 3 2 1 1 3 2\n# / / \\ \\\n# 2 1 2 3\n\n# exceed time limit\nclass Solution:\n # @return an integer\n def numTrees(self, n):\n if n <= 1: return 1\n num = [0]*(n+1)\n num[0], num[1] = 1,1\n for i in range(2,n+1):\n for j in range(i):\n num[i] += num[j]*num[i-j-1]\n return num[n]\n\nif __name__ == '__main__':\n test = Solution()\n out = test.numTrees(5)\n print out\n\n\n"
},
{
"alpha_fraction": 0.6519052386283875,
"alphanum_fraction": 0.6776518821716309,
"avg_line_length": 37.880001068115234,
"blob_id": "940e20d24076d09b6652f3b60eed8511fdc27665",
"content_id": "1bf7fd4b6b0d33a20d2052fa15c08c7bd17b36f7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 971,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 25,
"path": "/robber.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#House Robber Total Accepted: 8708 Total Submissions: 30753\n#You are a professional robber planning to rob houses along a street. \n#Each house has a certain amount of money stashed, \n#the only constraint stopping you from robbing each of them is that \n#adjacent houses have security system connected and it will automatically contact the police \n#if two adjacent houses were broken into on the same night.\n\n#Given a list of non-negative integers representing the amount of money of each house, \n#determine the maximum amount of money you can rob tonight without alerting the police.\n\nclass Solution:\n # @param num, a list of integer\n # @return an integer\n def rob(self, num):\n sumi1, sumi2= 0, 0\n for i in range(len(num)):\n temp = sumi2\n sumi2 = sumi1 + num[i]\n sumi1 = max(temp, sumi1)\n return max(sumi1, sumi2)\n \nif __name__ == '__main__':\n num = [2,1,1,2,2]\n x = Solution() \n print x.rob(num)"
},
{
"alpha_fraction": 0.5066518783569336,
"alphanum_fraction": 0.5077605247497559,
"avg_line_length": 29.100000381469727,
"blob_id": "0db8448a1fcbb615f4515eb7579ad5638ba6d807",
"content_id": "f0304563e90eddde69cf1cc0ea124633c9eae07f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 902,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 30,
"path": "/insertionSortList.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Sort a linked list using insertion sort.\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def insertionSortList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n newhead = ListNode(0)\n newhead.next = head \n temphead = newhead\n \n while head != None and head.next != None and temphead != None:\n if temphead.next == head.next:\n head = head.next \n temphead = newhead \n elif head.next.val < temphead.next.val:\n temp = head.next\n head.next = head.next.next\n temphead.next = temp\n temp.next = head \n else: temphead = temphead.next\n \n return newhead.next"
},
{
"alpha_fraction": 0.5438749194145203,
"alphanum_fraction": 0.5690703988075256,
"avg_line_length": 26.975608825683594,
"blob_id": "b0cb1ae7c3ceeaceb7286dbe568f4459ea64a5d1",
"content_id": "885ebe9ae5186a6185f514b425630b6fd2450c96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1151,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 41,
"path": "/removeNthNode.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a linked list, remove the nth node from the end of list and return its head.\n# For example,\n# Given linked list: 1->2->3->4->5, and n = 2.\n# After removing the second node from the end, the linked list becomes 1->2->3->5.\n# Note:\n# Given n will always be valid.\n# Try to do this in one pass.\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @return a ListNode\n def removeNthFromEnd(self, head, n):\n head0 = ListNode(0)\n head1 = ListNode(0)\n head0.next = head\n head1.next = head0\n \n for i in range(n-1):\n head = head.next\n \n while head.next != None:\n head = head.next\n head0 = head0.next\n \n head0.next = head0.next.next \n return head1.next.next\n\nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(4)\n head.next.next.next.next = ListNode(5)\n \n test = Solution()\n out = test.removeNthFromEnd(head, 2)\n "
},
{
"alpha_fraction": 0.2907530963420868,
"alphanum_fraction": 0.3212583363056183,
"avg_line_length": 26.263158798217773,
"blob_id": "9cd65d3b4815d4266d0d0d95415f97cba8eba719",
"content_id": "39df9a27ae1d05c8a7c4cec4e771b4ee27e9a8a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1049,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 38,
"path": "/addBinary.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given two binary strings, \n# return their sum (also a binary string).\n\n# For example,\n# a = \"11\", b = \"1\"\n# Return \"100\".\n\nclass Solution:\n # @param a, a string\n # @param b, a string\n # @return a string\n def addBinary(self, a, b):\n if a == '' and b == '': return ''\n c = ''\n add = '0'\n \n while a != '' or b != '': \n if (a == '' or a[-1] == '0') and (b == '' or b[-1] == '0'):\n c += add \n add = '0'\n elif a != '' and a[-1] == '1' and b != '' and b[-1] == '1':\n c += add\n add = '1'\n elif add == '1':\n c += '0'\n else:\n c += '1' \n a = a[:-1] if a != '' else ''\n b = b[:-1] if b != '' else '' \n \n c = c + add if add == '1' else c \n return c[::-1]\n \nif __name__ == '__main__':\n test = Solution()\n a = '1010'\n b = '1011'\n out = test.addBinary(a,b)\n \n "
},
{
"alpha_fraction": 0.4988308548927307,
"alphanum_fraction": 0.517536997795105,
"avg_line_length": 23.66666603088379,
"blob_id": "cfd51e497c5e05a86e3138c9b17453667a8db489",
"content_id": "b1393e6d60a21f8685a91e1a196b977b464a9537",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1283,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 51,
"path": "/reverseListII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Reverse a linked list from position m to n. Do it in-place and in one-pass.\n\n# For example:\n# Given 1->2->3->4->5->NULL, m = 2 and n = 4,\n\n# return 1->4->3->2->5->NULL.\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @param m, an integer\n # @param n, an integer\n # @return a ListNode\n def reverseBetween(self, head, m, n):\n new, left = head, head\n if m == n: return head\n count = 1\n while count < m:\n if count < m-1: left = left.next\n head = head.next\n count += 1\n \n tail = head\n while count < n:\n temp = tail.next\n tail.next = temp.next\n temp.next = head\n head = temp\n count += 1 \n if m != 1: \n left.next = head\n return new\n else: return head\n\nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(4)\n head.next.next.next.next = ListNode(5)\n\n test = Solution()\n out = test.reverseBetween(head, 1, 5)\n while out != None:\n print out.val\n out = out.next\n\n \n \n"
},
{
"alpha_fraction": 0.5363825559616089,
"alphanum_fraction": 0.542619526386261,
"avg_line_length": 34.55555725097656,
"blob_id": "0456524325a82912004305ded334662e51357dac",
"content_id": "11234c8a32387ed303b1c504bf2eb57d316de634",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 962,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 27,
"path": "/longestValidParenthese.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string containing just the characters '(' and ')', \n# find the length of the longest valid (well-formed) parentheses substring.\n\n# For \"(()\", the longest valid parentheses substring is \"()\", which has length = 2.\n# Another example is \")()())\", where the longest valid parentheses substring is \"()()\", which has length = 4.\n\nclass Solution:\n # @param s, a string\n # @return an integer\n def longestValidParentheses(self, s):\n maxnum = 0\n tomatch = []\n for i in range(len(s)):\n if s[i] == '(': tomatch.append(i)\n else:\n if tomatch != [] and s[tomatch[-1]] == '(':\n tomatch.pop()\n temp = tomatch[-1] if tomatch != [] else -1 \n maxnum = max(maxnum, i-temp)\n else: tomatch.append(i)\n return maxnum\n\nif __name__ == '__main__':\n s = \")()())\" \n test = Solution()\n out = test.longestValidParentheses(s)\n print out\n\n\n"
},
{
"alpha_fraction": 0.5219435691833496,
"alphanum_fraction": 0.5909090638160706,
"avg_line_length": 28.045454025268555,
"blob_id": "0a32f715e1858175985f391e67a08c6c69d826ba",
"content_id": "16d00e492130b2813387cc69667ada000080d47c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 642,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 22,
"path": "/hamming.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n#Write a function that takes an unsigned integer and returns the number of ’1' bits it has \n#(also known as the Hamming weight).\n\n#For example, the 32-bit integer ’11' has binary representation \n#00000000000000000000000000001011, so the function should return 3.\n\nclass Solution:\n # @param n, an integer\n # @return an integer\n def hammingWeight(self, n):\n count = []\n res = abs(n)\n while res > 0:\n count.append(res%2)\n res = res/2 \n return sum(count) \n \n \nif __name__ == '__main__':\n x = Solution()\n print x.hammingWeight(11)"
},
{
"alpha_fraction": 0.5207692384719849,
"alphanum_fraction": 0.5476922988891602,
"avg_line_length": 29.9761905670166,
"blob_id": "f41b41bdb39a73a2a748107e933d170dba5f3b56",
"content_id": "69c7d4996edc566924816d445f3c248b56a9e80b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1300,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 42,
"path": "/insertInterval.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a set of non-overlapping intervals, \n# insert a new interval into the intervals (merge if necessary).\n# You may assume that the intervals were initially sorted according to their start times.\n\nclass Interval:\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\n \ndef insert(intervals, newInterval):\n temp = []\n loc = 0\n for i in intervals: \n if newInterval.start > i.start:\n loc += 1 \n if overlap(i, newInterval):\n newInterval = \\\n Interval(min(i.start, newInterval.start), max(i.end, newInterval.end)) \n temp.append(i) \n \n intervals.insert(loc, newInterval) \n \n if temp != []:\n for i in temp:\n intervals.remove(i) \n return intervals\n \ndef overlap(int1, int2):\n if (int2.start >= int1.start and int2.start <= int1.end) or \\\n (int1.start >= int2.start and int1.start <= int2.end):\n return True\n else: \n return False\n \nif __name__ == '__main__':\n intervals = [Interval(1,2), Interval(3,5), Interval(6,7), Interval(8,10),\\\n Interval(12, 16)]\n newInterval = Interval(4,9)\n\n ints2 = [Interval(1,5)]\n new = Interval(5,7)\n insert(ints2, new)"
},
{
"alpha_fraction": 0.4606451690196991,
"alphanum_fraction": 0.5948387384414673,
"avg_line_length": 28.384614944458008,
"blob_id": "fa02b2e6306428e0c70a8263057c920cff21e087",
"content_id": "2e79ed485c9e68f8ed384f5e3643b95f4c667277",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 26,
"path": "/reverseBits.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Reverse bits of a given 32 bits unsigned integer.\n\n#For example, given input 43261596 (represented in binary as 00000010100101000001111010011100), \n#return 964176192 (represented in binary as 00111001011110000010100101000000).\n\n#Follow up:\n#If this function is called many times, how would you optimize it? \n\nclass Solution:\n # @param n, an integer\n # @return an integer\n def reverseBits(self, n):\n count = []\n res = abs(n)\n new = 0\n while res > 0:\n count.append(res%2)\n res = res/2 \n count = count+[0]*(32-len(count))\n for i in range(32):\n if count[i] == 1: new += 2**(31-i) \n return new\n\nif __name__ == '__main__':\n x = Solution()\n print x.reverseBits(43261596) "
},
{
"alpha_fraction": 0.5169082283973694,
"alphanum_fraction": 0.5472739934921265,
"avg_line_length": 31.200000762939453,
"blob_id": "755ae54a3334d0fe4e2ae38037af6de6491ac0aa",
"content_id": "5fca59e1ab02fb9d533a8d921517d43ac913f83d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1449,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 45,
"path": "/nextPermutation.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Implement next permutation, \n# which rearranges numbers into the lexicographically next greater permutation of numbers.\n\n# If such arrangement is not possible, \n# it must rearrange it as the lowest possible order (ie, sorted in ascending order).\n\n# The replacement must be in-place, do not allocate extra memory.\n\n# Here are some examples. \n# Inputs are in the left-hand column and its corresponding outputs are in the right-hand column.\n# 1,2,3 -> 1,3,2\n# 3,2,1 -> 1,2,3\n# 1,1,5 -> 1,5,1\n\n# Follow instructions online, don't quite understand\nclass Solution:\n # @param num, a list of integer\n # @return a list of integer\n def nextPermutation(self, num):\n if num == []: return []\n l = len(num)\n exist1, exist2 = False, False\n for i in range(l-1, 0, -1):\n if num[i] > num[i-1]: \n exist1 = True\n break \n if exist1:\n for j in range(l-1, i-1, -1):\n if num[j] > num[i-1]: \n exist2 = True\n break\n if exist1 and exist2:\n num[i-1], num[j] = num[j], num[i-1]\n for k in range((l-i)/2):\n num[i+k], num[l-1-k] = num[l-1-k], num[i+k]\n else:\n for k in range(l/2):\n num[k], num[l-1-k] = num[l-1-k], num[k]\n return num\n\nif __name__ == '__main__':\n num = [3,2,1]\n test = Solution()\n out = test.nextPermutation(num)\n print out\n"
},
{
"alpha_fraction": 0.40500569343566895,
"alphanum_fraction": 0.44482365250587463,
"avg_line_length": 24.114286422729492,
"blob_id": "e712cf0b02d990df796104d04f7f2d7386ab5a15",
"content_id": "f22c43595ca1aafdd8bc6d95f59e0545ec6b8f29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 879,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 35,
"path": "/sprialMatrixII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an integer n, generate a square matrix filled with elements from 1 to n2 in spiral order.\n\n# For example,\n# Given n = 3,\n\n# You should return the following matrix:\n# [\n# [ 1, 2, 3 ],\n# [ 8, 9, 4 ],\n# [ 7, 6, 5 ]\n# ]\n\nclass Solution:\n # @return a list of lists of integer\n def generateMatrix(self, n):\n x, y = 0, 0\n dx, dy = 0, 1 # dx in row, dy in col\n k = 0\n A = [[0]*n for i in range(n)]\n for i in range(1, n*n+1):\n A[x][y] = i\n if x == k and y == n-1-k: dx, dy = 1, 0\n elif x == n-1-k and y == n-1-k: dx, dy = 0, -1\n elif x == n-1-k and y == k: dx, dy = -1, 0\n elif x == k+1 and y == k: \n k += 1\n dx, dy = 0, 1\n x += dx\n y += dy\n\n return A\n\nif __name__ == '__main__':\n test = Solution()\n out = test.generateMatrix(4)\n"
},
{
"alpha_fraction": 0.5042613744735718,
"alphanum_fraction": 0.5085227489471436,
"avg_line_length": 26.60784339904785,
"blob_id": "fbdcd65de1821dd59c69a731b66e49d805fca9f9",
"content_id": "0e500a18aa73aae68bc39b9cc8a755be5df02cd1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1408,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 51,
"path": "/minWindowSubstrII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Gven a string S and a string T, \n# find the minimum window in S which will contain all the characters in T in complexity O(n).\n\n# For example,\n# S = \"ADOBECODEBANC\"\n# T = \"ABC\"\n# Minimum window is \"BANC\".\n\n# Note:\n# If there is no such window in S that covers all characters in T, \n# return the emtpy string \"\".\n\n# If there are multiple such windows, \n# you are guaranteed that there will always be only one unique minimum window in S.\n\n#this version ignore duplicates in T\nclass Solution:\n # @return a string\n def minWindow(self, S, T):\n if S == '' or T == '': return ''\n uniqT = {}\n for i in range(len(T)): uniqT[T[i]] = i\n start = 0\n minLen = float('inf')\n minWin = ''\n\n while start < len(S):\n i = start\n window = {}\n res = len(uniqT)\n while i < len(S) and res != 0:\n if S[i] in uniqT and S[i] not in window: res -= 1\n window[S[i]] = i\n i += 1\n if res != 0: break\n for j in range(start, i):\n if S[j] in uniqT and window[S[j]] == j: break\n if i-j < minLen:\n minWin = S[j:i]\n minLen = i-j\n start = j+1\n \n return minWin\n\nif __name__ == '__main__':\n S = \"a\"\n T = \"aa\"\n print 'original', S\n test = Solution()\n out = test.minWindow(S, T)\n print out\n"
},
{
"alpha_fraction": 0.4566473960876465,
"alphanum_fraction": 0.46820810437202454,
"avg_line_length": 28.689655303955078,
"blob_id": "6cfa01c0a474ee415dd5d5ca53bc228aa5b1fe42",
"content_id": "4aa0e55601e4988f14e26bcc3d53a1c65ac583bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 865,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 29,
"path": "/generateParentheses.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given n pairs of parentheses, \n# write a function to generate all combinations of well-formed parentheses.\n\n# For example, given n = 3, a solution set is:\n# \"((()))\", \"(()())\", \"(())()\", \"()(())\", \"()()()\"\n\ndef generateParenthesis(n):\n root = ['()'] \n for count in range(n-1):\n new = []\n for i in root:\n new += addOneParenthesis(i)\n root = list(set(new))\n return root\n \ndef addOneParenthesis(old):\n new = [] \n for i in range(len(old)-1):\n if old[i] == '(' and old[i+1] == ')':\n leftchild = old[0:i] + '()()' + old[i+2:]\n rightchild = old[0:i] + '(())' + old[i+2:]\n new.append(leftchild)\n new.append(rightchild) \n return new \n \nif __name__ == '__main__':\n root = ['()']\n count = 0 \n out = generateParenthesis(4)\n "
},
{
"alpha_fraction": 0.5570032596588135,
"alphanum_fraction": 0.5635179281234741,
"avg_line_length": 24.41666603088379,
"blob_id": "2e13623b303656db363236d7b6e4d87597b562a0",
"content_id": "6fef729694b9a1005a54dfe9789c6cf967b1b17f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 614,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 24,
"path": "/rmDuplicates_list.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a sorted linked list, \n# delete all duplicates such that each element appear only once.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\ndef deleteDuplicates(head):\n new = ListNode(0)\n new.next = head\n \n while head != None and head.next != None:\n if head.val == head.next.val:\n head.next = head.next.next \n else:\n head = head.next\n \n return new.next\n \nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(1)\n head.next.next = ListNode(1)\n \n out = deleteDuplicates(head)\n "
},
{
"alpha_fraction": 0.3933601677417755,
"alphanum_fraction": 0.42756539583206177,
"avg_line_length": 31.064516067504883,
"blob_id": "389c8e80bdb5bd321b55c6b4f695c888228e1b83",
"content_id": "b940baabc6d218bb1483c627882c3a479ffb9e0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 994,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 31,
"path": "/permutationsII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a collection of numbers, return all unique permutations.\n# For example,\n# [1,1,2] have the following unique permutations:\n# [1,1,2], [1,2,1], and [2,1,1].\n\nclass Solution:\n # @param num, a list of integer\n # @return a list of lists of integers\n \n # per(1) = 1\n # per(n) = per(n-1)*n \n def permuteUnique(self, num):\n p0 = [[num[0]]] # permutations at step n-1\n for i in range(1, len(num)):\n p1 = []\n for item in p0: \n j = 0 \n while j <= len(item):\n if j < len(item) and item[j] == num[i]: \n j += 1\n continue\n new = item[0:j] + [num[i]] + item[j:]\n if new not in p1: p1.append(new)\n j += 1 \n p0 = p1\n return p0\n\nif __name__ == '__main__':\n test = Solution()\n out = test.permuteUnique([1,1,2,2])\n print out\n"
},
{
"alpha_fraction": 0.5139887928962708,
"alphanum_fraction": 0.525979220867157,
"avg_line_length": 23.076923370361328,
"blob_id": "8385cfa7adeb8265e4422179a236f734d05ccda0",
"content_id": "62c77aa730c4b62bd216aac0331002f472c08495",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1251,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 52,
"path": "/word_ladder.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given two words (start and end), and a dictionary, \n# find the length of shortest transformation sequence from start to end, \n# such that:\n# 1.Only one letter can be changed at a time\n# 2.Each intermediate word must exist in the dictionary\n\n# @param start, a string\n# @param end, a string\n# @param d, a set of string\n# @return an integer\n \ndef ladderLength(start, end, d):\n length = 1\n ladder = [start]\n \n while ladder != []:\n children = []\n \n for word in ladder:\n children += nextLevel(word, d)\n \n for word in children:\n if compareString(word, end):\n return length + 2\n \n length += 1\n ladder = children\n \n return 0\n \ndef nextLevel(word, d):\n temp = []\n for item in d:\n if compareString(word, item):\n temp.append(item)\n d.remove(item)\n return temp\n \ndef compareString(s1, s2):\n count = 0\n for i in range(len(s1)):\n if s1[i] != s2[i]:\n count += 1\n if count > 1:\n return False\n \n if count == 1:\n return True\n \nif __name__ == '__main__':\n d = ['hot', 'dot', 'dog', 'lot', 'log']\n print ladderLength('hit', 'cog', d)"
},
{
"alpha_fraction": 0.5489361882209778,
"alphanum_fraction": 0.5787234306335449,
"avg_line_length": 28.41666603088379,
"blob_id": "590dd0764a38416d20823853e254f2254634e4cf",
"content_id": "94e6f717eab08d76a29da9d689415d96427d5555",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 705,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 24,
"path": "/rotate.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Rotate an array of n elements to the right by k steps.\n\n#For example, with n = 7 and k = 3, the array [1,2,3,4,5,6,7] is rotated to [5,6,7,1,2,3,4].\n\n#Note:\n#Try to come up as many solutions as you can, there are at least 3 different ways to solve this problem. \n\nclass Solution:\n # @param nums, a list of integer\n # @param k, num of steps\n # @return nothing, please modify the nums list in-place.\n def rotate(self, nums, k):\n k = k%len(nums)\n if k == 0: return \n temp = nums[-k:]\n nums[k:] = nums[:len(nums)-k]\n nums[:k] = temp\n \nif __name__ == '__main__':\n nums = range(1,8)\n print nums\n x = Solution()\n x.rotate(nums, 3)\n print nums"
},
{
"alpha_fraction": 0.4991735517978668,
"alphanum_fraction": 0.5305784940719604,
"avg_line_length": 24.25,
"blob_id": "72cf8ed760f7c0fa454e34d275b07d5bc6015ba4",
"content_id": "f10ded2265c5925bef30b1640e536f794f7ce26d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 605,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 24,
"path": "/removeElement.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array and a value, \n# remove all instances of that value in place and return the new length.\n# The order of elements can be changed. \n# It doesn't matter what you leave beyond the new length.\n\ndef removeElement_v1(A, elem):\n while True:\n try:\n A.remove(elem)\n except ValueError:\n return len(A)\n\ndef removeElement_v2(A, elem):\n j = 0 \n for i in A:\n if i != elem:\n A[j] = i\n j += 1\n \n return j\n \nif __name__ == '__main__':\n A = [1,3,5,7,9,2,4,6,8,10,1]\n print removeElement_v1(A, 11)"
},
{
"alpha_fraction": 0.47871333360671997,
"alphanum_fraction": 0.4976348280906677,
"avg_line_length": 27.54054069519043,
"blob_id": "16b9eea3e3c887ff9da9bacf9d6e0faf15e245b7",
"content_id": "636055be8c0d50f565d71f1910ccaffb440a3c59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1057,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 37,
"path": "/longestPalindromicSubStrII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n\n'''\nTHIS CODE IS NOT CORRECT\n'''\n# Given a string S, find the longest palindromic substring in S.\n# You may assume that the maximum length of S is 1000, \n# and there exists one unique longest palindromic substring.\n\n# DP solution, still exceed time limit on large data set\nclass Solution:\n # @return a string\n def longestPalindrome(self, s):\n l = len(s)\n if l == 0 or l == 1: return s\n maxlen = 1\n index = [0,0]\n\n P = [[0]*l for i in range(l)]\n for j in range(l):\n for i in range(j+1):\n if i == j: P[i][j] = True\n elif i+1 == j: P[i][j] = s[i] == s[j]\n else: P[i][j] = s[i] == s[j] and P[i+1][j-1] \n \n if P[i][j] == True and j-i+1 > maxlen:\n maxlen = j-i+1\n index = [i,j]\n \n return s[index[0]:index[1]+1]\n\nif __name__ == '__main__':\n s = 'AAAAccaaabbbbb'\n test = Solution()\n out = test.longestPalindrome(s)\n print(out)\n\n"
},
{
"alpha_fraction": 0.5330620408058167,
"alphanum_fraction": 0.5422176718711853,
"avg_line_length": 23.600000381469727,
"blob_id": "131a07db900cb6a5b076fc5f91eb0941d97cb2fa",
"content_id": "8aea5dcb9a8e79608502faf30bc8f7df443cd4c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 983,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 40,
"path": "/binaryTreePaths.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Given a binary tree, return all root-to-leaf paths.\n\n#For example, given the following binary tree:\n\n# 1\n# / \\\n#2 3\n# \\\n# 5\n#All root-to-leaf paths are:\n\n#[\"1->2->5\", \"1->3\"]\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def binaryTreePaths(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[str]\n \"\"\"\n self.paths = []\n self.DFS([], root)\n return map(lambda x:'->'.join(x), self.paths)\n \n def DFS(self, path, root):\n if root == None: return\n path += str(root.val),\n if root.left==None and root.right==None:\n self.paths.append(path)\n return\n pathleft, pathright = path[:], path[:]\n if root.left != None: self.DFS(pathleft, root.left)\n if root.right != None: self.DFS(pathright, root.right)\n return"
},
{
"alpha_fraction": 0.5218542814254761,
"alphanum_fraction": 0.5278145670890808,
"avg_line_length": 31.84782600402832,
"blob_id": "0509563ace56cf8cefbc1a3a548caf22f3326a98",
"content_id": "f30d03d5e7064f02c81196ebc52bd777896de3e8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1510,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 46,
"path": "/construBinaryTreePI.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given preorder and inorder traversal of a tree, construct the binary tree.\n\n# Note:\n# You may assume that duplicates do not exist in the tree.\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param preorder, a list of integers\n # @param inorder, a list of integers\n # @return a tree node\n \n # pre-order: root -> left -> right\n # post-order: left -> root -> right\n def buildTree(self, preorder, inorder):\n if preorder == [] or inorder == []:\n return None \n root = TreeNode(preorder[0]) \n loc = inorder.index(root.val)\n \n leftIn, rightIn = inorder[:loc], inorder[loc+1:] \n if leftIn != [] and rightIn != []:\n for i in range(len(1, preorder)-1):\n if preorder[i] in leftIn and preorder[i+1] in rightIn:\n sep = i\n break\n elif leftIn == []: sep = 0\n elif rightIn == []: sep = len(preorder) \n leftPre, rightPre = preorder[1:sep+1], preorder[sep+1:]\n \n root.left = self.buildTree(leftPre, leftIn)\n root.right = self.buildTree(rightPre, rightIn)\n \n return root\n \nif __name__ == '__main__':\n preorder = ['F','B','A','D','C','E','G','I','H']\n inorder = ['A','B','C','D','E','F','G','H','I']\n \n test = Solution()\n out = test.buildTree(preorder, inorder)"
},
{
"alpha_fraction": 0.5343618392944336,
"alphanum_fraction": 0.5406731963157654,
"avg_line_length": 32.1860466003418,
"blob_id": "189bec04e37666cf212eaf7ccaa8540767c73b42",
"content_id": "1705b289dd30cd0d6097acd70972d11a43ff9b0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1426,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 43,
"path": "/construBinaryTreeIP.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given inorder and postorder traversal of a tree, construct the binary tree.\n\n# Note:\n# You may assume that duplicates do not exist in the tree.\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param inorder, a list of integers\n # @param postorder, a list of integers\n # @return a tree node\n def buildTree(self, inorder, postorder):\n if postorder == [] or inorder == []:\n return None \n root = TreeNode(postorder[-1])\n loc = inorder.index(root.val)\n \n leftIn, rightIn = inorder[:loc], inorder[loc+1:] \n if leftIn != [] and rightIn != []:\n for i in range(len(postorder)-2):\n if postorder[i] in leftIn and postorder[i+1] in rightIn:\n sep = i+1\n break\n elif leftIn == []: sep = 0\n elif rightIn == []: sep = len(postorder)-1 \n leftPost, rightPost = postorder[0:sep], postorder[sep:-1]\n \n root.left = self.buildTree(leftIn, leftPost)\n root.right = self.buildTree(rightIn, rightPost)\n \n return root\n \nif __name__ == '__main__':\n inorder = ['A','B','C','D','E','F','G','H','I']\n postorder = ['A','C','E','D','B','H','I','G','F']\n \n test = Solution()\n out = test.buildTree(inorder, postorder)"
},
{
"alpha_fraction": 0.570588231086731,
"alphanum_fraction": 0.5794117450714111,
"avg_line_length": 22.581396102905273,
"blob_id": "74cbd8eb0ab075a6c27a3f0d22cfe73e6de729f3",
"content_id": "386b2394eb4e8b74f51bf22ff4ad25018ec585e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1020,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 43,
"path": "/inorderTraversal.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree, \n# return the inorder traversal of its nodes' values.\n# For example:\n# Given binary tree {1,#,2,3},\n# 1\n# \\\n# 2\n# /\n# 3\n# return [1,3,2].\n# Note: Recursive solution is trivial, could you do it iteratively?\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \ndef inorderTraversal(root):\n values = []\n addNode(root, values)\n \n return values\n \ndef addNode(node, values):\n if node == None:\n return\n addNode(node.left, values)\n values.append(node.val) \n addNode(node.right, values) \n \nif __name__ == '__main__':\n root = TreeNode('F')\n root.left = TreeNode('B')\n root.left.left = TreeNode('A')\n root.left.right = TreeNode('D')\n root.left.right.left = TreeNode('C')\n root.left.right.right = TreeNode('E')\n root.right = TreeNode('G')\n root.right.right = TreeNode('I')\n root.right.right.left = TreeNode('H')\n \n out = inorderTraversal(root) \n \n"
},
{
"alpha_fraction": 0.4744667112827301,
"alphanum_fraction": 0.48739495873451233,
"avg_line_length": 35.83333206176758,
"blob_id": "675aef4297f0e9702d06023a73ef60342cc16ffd",
"content_id": "c9d70b543bac7b4d02503c8dc69096a7153ed8a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1547,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 42,
"path": "/searchRotatedArrayII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Follow up for \"Search in Rotated Sorted Array\":\n# What if duplicates are allowed?\n\n# Would this affect the run-time complexity? How and why?\n\n# Write a function to determine if a given target is in the array.\n\nclass Solution:\n # @param A, a list of integers\n # @param target, an integer to be searched\n # @return an integer\n def search(self, A, target): \n l = len(A)\n median = l//2\n \n if l == 0: return False \n if target == A[0]: return True \n if target == A[median]: return True\n \n if target > A[0]: # in the large part \n if target < A[median] or A[0] > A[median]:\n exist = self.search(A[:median], target)\n elif A[0] == A[median] and A[0] == A[-1]:\n exist = self.search(A[:median], target) or\\\n self.search(A[median+1:], target) \n else: \n exist = self.search(A[median+1:], target) \n else: # in the small part\n if target > A[median] or A[-1] < A[median]:\n exist = self.search(A[median+1:], target)\n elif A[-1] == A[median] and A[median] == A[0]:\n exist = self.search(A[:median], target) or\\\n self.search(A[median+1:], target) \n else:\n exist = self.search(A[:median], target)\n \n return exist\n \nif __name__ == '__main__':\n A = [1,1,3,1]\n test = Solution()\n out = test.search(A, 3) "
},
{
"alpha_fraction": 0.5281456708908081,
"alphanum_fraction": 0.5389072895050049,
"avg_line_length": 30.736841201782227,
"blob_id": "a393ce396ecafcb0e7658c642574ca412abfc928",
"content_id": "608980c4fb148c831aaae61f6cfc4b44e42fe4be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1208,
"license_type": "no_license",
"max_line_length": 264,
"num_lines": 38,
"path": "/distinctSubseq.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string S and a string T, count the number of distinct subsequences of T in S.\n\n# A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, \"ACE\" is a subsequence of \"ABCDE\" while \"AEC\" is not).\n\n# Here is an example:\n# S = \"rabbbit\", T = \"rabbit\"\n# Return 3.\n\n# wtf of the following ??? why are u expecting 3 ???\n# Input:\t\"ccc\", \"c\"\n# Output:\t1\n# Expected:\t3\n\nclass Solution:\n # @return an integer\n def numDistinct(self, S, T):\n if T == '': return 0\n i, cover, num = 0, 0, 0\n subseq = []\n for i in range(len(S)):\n j, count = 0, 0\n while j < len(T):\n if i+count< len(S) and S[i+count] == T[j]:\n count += 1\n j += 1\n if i+count > cover: \n cover = i+count\n if S[i:i+count] not in subseq:\n subseq.append(S[i:i+count])\n num += 1\n return num\n\nif __name__ == '__main__':\n S = \"rabbbit\"\n T = \"rabbit\"\n test = Solution()\n out = test.numDistinct(S, T)\n print out\n\n\n"
},
{
"alpha_fraction": 0.26829269528388977,
"alphanum_fraction": 0.3243902325630188,
"avg_line_length": 21.83333396911621,
"blob_id": "99753400547a3c0f596492d1646b6634e25c9ff9",
"content_id": "0eb160e49922fd0a7c6e762de496eb76aff23011",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 18,
"path": "/mergeArray.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "def merge(A, m, B, n):\n A += B\n \n for k in range(m+n-1, -1, -1):\n # make sure A[-1] or B[-1] does not appear\n if m == 0 or (n > 0 and A[m-1] < B[n-1]):\n A[k] = B[n-1]\n n -= 1\n else:\n A[k] = A[m-1]\n m -= 1\n \n return A\n \nif __name__ == '__main__':\n A = [1,3,5,7,9]\n B = [0,2,4]\n print merge(A,5,B,3)"
},
{
"alpha_fraction": 0.5958847999572754,
"alphanum_fraction": 0.606584370136261,
"avg_line_length": 29.325000762939453,
"blob_id": "a201d4e8e19521eb304715df57a2306419d1987e",
"content_id": "c360cbf948ceffe288a72d083e3a93e2fcec2859",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1215,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 40,
"path": "/recoverTree.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Two elements of a binary search tree (BST) are swapped by mistake.\n# Recover the tree without changing its structure.\n# Note:\n# A solution using O(n) space is pretty straight forward. \n# Could you devise a constant space solution?\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a tree node\n def recoverTree(self, root):\n self.n1, self.n2, self.pre = None, None, None\n self.findMistake(root)\n self.n1.val, self.n2.val = self.n2.val, self.n1.val\n return root\n\n def findMistake(self, root):\n if root == None: return\n self.findMistake(root.left)\n if self.pre and root.val < self.pre.val:\n if self.n1 == None:\n self.n1, self.n2 = self.pre, root\n else:\n self.n2 = root\n self.pre = root\n self.findMistake(root.right)\n\nif __name__ == '__main__':\n root = TreeNode(3)\n root.right = TreeNode(2)\n root.right.right = TreeNode(1)\n\n test = Solution()\n out = test.recoverTree(root)\n print out.val, out.right.val, out.right.right.val\n\n\n"
},
{
"alpha_fraction": 0.4668489396572113,
"alphanum_fraction": 0.4791524410247803,
"avg_line_length": 21.18181800842285,
"blob_id": "012e3c107a4d92b41cab2c0fb19cb2acd9356039",
"content_id": "0a600f0a6065e417520ba37ead2435b47f7839b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1463,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 66,
"path": "/flattenBinary.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree, flatten it to a linked list in-place.\n\n# For example, given\n# 1\n# / \\\n# 2 5\n# / \\ \\\n# 3 4 6 \n#\n# The flattened tree should look like:\n# 1\n# \\\n# 2\n# \\\n# 3\n# \\\n# 4\n# \\\n# 5\n# \\\n# 6\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return nothing, do it in place\n \n # flatten by pre-order\n def flatten(self, root):\n stk = []\n self.help(root, stk) \n self.move(root) \n \n def help(self, root, stk):\n if root == None: return\n if root.right != None:\n stk.append(root.right)\n root.right = None\n if root.left == None and stk != []:\n root.left = stk.pop() \n self.help(root.left, stk)\n \n # move from left subtree to right subtree \n def move(self, root): \n if root == None:\n return\n self.move(root.left)\n root.right = root.left\n root.left = None\n \nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.left.left = TreeNode(3)\n root.left.right = TreeNode(4)\n root.right = TreeNode(5)\n root.right.right = TreeNode(6)\n \n test = Solution()\n test.flatten(root)"
},
{
"alpha_fraction": 0.507478654384613,
"alphanum_fraction": 0.5352563858032227,
"avg_line_length": 30.233333587646484,
"blob_id": "e66afbf7ead8c1e03c2e9b8cdabdfe8a9a49cf2c",
"content_id": "1a5c9a50ad55ccc992a6b8e06613e0b4f235b46a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 938,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 30,
"path": "/uniquePaths.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# A robot is located at the top-left corner of a m x n grid.\n# The robot can only move either down or right at any point in time. \n# The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).\n# How many possible unique paths are there?\n\n# f(i, 0) = f(0, j) = 1\n# f(i, j) = f(i-1, j) + f(i, j-1)\n\ndef uniquePaths(m, n):\n if m == 1 or n == 1:\n return 1\n \n # Attention: \n # replicating a list with * doesn’t create copies, \n # it only creates references to the existing objects\n # Do not create 2d array like: A = [[None] * 2] * 3 !!! \n f = [0]* m\n for i in range(m): f[i] = [0]*n \n f[0] = [1]*n\n for i in range(m): f[i][0] = 1\n \n for i in range(1, m):\n for j in range(1, n):\n f[i][j] = f[i-1][j] + f[i][j-1]\n \n return f[m-1][n-1]\n \nif __name__ == '__main__':\n out = uniquePaths(2, 3)"
},
{
"alpha_fraction": 0.4779350459575653,
"alphanum_fraction": 0.5137385725975037,
"avg_line_length": 31.432432174682617,
"blob_id": "89a47b3440318270dd5a7116d9247fa3b8e916b8",
"content_id": "34677e8a78b9f0b0a54ab51a231826fcfb3cccb4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1201,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 37,
"path": "/longestPalindromicSubStr.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n\n# Given a string S, find the longest palindromic substring in S.\n# You may assume that the maximum length of S is 1000, \n# and there exists one unique longest palindromic substring.\n\n# this version exceeds time limit on large data set\n\nclass Solution:\n # @return a string\n def longestPalindrome(self, s):\n l = len(s)\n if l == 0: return s\n sub = s[0]\n s1, e1, s2, e2 = 0, 1, 0 ,1\n for i in range(1, l):\n if i > 0 and s[i] == s[i-1]:\n [s1, e1] = self.palindrome(s, i-1, i)\n sub = s[s1:e1] if e1-s1 > len(sub) else sub\n if s1 > 0 and i > 1 and s[i] == s[i-2]: \n [s2, e2] = self.palindrome(s, i-2, i)\n sub = s[s2:e2] if e2-s2 > len(sub) else sub \n if len(sub) == l: return s\n return sub \n\n def palindrome(self, s, start, end):\n while end < len(s)-1 and start > 0 and s[end+1] == s[start-1]:\n end += 1\n start -= 1\n return [start, end+1]\n\nif __name__ == '__main__':\n s = 'abcdeedasdfgfdsa'\n test = Solution()\n out = test.longestPalindrome(s)\n print(out)\n\n"
},
{
"alpha_fraction": 0.4232400357723236,
"alphanum_fraction": 0.4656488597393036,
"avg_line_length": 25.22222137451172,
"blob_id": "4d535024b1440e53a6144fb5acc3fffabc69bc4a",
"content_id": "696ab0dc88b7f6493e584f4857ff291bae447947",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1179,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 45,
"path": "/uniquePathsII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Follow up for \"Unique Paths\":\n# Now consider if some obstacles are added to the grids. \n# How many unique paths would there be?\n# An obstacle and empty space is marked as 1 and 0 respectively in the grid.\n\n# For example,\n# There is one obstacle in the middle of a 3x3 grid as illustrated below.\n#[[0,0,0],\n# [0,1,0],\n# [0,0,0]]\n# The total number of unique paths is 2.\n\ndef uniquePathsWithObs(Grid):\n m = len(Grid) # row number\n n = len(Grid[0]) # col number\n \n f = [0]* m\n for i in range(m): f[i] = [0]*n \n \n for i in range(n):\n if Grid[0][i] == 0:\n f[0][i] = 1\n else: break\n \n for j in range(m):\n if Grid[j][0] == 0:\n f[j][0] = 1\n else: break \n \n f[0][i+1:n] = [0]*(n-i-1)\n for j in range(j+1, m): f[j][0] = 0\n \n for i in range(1, m):\n for j in range(1, n): \n if Grid[i][j] == 1:\n f[i][j] = 0\n else:\n f[i][j] = f[i][j-1] + f[i-1][j] \n \n return f[m-1][n-1]\n\nif __name__ == '__main__':\n Grid = [[0,0,0,1,0],\\\n [0,1,0,0,0]]\n out = uniquePathsWithObs(Grid)"
},
{
"alpha_fraction": 0.5364540219306946,
"alphanum_fraction": 0.5472245216369629,
"avg_line_length": 35.54545593261719,
"blob_id": "eadca823f4adcd37bddb4555b4c531f6f8becd5b",
"content_id": "9e734d7f09dec0bf7aafd757ac5a57d1fd5708f0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2414,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 66,
"path": "/textJustify.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array of words and a length L, \n# format the text such that each line has exactly L characters and is fully (left and right) justified.\n\n# You should pack your words in a greedy approach; \n# that is, pack as many words as you can in each line. \n# Pad extra spaces ' ' when necessary so that each line has exactly L characters.\n\n# Extra spaces between words should be distributed as evenly as possible. \n# If the number of spaces on a line do not divide evenly between words, \n# the empty slots on the left will be assigned more spaces than the slots on the right.\n\n# For the last line of text, \n# it should be left justified and no extra space is inserted between words.\n\n# For example,\n# words: [\"This\", \"is\", \"an\", \"example\", \"of\", \"text\", \"justification.\"]\n# L: 16.\n\n# Return the formatted lines as:\n# [\n# \"This is an\",\n# \"example of text\",\n# \"justification. \"\n# ]\n# Note: Each word is guaranteed not to exceed L in length\n\nclass Solution:\n # @param words, a list of strings\n # @param L, an integer\n # @return a list of strings\n def fullJustify(self, words, L):\n if words == []: return\n i = 0\n out = []\n while i < len(words):\n count, l = 0, 0\n line = ''\n while i+count < len(words) and l+len(words[i+count]) <= L:\n l += len(words[i+count]) + 1 # 1 comes from ' '\n count += 1\n \n l -= 1 # drop the last ' '\n fill = L-l\n if count == 1 or i+count == len(words): # only one word or last line\n for add in range(count-1):\n line += words[i+add]+' '\n line += words[i+count-1] + ' '*fill\n else: \n even = fill / (count-1)\n res = fill % (count-1)\n for add in range(count-1):\n space = even+2 if res>0 else even+1\n line += words[i+add]+' '*(space)\n res -= 1\n line += words[i+count-1]\n out.append(line)\n i += count\n return out\n\nif __name__ == '__main__':\n w1 = [\"This\", \"is\", \"an\", \"example\", \"of\", \"text\", \"justification.\"]\n w2 = [\"What\",\"must\",\"be\",\"shall\",\"be.\"]\n w3 = [\"Don't\",\"go\",\"around\",\"saying\",\"the\",\"world\",\"owes\",\"you\",\"a\",\"living;\",\"the\",\"world\",\"owes\",\"you\",\"nothing;\",\"it\",\"was\",\"here\",\"first.\"]\n test = Solution()\n out = test.fullJustify(w3, 15)\n print out\n\n\n"
},
{
"alpha_fraction": 0.5373134613037109,
"alphanum_fraction": 0.5497512221336365,
"avg_line_length": 26.620689392089844,
"blob_id": "3d0cc33d9a548d43e5863d11e79da4982e714083",
"content_id": "d7be3cb1cf78a682e2faa1e529f645565b911296",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 804,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 29,
"path": "/lengthOfLastWord.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string s consists of upper/lower-case alphabets and empty space characters ' ', \n# return the length of last word in the string.\n\n# If the last word does not exist, return 0.\n# Note: A word is defined as a character sequence consists of non-space characters only.\n\n# For example, \n# Given s = \"Hello World\",\n# return 5.\n\nclass Solution:\n # @param s, a string\n # @return an integer\n def lengthOfLastWord(self, s):\n count, end = 0, -1\n for i in range(len(s)-1, -1, -1):\n if s[i] != ' ': \n end = i\n break\n for j in range(end, -1, -1):\n if s[j] != ' ': count += 1\n else: break\n return count\n\nif __name__ == '__main__':\n s = 'a '\n test = Solution()\n out = test.lengthOfLastWord(s)\n print out\n\n\n\n"
},
{
"alpha_fraction": 0.6471816301345825,
"alphanum_fraction": 0.6471816301345825,
"avg_line_length": 35.92307662963867,
"blob_id": "67b71d01d8aa96ffd70f5bd042cf0d39515d883f",
"content_id": "0777ae10a4a26f91406fb5558e9331e364815069",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 479,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 13,
"path": "/containsDuplicate.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Given an array of integers, find if the array contains any duplicates. \n#Your function should return true if any value appears at least twice in the array, \n#and it should return false if every element is distinct. \n\nclass Solution:\n # @param {integer[]} nums\n # @return {boolean}\n def containsDuplicate(self, nums):\n had = {}\n for i in range(len(nums)):\n if nums[i] not in had: had[nums[i]]=i\n else: return True\n return False"
},
{
"alpha_fraction": 0.48426395654678345,
"alphanum_fraction": 0.4994923770427704,
"avg_line_length": 30.516128540039062,
"blob_id": "418c6743bbca208c0a6293d8c605468e76d1f8bf",
"content_id": "f6548ffcb94109d0f847d54880cd20d676994d00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 985,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 31,
"path": "/partitionPalindromeII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string s, \n# partition s such that every substring of the partition is a palindrome.\n# Return the minimum cuts needed for a palindrome partitioning of s.\n\n# For example, given s = \"aab\",\n# Return 1 since the palindrome partitioning [\"aa\",\"b\"] could be produced using 1 cut.\n\n# Exceed time limit, wtf... \ndef minCut(s):\n parlen = [0] # min cuts ending at i, now i = 0\n for i in range(1, len(s)+1):\n temp = len(s) \n for j in range(0, i): \n if palindrome(s[j:i]): \n temp = min(temp, parlen[j])\n parlen.append(temp + 1) \n return parlen[-1] - 1\n \ndef palindrome(sub):\n start = 0\n end = len(sub) - 1\n while sub[start] == sub[end]:\n if end - start <= 1:\n return True\n start += 1\n end -= 1\n return False\n \nif __name__ == '__main__':\n s = 'aaabbcaccc'\n out = minCut(s)\n "
},
{
"alpha_fraction": 0.4814189076423645,
"alphanum_fraction": 0.5202702879905701,
"avg_line_length": 22.559999465942383,
"blob_id": "af5bd93ab7580d0d5dd28db4365cfc46a1b68a79",
"content_id": "053036717d3f224284e7f301ab213534dcdc3e01",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 592,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 25,
"path": "/PascalTriangle.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given numRows, generate the first numRows of Pascal's triangle.\n# For example, given numRows = 5,\n# Return\n\n# [[1],\n# [1,1],\n# [1,2,1],\n# [1,3,3,1],\n# [1,4,6,4,1]]\n\nclass Solution:\n # @return a list of lists of integers\n def generate(self, numRows):\n triangle = []\n for i in range(numRows):\n new = [1]*(i+1)\n for j in range(1,i):\n new[j] = triangle[i-1][j-1]+triangle[i-1][j]\n triangle.append(new)\n return triangle\n\nif __name__ == '__main__':\n test = Solution()\n out = test.generate(5)\n print out\n\n\n\n"
},
{
"alpha_fraction": 0.4769301116466522,
"alphanum_fraction": 0.4965737760066986,
"avg_line_length": 25.670732498168945,
"blob_id": "8bbdd5bfd07eeabffb718a72b59f7bae02dbedbc",
"content_id": "cc93da067157dacb5454740c815f414def5a466b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2189,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 82,
"path": "/scrambleStr.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string s1, we may represent it as a binary tree \n# by partitioning it to two non-empty substrings recursively.\n\n# Below is one possible representation of s1 = \"great\":\n# great\n# / \\\n# gr eat\n# / \\ / \\\n# g r e at\n# / \\\n# a t\n\n# To scramble the string, we may choose any non-leaf node and swap its two children.\n\n# For example, if we choose the node \"gr\" and swap its two children, \n# it produces a scrambled string \"rgeat\".\n\n# rgeat\n# / \\\n# rg eat\n# / \\ / \\\n# r g e at\n# / \\\n# a t\n# We say that \"rgeat\" is a scrambled string of \"great\".\n\n# Similarly, if we continue to swap the children of nodes \"eat\" and \"at\", \n# it produces a scrambled string \"rgtae\".\n\n# rgtae\n# / \\\n# rg tae\n# / \\ / \\\n# r g ta e\n# / \\\n# t a\n# We say that \"rgtae\" is a scrambled string of \"great\".\n\n# Given two strings s1 and s2 of the same length, \n# determine if s2 is a scrambled string of s1.\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# results[k][i][j]: \n# k: length of substring-1\n# i: start position of s1\n# j: start position of s2\n\nclass Solution:\n # @return a boolean\n def isScramble(self, s1, s2):\n l1, l2 = len(s1), len(s2)\n if l1 != l2: return False\n if l1 == '': return True\n\n results = [[[False]*l1 for i in range(l1)] for j in range(l1)]\n for i in range(l1):\n for j in range(l2):\n results[0][i][j] = s1[i] == s2[j]\n\n for k in range(1, l1):\n for i in range(l1-k):\n for j in range(l2-k):\n for m in range(k):\n r = (results[m][i][j] and results[k-m-1][i+m+1][j+m+1]) or\\\n (results[m][i][j+k-m] and results[k-m-1][i+m+1][j])\n if r == True: \n results[k][i][j] = True\n break\n return results[l1-1][0][0]\n\nif __name__ == '__main__':\n s1 = \"great\"\n s2 = \"rgtae\"\n\n test = Solution()\n out = test.isScramble(s1, s2)\n print out\n\n\n"
},
{
"alpha_fraction": 0.48156508803367615,
"alphanum_fraction": 0.5154251456260681,
"avg_line_length": 24.09433937072754,
"blob_id": "3873a9114285fba14d86e99b8e2ef9b56f3c0f94",
"content_id": "0c4df80fbfbf43f9549d63a4031660d80b9d9d5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1329,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 53,
"path": "/isNumber.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Validate if a given string is numeric.\n\n# Some examples:\n# \"0\" => true\n# \" 0.1 \" => true\n# \"abc\" => false\n# \"1 a\" => false\n# \"2e10\" => true\n# Note: It is intended for the problem statement to be ambiguous. \n# You should gather all requirements up front before implementing one.\n\n# Seems tedious but atucally a good practice \ndef isNumber(s):\n rmSp = s.split()\n if len(rmSp) != 1:\n return False \n \n p1 = rmSign(rmSp[0]).isdigit()\n p2 = isFloat(rmSign(rmSp[0]))\n p3 = isScientific(rmSp[0])\n \n return p1 or p2 or p3\n\ndef rmSign(s):\n if len(s) > 1:\n if s[0] == '+' or s[0] == '-':\n return s[1:]\n return s\n \ndef isFloat(s):\n parts = s.split('.')\n if len(parts) == 2 and not (parts[0] == '' and parts[1] == ''):\n if parts[0].isdigit():\n return parts[1].isdigit() or parts[1] == ''\n elif parts[0] == '':\n return parts[1].isdigit() \n return False\n \ndef isScientific(s):\n parts = s.split('e')\n if len(parts) == 2:\n parts = [rmSign(parts[0]), rmSign(parts[1])]\n if parts[0].isdigit() or isFloat(parts[0]):\n return parts[1].isdigit() \n return False\n \nif __name__ == '__main__':\n s1 = '0'\n s2 = ' 0.1 '\n s3 = 'abc'\n s4 = '2e10'\n \n print isNumber('+')"
},
{
"alpha_fraction": 0.5083909034729004,
"alphanum_fraction": 0.5113524198532104,
"avg_line_length": 23.731706619262695,
"blob_id": "c237123062514ea8da1d99d4b17620beca1205ad",
"content_id": "f0778e6e383fb70561ed519c030b1857abb2d085",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1013,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 41,
"path": "/Anagrams.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array of strings, return all groups of strings that are anagrams.\n\n# Note: All inputs will be in lower-case.\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n \ndef anagrams(strs):\n table = dict()\n headtable = []\n group = []\n \n for i in range(len(strs)):\n new = str(sorted(strs[i]))\n if new not in table:\n table[new] = ListNode(i)\n trace = table[new]\n headtable.append(trace) \n else:\n table[new].next = ListNode(i)\n table[new] = table[new].next\n \n for head in headtable:\n if head.next != None:\n group += listToArray(head, strs)\n \n return group\n \ndef listToArray(head, strs):\n array = []\n while head != None:\n array.append(strs[head.val])\n head = head.next\n return array\n\nif __name__ == '__main__':\n s1 = ['abcd', 'efgh', 'bcda','egfh', 'effg']\n s2 = ['', 'a', '']\n out = anagrams(s1)"
},
{
"alpha_fraction": 0.46319738030433655,
"alphanum_fraction": 0.47444912791252136,
"avg_line_length": 33.77049255371094,
"blob_id": "a8294de9afb1ab0bec79be01da6d825c702f0ab9",
"content_id": "538b5e1b82c09dcd43ebd7179c70dad18c0d98c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2133,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 61,
"path": "/surroundedRegions.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a 2D board containing 'X' and 'O', capture all regions surrounded by 'X'.\n# A region is captured by flipping all 'O's into 'X's in that surrounded region.\n\n# For example,\n# X X X X\n# X O O X\n# X X O X\n# X O X X\n\n# After running your function, the board should be:\n# X X X X\n# X X X X\n# X X X X\n# X O X X\n\n# This verison exceeds time limit on big data test\nclass Solution:\n # @param board, a 2D array\n # Capture all regions by modifying the input board in-place.\n # Do not return any value.\n def solve(self, board):\n if board == []: return []\n visited = []\n change = []\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 'O':\n if [i,j] not in visited:\n visited.append([i,j])\n self.count = 1\n if self.checkNeighbor(board, i, j, visited): \n change += visited[len(visited)-self.count:]\n \n for item in change:\n board[item[0]][item[1]] = 'X'\n \n def checkNeighbor(self, board, i, j, visited):\n # hit boundary\n if i == 0 or i == len(board) - 1 or j == 0 or j == len(board[0])-1:\n return False\n \n neighbors = [True, True, True, True] # left, right, up, down\n directions = [[i, j-1], [i, j+1], [i-1, j], [i+1, j]]\n \n for i in range(len(directions)):\n if directions[i] not in visited and board[directions[i][0]][directions[i][1]] == 'O':\n visited.append(directions[i])\n self.count += 1\n neighbors[i] = self.checkNeighbor(board, directions[i][0], directions[i][1], visited) \n \n return neighbors[0] and neighbors[1] and neighbors[2] and neighbors[3]\n \n \nif __name__ == '__main__':\n board = [['X', 'X', 'X', 'X'], \\\n ['X', 'O', 'O', 'X'], \\\n ['X', 'X', 'O', 'X'], \\\n ['X', 'O', 'X', 'X']]\n \n test = Solution()\n out = test.solve(board)\n "
},
{
"alpha_fraction": 0.525073766708374,
"alphanum_fraction": 0.5476892590522766,
"avg_line_length": 34.068965911865234,
"blob_id": "508b8b0880463d365f8a6d23420aee083bc02136",
"content_id": "8a5827d8bcfc5bb90fd4cb6cabf73f569a6f152b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1017,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 29,
"path": "/sortColors.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array with n objects colored red, white or blue, \n# sort them so that objects of the same color are adjacent, \n# with the colors in the order red, white and blue.\n\n# Here, we will use the integers 0, 1, and 2 to represent the color red, white, and blue respectively.\n\n# Note:\n# You are not suppose to use the library's sort function for this problem.\n\nclass Solution:\n # @param A a list of integers\n # @return nothing, sort in place\n def sortColors(self, A):\n colors = [0, 0, 0] # number of color objects\n l = len(A)\n for i in range(l):\n for j in range(len(colors)):\n if A[i] == j:\n sorted = sum(colors[:j+1])\n if i > sorted-1 and sorted < l:\n if i > sorted: colors[A[sorted]] -= 1\n colors[j] += 1\n A[sorted], A[i] = A[i], A[sorted]\n\nif __name__ == '__main__':\n A = [1,2,0,2,1,1,0,2,0,2,0,1,0] \n test = Solution()\n test.sortColors(A)\n print A\n"
},
{
"alpha_fraction": 0.3904465138912201,
"alphanum_fraction": 0.43302181363105774,
"avg_line_length": 26.514286041259766,
"blob_id": "e6040b921bc5eaa3bf5908eee5bae7696b420b27",
"content_id": "ed30d398a4dcdae9125450de776949ebfaa3681c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 963,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 35,
"path": "/3sum.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? \n# Find all unique triplets in the array which gives the sum of zero.\n\n# For example, given array S = {-1 0 1 2 -1 -4},\n# A solution set is:\n# (-1, 0, 1)\n# (-1, -1, 2)\n\nclass Solution:\n # @return a list of lists of length 3, [[val1,val2,val3]]\n def threeSum(self, num):\n num.sort()\n l = len(num)\n out = []\n \n for i in range(l-2):\n j, k = 1, 1\n while i+j < l-k:\n sumi = [num[i], num[i+j], num[l-k]]\n if sum(sumi) == 0:\n if sumi not in out:\n out.append(sumi)\n j += 1\n k += 1\n elif sum(sumi) > 0:\n k += 1\n else: \n j += 1\n\n return out\n \nif __name__ == '__main__':\n num = [-4,-2,-2,-2,0,1,2,2,2,3,3,4,4,6,6]\n test = Solution()\n out = test.threeSum(num)\n"
},
{
"alpha_fraction": 0.42946991324424744,
"alphanum_fraction": 0.44025155901908875,
"avg_line_length": 30.814285278320312,
"blob_id": "bd8cc36da065e2d75a02302639721c298686eabe",
"content_id": "604eef7ef86a8eed71cdf50f44eba7cea138de66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2226,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 70,
"path": "/surroundedRegionsI.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a 2D board containing 'X' and 'O', capture all regions surrounded by 'X'.\n# A region is captured by flipping all 'O's into 'X's in that surrounded region.\n\n# For example,\n# X X X X\n# X O O X\n# X X O X\n# X O X X\n\n# After running your function, the board should be:\n# X X X X\n# X X X X\n# X X X X\n# X O X X\n\nclass Solution:\n # @param board, a 2D array\n # Capture all regions by modifying the input board in-place.\n # Do not return any value.\n def solve(self, board):\n if board == []: return []\n row = len(board)\n col = len(board[0])\n \n left = [[i, 0] for i in range(row)]\n right = [[i, col-1] for i in range(row)]\n up = [[0, j] for j in range(1, col-1)]\n down = [[row-1, j] for j in range(1, col-1)]\n boundary = left + right + up + down\n visited = []\n \n for item in boundary:\n i, j = item[0], item[1]\n if board[i][j] == 'O':\n board[i][j] = 'Y'\n visited.append([i,j])\n \n while visited != []:\n neighbors = []\n for item in visited:\n neighbors += self.checkNeighbor(board, item[0], item[1]) \n visited = neighbors\n \n for i in range(row):\n for j in range(col):\n if board[i][j] == 'O': board[i][j] = 'X'\n if board[i][j] == 'Y': board[i][j] = 'O' \n \n def checkNeighbor(self, board, i, j):\n directions, neighbors = [], []\n if i != 0: directions.append([i-1, j])\n if i != len(board) - 1: directions.append([i+1, j])\n if j != 0: directions.append([i, j-1])\n if j != len(board[0])-1: directions.append([i, j+1])\n \n for item in directions:\n [m,n] = item\n if board[m][n] == 'O':\n board[m][n] = 'Y'\n neighbors.append([m,n]) \n return neighbors \n \nif __name__ == '__main__':\n board = [['X', 'X', 'X', 'X'], \\\n ['X', 'O', 'O', 'X'], \\\n ['X', 'X', 'O', 'X'], \\\n ['X', 'O', 'X', 'X']]\n \n test = Solution()\n out = test.solve(board)"
},
{
"alpha_fraction": 0.5106951594352722,
"alphanum_fraction": 0.5374331474304199,
"avg_line_length": 22.375,
"blob_id": "c0b2a2f6a68ed38cf93d402bca9440c79f4bf459",
"content_id": "fb0a9b13c62f74cb470075de6938466e5eaee5cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 374,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 16,
"path": "/palindromeNum.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Determine whether an integer is a palindrome. \n# Do this without extra space.\n\nclass Solution:\n # @return a boolean\n def isPalindrome(self, x):\n if x < 0: return False\n a, b = x, 0\n while a != 0:\n a, b = a/10, b*10 + a%10\n return b == x\n\nif __name__ == '__main__':\n num = 0\n test = Solution()\n out = test.isPalindrome(num)\n"
},
{
"alpha_fraction": 0.34950384497642517,
"alphanum_fraction": 0.3858875334262848,
"avg_line_length": 24.94285774230957,
"blob_id": "3101a26383a2aaa8d6b02dbf59ebe84bbd23a8a8",
"content_id": "2658432c08ce5d4ecf6ad41d0fed75ee9461eb4d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 907,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 35,
"path": "/atoi.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Implement atoi to convert a string to an integer.\n\nclass Solution:\n # @return an integer\n def atoi(self, s):\n if s == '': return 0 \n sign = 1 \n num = 0\n power = 10\n maxi = 2147483647\n mini = -2147483648\n \n for i in range(len(s)):\n if s[i] == ' ': continue\n break \n\n if s[i] == '+' or s[i] == '-':\n if i == len(s) -1: return 0\n if s[i] == '-': sign = -1 \n s = s[i+1:]\n else: s = s[i:]\n \n for i in s: \n if i.isdigit():\n num = num*power + (ord(i) - ord('0'))\n else: break\n \n num *= sign \n if num > maxi: return maxi\n elif num < mini: return mini\n else: return num \n \nif __name__ == '__main__':\n test = Solution()\n out = test.atoi(' 010')"
},
{
"alpha_fraction": 0.5741158723831177,
"alphanum_fraction": 0.5906696915626526,
"avg_line_length": 25.058822631835938,
"blob_id": "d1b43fd11392fc5f52be2b73a66c066f254c8da1",
"content_id": "436df784e5c3607848ea3e9462b29cb731c17714",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1329,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 51,
"path": "/maxPathSum.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree, find the maximum path sum.\n# The path may start and end at any node in the tree.\n\n# For example:\n# Given the below binary tree,\n# 1\n# / \\\n# 2 3\n# Return 6.\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return an integer\n def maxPathSum(self, root):\n self.max = 0\n self.dfs(root)\n return self.max\n\n def dfs(self, root):\n if root == None: return 0\n left,right = 0, 0\n left += self.dfs(root.left)\n right += self.dfs(root.right)\n temp = max(left, right, 0)\n self.max = max(self.max, temp+root.val, left+right+root.val)\n return temp + root.val\n\nif __name__ == \"__main__\":\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n root.left.left = TreeNode(0)\n root.left.right = TreeNode(7)\n root.right.left = TreeNode(10)\n root.right.right = TreeNode(1)\n root.left.left.left = TreeNode(2)\n root.left.right.left = TreeNode(1)\n root.left.right.right = TreeNode(0)\n root.right.right.left = TreeNode(8)\n root.right.right.right = TreeNode(8)\n\n test = Solution()\n out = test.maxPathSum(root)\n print out\n"
},
{
"alpha_fraction": 0.4929356276988983,
"alphanum_fraction": 0.4960753619670868,
"avg_line_length": 26.69565200805664,
"blob_id": "57ca37e199540a0ba2511ffdaee0106724f7d310",
"content_id": "34c52bcaba2487a0ccad350756d5db4b783b65ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 637,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 23,
"path": "/LongestCommPrefix.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Write a function to find the longest common prefix string \n# among an array of strings.\n\nclass Solution:\n # @return a string\n def longestCommonPrefix(self, strs):\n if strs == []: return ''\n prefix = strs[0]\n for s in strs[1:]:\n new = ''\n for j in range(min(len(prefix), len(s))):\n if prefix[j] == s[j]:\n new += s[j]\n else: break\n prefix = new\n if prefix == '': return ''\n return prefix\n\nif __name__ == '__main__':\n strs = ['pp','p']\n test = Solution()\n out = test.longestCommonPrefix(strs)\n print out\n"
},
{
"alpha_fraction": 0.3885180354118347,
"alphanum_fraction": 0.4072096049785614,
"avg_line_length": 23.19354820251465,
"blob_id": "cdce5e11c9ecf568589cbfc55b17ba877bcb726f",
"content_id": "8a946240abeaede4f76eeaac97c11c0cb70bb5b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 749,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 31,
"path": "/combintionSum3.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param {integer} k\n # @param {integer} n\n # @return {integer[][]}\n def combinationSum3(self, k, n):\n if 9*k < n: return []\n self.out = []\n self.k = k\n i = 1\n while i < 9:\n self.count = 1\n self.my = [i]\n self.out.append(self.DFS(i,n))\n i += 1\n return self.out\n \n \n def DFS(self, i, n):\n n = n - i\n self.count += 1\n if self.count == self.k:\n if n == 0: return [i]\n else: return []\n \n for j in range(i+1, 10):\n out = self.DFS(j, n)\n if out != []: out = [j] + out\n return out\n \nout = Solution()\nx = out.combinationSum3(3, 7)"
},
{
"alpha_fraction": 0.46292585134506226,
"alphanum_fraction": 0.4709418714046478,
"avg_line_length": 20.7391300201416,
"blob_id": "cbbd9ae2b56c84911793fac983421594061c493e",
"content_id": "a12b19d368b855b2b5954d4aa5948b532fc0cbb7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 23,
"path": "/reverseWords.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an input string, reverse the string word by word.\n\n# For example,\n# Given s = \"the sky is blue\",\n# return \"blue is sky the\".\n\ndef reverseWords(s):\n lt = s.split(' ')\n new = removeElement(lt, '') \n return ' '.join(new[::-1])\n\ndef removeElement(A, elem):\n j = 0 \n for i in A:\n if i != elem:\n A[j] = i\n j += 1\n \n return A[0:j]\n \nif __name__ == '__main__':\n s = \"the sky is blue\" \n out = reverseWords(' ')"
},
{
"alpha_fraction": 0.5568783283233643,
"alphanum_fraction": 0.5608465671539307,
"avg_line_length": 36.849998474121094,
"blob_id": "e5ed3c066df687bcdb2c17b45506926a7996b0c5",
"content_id": "88b9da1179431b929d8a2ac80a4d375e094ed0c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 756,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 20,
"path": "/containsNearbyAlmostDuplicate.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array of integers, find out whether there are two distinct indices i and j in the array \n# such that the difference between nums[i] and nums[j] is at most t and the difference between i and j is at most k.\n\nclass Solution(object):\n def containsNearbyAlmostDuplicate(self, nums, k, t):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :type t: int\n :rtype: bool\n \"\"\"\n seq = sorted(range(len(nums)), key=nums.__getitem__)\n for i in range(len(nums)-1):\n counter = 1\n while i+counter < len(nums) and nums[seq[i+counter]]-nums[seq[i]] <= t:\n if abs(seq[i+counter]-seq[i]) <= k:\n return True\n counter += 1\n\n return False"
},
{
"alpha_fraction": 0.5131672620773315,
"alphanum_fraction": 0.5288256406784058,
"avg_line_length": 26.5,
"blob_id": "6b0f883afcd682cc8454e5cac6a5d9e43bca4eca",
"content_id": "93f2302db0c4575b4d26c5228c1f19b0cb1c910c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1405,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 48,
"path": "/partitionList.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a linked list and a value x, \n# partition it such that all nodes less than x come before nodes greater than or equal to x.\n\n# You should preserve the original relative order of the nodes in each of the two partitions.\n\n# For example,\n# Given 1->4->3->2->5->2 and x = 3,\n# return 1->2->2->4->3->5.\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @param x, an integer\n # @return a ListNode\n def partition(self, head, x):\n left = ListNode(0)\n right = ListNode(0)\n left_head, right_head = left, right\n \n while head != None:\n if head.val < x:\n left.next = head\n left = left.next\n else:\n right.next = head\n right = right.next\n head = head.next\n right.next = None\n left.next = right_head.next\n \n return left_head.next\n\nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(4)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(2)\n head.next.next.next.next = ListNode(5)\n head.next.next.next.next.next = ListNode(2)\n \n \n test = Solution()\n out = test.partition(head, 3)\n \n\n \n \n \n \n \n \n \n \n"
},
{
"alpha_fraction": 0.5412654876708984,
"alphanum_fraction": 0.54676753282547,
"avg_line_length": 32.8139533996582,
"blob_id": "a8001148b470f06b8bb6909b3fb91e3479124615",
"content_id": "eaba76d35dbb6ed62e0ef6d914ab8177731f97dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1454,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 43,
"path": "/wildcardMatch.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Implement wildcard pattern matching with support for '?' and '*'.\n\n# '?' Matches any single character.\n# '*' Matches any sequence of characters (including the empty sequence).\n\n# The matching should cover the entire input string (not partial).\n\n# The function prototype should be:\n# bool isMatch(const char *s, const char *p)\n\n# Some examples:\n# isMatch(\"aa\",\"a\") -> false\n# isMatch(\"aa\",\"aa\") -> true\n# isMatch(\"aaa\",\"aa\") -> false\n# isMatch(\"aa\", \"*\") -> true\n# isMatch(\"aa\", \"a*\") -> true\n# isMatch(\"ab\", \"?*\") -> true\n# isMatch(\"aab\", \"c*a*b\") -> false\n\n# time limit exceed\nclass Solution:\n # @param s, an input string\n # @param p, a pattern string\n # @return a boolean\n def isMatch(self, s, p):\n if s == '' and p == '': return True\n i, j = 0, 0\n if i < len(s) and s[i] == '*': \n for k in range(j, len(p)+1):\n if self.isMatch(s[i+1:], p[k:]) == True: return T\n return False\n elif j < len(p) and p[j] == '*':\n for k in range(i, len(s)+1):\n if self.isMatch(s[k:], p[j+1:]) == True: return True\n return False\n elif i < len(s) and j < len(p) and (s[i] == p[j] or s[i] == '?' or p[j] == '?'):\n return self.isMatch(s[i+1:], p[j+1:])\n else: return False\n\nif __name__ == '__main__':\n test = Solution()\n out = test.isMatch(\"babaaababaabababbbbbbaabaabbabababbaababbaaabbbaaab\", \"***bba**a*bbba**aab**b\")\n print out\n"
},
{
"alpha_fraction": 0.5432525873184204,
"alphanum_fraction": 0.5839100480079651,
"avg_line_length": 31,
"blob_id": "e9bbc610e94985a8c89e8af18ffccdf8b83c3247",
"content_id": "ab5386a78e7f22ad47be5aa25ee1c89bfee16e7a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1156,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 36,
"path": "/jumpGameII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array of non-negative integers, you are initially positioned at the first index of the array.\n# Each element in the array represents your maximum jump length at that position.\n# Your goal is to reach the last index in the minimum number of jumps.\n\n# For example:\n# Given array A = [2,3,1,1,4]\n# The minimum number of jumps to reach the last index is 2. \n# (Jump 1 step from index 0 to 1, then 3 steps to the last index.)\n\n# exceed time limit on large data set\nclass Solution:\n # @param A, a list of integers\n # @return a boolean\n def jump(self, A):\n self.minstep = float('inf')\n self.dfs(A, 0, 0)\n return self.minstep\n\n def dfs(self, A, start, steps):\n l = len(A)\n if start == l-1: \n self.minstep = min(self.minstep, steps)\n return \n if A[start] == 0: return\n\n for i in range(1, A[start]+1):\n if start + i > l-1: return \n self.dfs(A, start + i, steps+1)\n return\n \nif __name__ == '__main__':\n A = [2,3,1,1,4]\n B = [6,2,6,1,7,9,3,5,3,7,2,8,9,4,7,7,2,2,8,4,6,6,1,3]\n test = Solution()\n out = test.jump(B)\n print out\n \n"
},
{
"alpha_fraction": 0.5361899137496948,
"alphanum_fraction": 0.5460335612297058,
"avg_line_length": 32.05769348144531,
"blob_id": "6f489a53692690844978da473903b1787e7fd0b6",
"content_id": "295289a07bede73279c71d8ce52f9bcc6bbcc3cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1727,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 52,
"path": "/zigzag.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree, \n# return the zigzag level order traversal of its nodes' values. \n# (ie, from left to right, then right to left for the next level and alternate between).\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of lists of integers\n def zigzagLevelOrder(self, root):\n if root == None: return []\n current = [root]\n direction = -1\n out = []\n \n while current != []:\n out.append([i.val for i in current])\n current = self.nextLevel(current, direction)\n direction *= -1 \n return out\n \n def nextLevel(self, current, direction):\n nextlevel = []\n if current == None:\n return nextlevel\n \n if direction == 1: # left to right\n for i in range(len(current)-1, -1, -1):\n if current[i].left != None: nextlevel.append(current[i].left)\n if current[i].right != None: nextlevel.append(current[i].right)\n \n if direction == -1: # right to left\n for i in range(len(current)-1, -1, -1):\n if current[i].right != None: nextlevel.append(current[i].right)\n if current[i].left != None: nextlevel.append(current[i].left)\n \n return nextlevel\n \nif __name__ == '__main__':\n root = TreeNode(3)\n root.left = TreeNode(9)\n root.right = TreeNode(20)\n root.right.left = TreeNode(15)\n root.right.right = TreeNode(7) \n \n test = Solution()\n out = test.zigzagLevelOrder(root) "
},
{
"alpha_fraction": 0.5759259462356567,
"alphanum_fraction": 0.5888888835906982,
"avg_line_length": 29.85714340209961,
"blob_id": "a6f48b1588835c62eafcf48341434bbdb0a06e93",
"content_id": "b10a3842403cd8644869accf0607974315d59ce1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1080,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 35,
"path": "/gasStationII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# There are N gas stations along a circular route, \n# where the amount of gas at station i is gas[i].\n\n# You have a car with an unlimited gas tank \n# and it costs cost[i] of gas to travel from station i to its next station (i+1). \n# You begin the journey with an empty tank at one of the gas stations.\n\n# Return the starting gas station's index if you can travel around the circuit once, \n# otherwise return -1.\n\n# Note:\n# The solution is guaranteed to be unique.\n\nclass Solution:\n # @param gas, a list of integers\n # @param cost, a list of integers\n # @return an integer\n def canCompleteCircuit(self, gas, cost):\n res = []\n tol, sumi, start = 0, 0, 0 \n for i in range(len(gas)): \n res.append(gas[i]-cost[i])\n tol += res[i]\n if sumi < 0: \n start = i\n sumi = res[i]\n else: sumi += res[i]\n return -1 if tol < 0 else start\n\nif __name__ == '__main__':\n gas = [5, 2, 4]\n cost = [2, 3, 4]\n test = Solution()\n out = test.canCompleteCircuit(gas, cost)\n print out\n"
},
{
"alpha_fraction": 0.5797619223594666,
"alphanum_fraction": 0.5952380895614624,
"avg_line_length": 31.30769157409668,
"blob_id": "66e7d6f86e025e4462e68931e5600e32ecbff185",
"content_id": "e9de935a6baabd4e1d8c882040e39cfca246809d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 840,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 26,
"path": "/searchInsert.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a sorted array and a target value, \n# return the index if the target is found. \n# If not, return the index where it would be if it were inserted in order.\n\n# You may assume no duplicates in the array.\n\nclass Solution:\n # @param A, a list of integers\n # @param target, an integer to be inserted\n # @return integer\n def searchInsert(self, A, target):\n if A == []: return 0\n if len(A) == 1: return 1 if target > A[0] else 0\n median = len(A)/2\n if target == A[median]: return median\n elif target > A[median]: \n index = median + 1+ self.searchInsert(A[median+1:], target)\n else:\n index = self.searchInsert(A[:median], target)\n return index\n\nif __name__ == '__main__':\n A = [1,3,5,6]\n test = Solution()\n out = test.searchInsert(A, 5)\n print out\n"
},
{
"alpha_fraction": 0.535538375377655,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 26.25,
"blob_id": "67292645dd4bcae40337697a8c058c2c3ad7c568",
"content_id": "2c20f5047fa5207fb62629aba1f90ca3e6235966",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1421,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 52,
"path": "/searchMatrix.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Write an efficient algorithm that searches for a value in an m x n matrix. \n# This matrix has the following properties:\n# Integers in each row are sorted from left to right.\n# The first integer of each row is greater than the last integer of the previous row.\n\n# For example,\n# Consider the following matrix:\n#[[1, 3, 5, 7],\n# [10, 11, 16, 20],\n# [23, 30, 34, 50]]\n# Given target = 3, return true.\n\n# Total numbers of the matrix = m*n\n# The i-th number is at:\n# row = i//n\n# col = i%n\n\ndef searchMatrix(matrix, target):\n if matrix == [] or matrix == [[]]:\n return False\n m = len(matrix)\n n = len(matrix[0]) \n \n return compare(matrix, m, n, [0, m*n], target)\n\ndef transfer(num, m,n):\n i = num//n\n j = num%n\n return [i,j]\n \ndef compare(matrix, m, n, interval, target): \n if interval[0] == interval[1]:\n return False\n mid = sum(interval)//2\n [i,j] = transfer(mid, m, n)\n \n left, right = False, False\n \n if matrix[i][j] > target:\n leftInt = [interval[0], mid]\n left = compare(matrix, m, n, leftInt, target)\n elif matrix[i][j] < target:\n rightInt = [mid+1, interval[1]]\n right = compare(matrix, m, n, rightInt, target)\n else:\n return True\n \n return left or right\n\nif __name__ == '__main__':\n matrix = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]]\n out = searchMatrix(matrix, 16)\n "
},
{
"alpha_fraction": 0.4870345890522003,
"alphanum_fraction": 0.5006648898124695,
"avg_line_length": 28.78217887878418,
"blob_id": "6016bc0f62d82d7c2436c071c0a32771b722eb88",
"content_id": "73e0bf2ebabb20e24d48e66a60f897ff865f36e0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3008,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 101,
"path": "/wordBreak.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string s and a dictionary of words dict, \n<<<<<<< HEAD\n# add spaces in s to construct a sentence where each word is a valid dictionary word.\n\n# Return all such possible sentences.\n\n# For example, given\n# s = \"catsanddog\",\n# dict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"].\n\n# A solution is [\"cats and dog\", \"cat sand dog\"].\n\n# time limit exceed\nclass Solution2(object):\n def wordBreak(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: Set[str]\n :rtype: List[str]\n \"\"\"\n self.sent = []\n self.search(s, 0, wordDict, [])\n return self.sent\n\n def search(self, s, start, wordDict, cur):\n if start == len(s): return cur\n j = 1\n while j+start <= len(s):\n if s[start:start+j] in wordDict:\n if cur == []: new = [s[start:start+j]]\n else: new = [i+' '+s[start:start+j] for i in cur]\n self.search(s, start+j, wordDict, new)\n j += 1\n \n return \n \nclass Solution2(object):\n def wordBreak(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: Set[str]\n :rtype: List[str]\n \"\"\"\n self.sent = []\n self.len = len(s)\n self.possible = [True]*len(s)\n self.search(s, wordDict, [])\n \n return self.sent\n\n def search(self, s, wordDict, cur):\n if len(s) == 0: \n self.sent += cur\n return 0\n res = len(s)\n for j in range(1, len(s)): \n if s[:j] in wordDict and self.possible[self.len-len(s)] == True:\n if cur == []: new = [s[:j]]\n else: new = [i+' '+s[:j] for i in cur]\n res = min(res, self.search(s[j:], wordDict, new))\n print s, j, res, self.possible\n if res > 0: self.possible[self.len-len(s)] = False \n \n \n=======\n# determine if s can be segmented into a space-separated sequence of one or more dictionary words.\n\n# For example, given\n# s = \"leetcode\",\n# dict = [\"leet\", \"code\"].\n\n# Return true because \"leetcode\" can be segmented as \"leet code\".\n\n# p[start][end] is whether s[start:end] can be segmented\nclass Solution:\n # @param s, a string\n # @param dict, a set of string\n # @return a boolean\n def wordBreak(self, s, dict):\n ls = len(s)\n p = [[False]*(ls+1) for i in range(ls)]\n for end in range(1, ls+1):\n for start in range(end-1, -1, -1):\n if s[start:end] in dict: p[start][end] = True\n else:\n for mid in range(start+1, end): \n temp = p[start][mid] and p[mid][end]\n if temp == True: \n p[start][end] = True\n break\n return p[0][ls]\n\nif __name__ == '__main__':\n s = 'pplovelppp'\n dict = ['pp','love','lp']\n test = Solution()\n out = test.wordBreak(s, dict)\n print out\n\n\n>>>>>>> c9daaa8fc778290aa4bd8c08de16455ca9112257\n"
},
{
"alpha_fraction": 0.40137389302253723,
"alphanum_fraction": 0.4416094124317169,
"avg_line_length": 32.96666717529297,
"blob_id": "a34ae263276c9c50267f9d99265365d2beb9f64a",
"content_id": "8e51abc5aa14f43f13569699a4511063254e3d5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1019,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 30,
"path": "/permutations.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a collection of numbers, return all possible permutations.\n# For example,\n# [1,2,3] have the following permutations:\n# [1,2,3], [1,3,2], [2,1,3], [2,3,1], [3,1,2], and [3,2,1].\n\nclass Solution:\n # @param num, a list of integer\n # @return a list of lists of integers\n \n # per(1) = 1\n # per(n) = per(n-1)*n \n def permute(self, num):\n p0 = [[num[0]]] # permutations at step n-1\n for i in range(1, len(num)):\n p1 = [] #permutations at step n\n for item in p0: \n j = 0 \n while j <= len(item):\n # insert num[i] before item[j]\n p1.append(item[0:j] + [num[i]] + item[j:])\n # avoid duplicates\n if j != len(item) and item[j] == num[i]: j += 2\n else: j += 1 \n p0 = p1\n return p0\n\nif __name__ == '__main__':\n test = Solution()\n out = test.permute([1,2,3])\n print out\n"
},
{
"alpha_fraction": 0.45733460783958435,
"alphanum_fraction": 0.49952059984207153,
"avg_line_length": 26.63888931274414,
"blob_id": "c8e37304c64681c36db7287b38c47b6b04623b4e",
"content_id": "bd591c10924873a0c55003b003632fc002f5a619",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1043,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 36,
"path": "/MergeIntervals.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a collection of intervals, merge all overlapping intervals.\n# For example,\n# Given [1,3],[2,6],[8,10],[15,18],\n# return [1,6],[8,10],[15,18].\n\nclass Interval:\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\n \ndef merge(intervals): \n if len(intervals) <= 1:\n return intervals\n \n record = []\n l = 0\n intervals.sort(key = lambda x: x.start) \n \n for i in range(1, len(intervals)):\n if intervals[i].start <= intervals[i-1].end:\n intervals[i].end = max(intervals[i].end, intervals[i-1].end)\n l += 1\n else: \n record.append([i-1, l])\n l = 0\n record.append([i, l])\n \n new = []\n for j in record: \n new.append(Interval(intervals[j[0]-j[1]].start, intervals[j[0]].end))\n \n return new\n \nif __name__ == '__main__':\n intervals = [Interval(2,3), Interval(4,5), Interval(6,7), Interval(8,9), Interval(1,10)]\n out = merge(intervals)\n \n \n \n \n \n \n"
},
{
"alpha_fraction": 0.571906328201294,
"alphanum_fraction": 0.5735785961151123,
"avg_line_length": 24.913043975830078,
"blob_id": "7bea7460f247de0b6fe7b4c21090e7a33ad1414f",
"content_id": "a3dd6a437da3d98659079a1396daccacfc951ef8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 598,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 23,
"path": "/linkedListCycleII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a linked list, return the node where the cycle begins. \n# If there is no cycle, return null.\n\n# Follow up:\n# Can you solve it without using extra space?\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a list node\n def detectCycle(self, head):\n record = {}\n while head != None:\n record[head] = 0\n if head.next in record:\n return head.next\n head = head.next\n return None\n\n\n"
},
{
"alpha_fraction": 0.4703783094882965,
"alphanum_fraction": 0.479657381772995,
"avg_line_length": 29.45652198791504,
"blob_id": "eafb65de3356b8e5a93b7fe064cd9d48a2eefe59",
"content_id": "1952615638c184a9feae6be21361d1d065f55b23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1401,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 46,
"path": "/wildcardMatch_v2.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Implement wildcard pattern matching with support for '?' and '*'.\n\n# '?' Matches any single character.\n# '*' Matches any sequence of characters (including the empty sequence).\n\n# The matching should cover the entire input string (not partial).\n\n# The function prototype should be:\n# bool isMatch(const char *s, const char *p)\n\n# Some examples:\n# isMatch(\"aa\",\"a\") -> false\n# isMatch(\"aa\",\"aa\") -> true\n# isMatch(\"aaa\",\"aa\") -> false\n# isMatch(\"aa\", \"*\") -> true\n# isMatch(\"aa\", \"a*\") -> true\n# isMatch(\"ab\", \"?*\") -> true\n# isMatch(\"aab\", \"c*a*b\") -> false\n\n# dp[i][j] = s[:i] matches p[:j]\nclass Solution:\n # @param s, an input string\n # @param p, a pattern string\n # @return a boolean\n def isMatch(self, s, p):\n ls, lp = len(s), len(p)\n dp = [[False]*(lp+1) for i in range(ls+1)]\n dp[0][0] = True\n i, j = 0, 0\n while i < ls and j < lp:\n if dp[i][j] == True and \\\n p[j] == s[i] or p[j] == '?':\n dp[i+1][j+1] = True\n i += 1\n j += 1 \n elif p[j] == '*': \n for k in range(i,ls): dp[k+1][j+1] = True\n j += 1\n else: return False\n if i == ls: return p[j:] == '*'*(lp-j)\n return dp[ls][lp] \n \nif __name__ == '__main__':\n test = Solution()\n out = test.isMatch('hi','*?')\n print out\n"
},
{
"alpha_fraction": 0.523809552192688,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 31.387096405029297,
"blob_id": "b0487dfd2119e36bf9f9d8e05e2d88ae94b700d2",
"content_id": "8f56bb0f4bec09a1ed21611fbb7b0bb58fb9b90d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1008,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 31,
"path": "/jumpGameIII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array of non-negative integers, you are initially positioned at the first index of the array.\n# Each element in the array represents your maximum jump length at that position.\n# Your goal is to reach the last index in the minimum number of jumps.\n\n# For example:\n# Given array A = [2,3,1,1,4]\n# The minimum number of jumps to reach the last index is 2. \n# (Jump 1 step from index 0 to 1, then 3 steps to the last index.)\n\nclass Solution:\n # @param A, a list of integers\n # @return a boolean\n def jump(self, A):\n l = len(A)\n steps, curr, last = 0, 0, 0\n for i in range(l):\n if i > last and i<= curr:\n last = curr\n steps += 1 \n curr = max(curr, A[i]+i)\n\n if curr < l-1: return #fail to reach the end\n return steps\n \nif __name__ == '__main__':\n A = [2,3,1,1,4]\n B = [6,2,6,1,7,9,3,5,3,7,2,8,9,4,7,7,2,2,8,4,6,6,1,3]\n C = [1,1,1,1]\n test = Solution()\n out = test.jump(B)\n print out\n \n"
},
{
"alpha_fraction": 0.5878661274909973,
"alphanum_fraction": 0.5951882600784302,
"avg_line_length": 33.03571319580078,
"blob_id": "391cbd56f025fe68f40b311618ec4fec6a79d85e",
"content_id": "3b376e18c02e165871979b262eda3fbd7279e243",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 956,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 28,
"path": "/gasStation.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# There are N gas stations along a circular route, \n# where the amount of gas at station i is gas[i].\n\n# You have a car with an unlimited gas tank \n# and it costs cost[i] of gas to travel from station i to its next station (i+1). \n# You begin the journey with an empty tank at one of the gas stations.\n\n# Return the starting gas station's index if you can travel around the circuit once, \n# otherwise return -1.\n\n# Note:\n# The solution is guaranteed to be unique.\n\nclass Solution:\n # @param gas, a list of integers\n # @param cost, a list of integers\n # @return an integer\n def canCompleteCircuit(self, gas, cost):\n N = len(gas)\n for start in range(N): \n count, res = 0, 0\n for i in range(start, start + N):\n curr = res + gas[i/N] \n res = curr - cost[i/N]\n if res < 0: break \n count += 1\n if count == N: return start\n return -1 \n\n\n"
},
{
"alpha_fraction": 0.5535353422164917,
"alphanum_fraction": 0.5595959424972534,
"avg_line_length": 30.870967864990234,
"blob_id": "79fb82199ec36ce583e297800af40dd5a00e196b",
"content_id": "19ee0f92bdc92c29a7120fb625d02c1939dffe6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 990,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 31,
"path": "/wordBreakII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string s and a dictionary of words dict, \n# add spaces in s to construct a sentence where each word is a valid dictionary word.\n\n# Return all such possible sentences.\n\n# For example, given\n# s = \"catsanddog\",\n# dict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"].\n# A solution is [\"cats and dog\", \"cat sand dog\"].\n\n# p[start][end] is all possible ways that s[start:end] can be segmented\nclass Solution:\n # @param s, a string\n # @param dict, a set of string\n # @return a boolean\n def wordBreak(self, s, dict):\n ls = len(s)\n p = [[] for i in range(ls+1)]\n for end in range(1,ls+1):\n if s[:end] in dict: p[end].append(s[:end])\n for mid in range(end, 0, -1):\n if s[mid:end] in dict:\n for i in p[mid]: p[end].append(i+' '+ s[mid:end])\n return p[-1]\n\nif __name__ == '__main__':\n s = 'pplovelp'\n dict = ['p','pp','love','lp']\n test = Solution()\n out = test.wordBreak(s, dict)\n print out\n\n\n"
},
{
"alpha_fraction": 0.352187842130661,
"alphanum_fraction": 0.3959445059299469,
"avg_line_length": 26.75757598876953,
"blob_id": "fedb4d2ee9ef4b1ee69a94de2ead6b7d2708977d",
"content_id": "7053d288e98d0fc24f3aec2384003da274d25dda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 937,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 33,
"path": "/intToRoman.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an integer, convert it to a roman numeral.\n# Input is guaranteed to be within the range from 1 to 3999.\n\nclass Solution:\n # @return a string\n def intToRoman(self, num):\n out = ''\n roman = 'MDCLXVI'\n inti = [1000, 500, 100, 50, 10, 5, 1]\n \n i = 0\n while num > 0:\n print num\n fac = num // inti[i]\n res = num - fac*inti[i]\n if res != 0 and res//inti[i+1] == 4:\n if fac == 1:\n out += roman[i+1] + roman[i-1]\n elif fac == 0:\n out += roman[i+1] + roman[i]\n num = res - 4*inti[i+1]\n i += 2\n else:\n out += roman[i]*fac\n num = res\n i += 1\n \n return out\n\nif __name__ == '__main__':\n test = Solution()\n num = 1040\n out = test.intToRoman(44)\n \n "
},
{
"alpha_fraction": 0.5209380388259888,
"alphanum_fraction": 0.5435510873794556,
"avg_line_length": 26.06818199157715,
"blob_id": "d82c8f4fcc14d8b2ae11e108db2a0c6b5939a9e4",
"content_id": "0385896fbd766db2a52e5e62e37467ad91cc6fe4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1194,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 44,
"path": "/reorderList.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a singly linked list L: L0->L1->...->Ln-1->Ln,\n# reorder it to: L ->Ln->L->Ln-1->L2->Ln-2...\n\n# You must do this in-place without altering the nodes' values.\n\n# For example,\n# Given {1,2,3,4}, reorder it to {1,4,2,3}.\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return nothing\n def reorderList(self, head):\n if head == None: return head\n record = {}\n index = 0 \n while head != None:\n record[index] = head\n index += 1\n head = head.next\n record[index/2].next = None\n for i in range((index-1)/2):\n temp = record[i].next\n record[i].next = record[index-1-i]\n record[index-1-i].next = temp\n return record[0]\n\nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(2)\n #head.next.next = ListNode(3)\n #head.next.next.next = ListNode(4)\n #head.next.next.next.next = ListNode(5)\n \n test = Solution() \n out = test.reorderList(head) \n while out != None:\n print out.val\n out = out.next \n"
},
{
"alpha_fraction": 0.3790476322174072,
"alphanum_fraction": 0.48571428656578064,
"avg_line_length": 30.420000076293945,
"blob_id": "ffb1e13c0496d3050bd26f95e173b8a678e32e8a",
"content_id": "28e0a3bea3b404eae2e756c42f4a9f8dd6a3ca77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1575,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 50,
"path": "/decodeways.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# A message containing letters from A-Z is being encoded to numbers using the following mapping:\n# 'A' -> 1\n# 'B' -> 2\n# ...\n# 'Z' -> 26\n\n# Given an encoded message containing digits, \n# determine the total number of ways to decode it.\n\n# For example,\n# Given encoded message \"12\", it could be decoded as \"AB\" (1 2) or \"L\" (12).\n# The number of ways decoding \"12\" is 2.\n\nclass Solution:\n # @param s, a string\n # @return an integer\n \n def numDecodings(self, s):\n#-----------------initialization----------------\n if s == '' or s[0] == '0': return 0 \n l = len(s)\n ways0, ways1 = 1, 1\n digit = lambda x: ord(x) - ord('0')\n \n if l == 1: return ways0\n \n num = digit(s[0])*10 + digit(s[1])\n if num > 10 and num < 27 and num != 20: \n ways1 = 2\n elif num > 27 and num%10 == 0:\n ways1 = 0\n#-----------------dp----------------------------- \n for i in range(2, l):\n num = digit(s[i-1])*10 + digit(s[i])\n if num > 10 and num < 27 and num != 20:\n temp = ways0 + ways1 \n elif num == 10 or num == 20:\n temp = ways0\n elif num%10 != 0:\n temp = ways1 \n else: return 0 \n ways0, ways1 = ways1, temp\n \n return ways1\n \nif __name__ == '__main__':\n test = Solution()\n num = \"47575625458446174945557745813412115112968167865867877552577411785\\\n 99337186486723247528324612117156948\"\n out = test.numDecodings(num)\n "
},
{
"alpha_fraction": 0.5491241216659546,
"alphanum_fraction": 0.5681645274162292,
"avg_line_length": 31.725000381469727,
"blob_id": "77f55c5246c2bcb8c20152773114517769810cb5",
"content_id": "3221ccb9c96a4c8db38ec74668be28769b27a61e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1313,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 40,
"path": "/longestConsecutiveII.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an unsorted array of integers, \n# find the length of the longest consecutive elements sequence.\n\n# For example,\n# Given [100, 4, 200, 1, 3, 2],\n# The longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4.\n\n# Your algorithm should run in O(n) complexity.\n\n# This version report Runtime Error with big data \nclass Solution:\n # @param num, a list of integer\n # @return an integer \n def longestConsecutive(self, num):\n if num == []: return 0\n hashmap = {}\n for i in range(len(num)): hashmap[num[i]] = i\n maxlen = 1\n \n for i in range(len(num)): \n if num[i] not in hashmap: continue \n self.length = 1\n del hashmap[num[i]]\n hashmap = self.findNext(num[i], hashmap, 1)\n hashmap = self.findNext(num[i], hashmap, -1)\n maxlen = max(maxlen, self.length)\n \n return maxlen\n \n def findNext(self, elem, hashmap, direction): \n if elem + direction in hashmap:\n del hashmap[elem + direction]\n self.findNext(elem + direction, hashmap, direction)\n self.length += 1\n return hashmap \n\nif __name__ == '__main__':\n num = [1, 2, 0 ,1]\n test = Solution()\n out = test.longestConsecutive(num)\n "
},
{
"alpha_fraction": 0.4934409558773041,
"alphanum_fraction": 0.5116044282913208,
"avg_line_length": 30.935483932495117,
"blob_id": "20e815f02f1f1300b2681d5394fb7fe1aca397a9",
"content_id": "4d06722120eb0cedfe65491724023486a143dc8e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 991,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 31,
"path": "/3sumClosest.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array S of n integers, \n# find three integers in S such that the sum is closest to a given number, target. \n# Return the sum of the three integers. \n# You may assume that each input would have exactly one solution.\n\nclass Solution:\n # @return an integer\n def threeSumClosest(self, num, target):\n if num == []: return\n num.sort()\n lnum = len(num)\n minres = float('inf')\n out = None\n\n for i in range(lnum):\n res = target - num[i]\n left, right = i+1, lnum-1\n while left < right:\n res2 = res - num[left] - num[right] \n if res2 > 0: left += 1\n if res2 == 0: return target\n if res2 < 0: right -= 1\n if abs(res2) < abs(minres):\n minres = res2\n out = target - res2 \n return out\n\nif __name__ == '__main__':\n test = Solution()\n out = test.threeSumClosest([0,0,0],1)\n print out\n\n"
},
{
"alpha_fraction": 0.5072084069252014,
"alphanum_fraction": 0.5124508738517761,
"avg_line_length": 28.384614944458008,
"blob_id": "fdfedd16042b0458057f6d5fe4fd01a1b079b462",
"content_id": "2c4ea6074c22af4fe58f8e80f5477cbd8bc0c932",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 763,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 26,
"path": "/isAnagram.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Given two strings s and t, write a function to determine if t is an anagram of s.\n\n#For example,\n#s = \"anagram\", t = \"nagaram\", return true.\n#s = \"rat\", t = \"car\", return false.\n\n#Note:\n#You may assume the string contains only lowercase alphabets.\n\nclass Solution:\n # @param {string} s\n # @param {string} t\n # @return {boolean}\n def isAnagram(self, s, t):\n if len(s) != len(t): return False\n s_dict, t_dict = {}, {}\n for i in range(len(s)):\n if s[i] not in s_dict:\n s_dict[s[i]] = 1\n else: s_dict[s[i]] += 1\n \n if t[i] not in t_dict:\n t_dict[t[i]] = 1\n else: t_dict[t[i]] += 1\n if s_dict == t_dict: return True\n else: return False"
},
{
"alpha_fraction": 0.48134326934814453,
"alphanum_fraction": 0.4987562298774719,
"avg_line_length": 32.85714340209961,
"blob_id": "68692a6390f125551bae565b66dd81f29f511977",
"content_id": "d833c513619709642d006462cb0f8ab5f4cdfeec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 804,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 21,
"path": "/largestNumber.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a list of non negative integers, arrange them such that they form the largest number.\n\n# For example, given [3, 30, 34, 5, 9], the largest formed number is 9534330.\n\n# Note: The result may be very large, so you need to return a string instead of an integer.\n\nclass Solution(object):\n def largestNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: str\n \"\"\"\n nums = map(lambda x: ','.join(x).split(','), nums)\n sortbylen = {}\n for i in nums:\n if len(i) in sortbylen:\n sortbylen[len(i)].append(i)\n else: sortbylen[len(i)] = [i]\n \n for key in sortbylen.keys():\n sortbylen[key].sort()\n \n \n \n \n \n \n \n \n \n "
},
{
"alpha_fraction": 0.4489194452762604,
"alphanum_fraction": 0.4670923352241516,
"avg_line_length": 34.71929931640625,
"blob_id": "d54043b1a352e5da0c387b3c1a5edfe74d6afd74",
"content_id": "ca7982ff5e6b11a997b320573d657a510d7286b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2036,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 57,
"path": "/SudokuSolver.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Write a program to solve a Sudoku puzzle by filling the empty cells.\n# Empty cells are indicated by the character '.'.\n# You may assume that there will be only one unique solution.\n\nclass Solution:\n # @param board, a 9x9 2D array\n # Solve the Sudoku by modifying the input board in-place.\n # Do not return any value.\n def solveSudoku(self, board):\n self.rows, self.cols = [{} for i in range(9)], [{} for i in range(9)]\n self.squares = [[{} for j in range(3)] for i in range(3)]\n for i in range(9):\n for j in range(9):\n if board[i][j] != '.': \n self.rows[i][board[i][j]] = [i,j]\n self.squares[i/3][j/3][board[i][j]] = [i,j]\n if board[j][j] != '.': \n self.cols[j][board[i][j]] = [j,i]\n self.solver(0, board)\n return board\n\n def solver(self, index, board):\n if index > 80: return\n row, col = index/9, index%9\n if board[row][col] != '.': \n self.solver(index+1, board)\n for val in range(9):\n if self.isValid(row, col, val):\n temp = board[row]\n board[row]= board[row][:col]+str(val)+board[row][col+1:]\n self.rows[row][str(val)] = [row,col]\n self.cols[col][str(val)] = [row, col]\n self.solver(index+1, board)\n del self.rows[row][str(val)]\n del self.cols[col][str(val)]\n board[row] = temp\n\n def isValid(self, row, col, val):\n if str(val) in self.rows[row] or str(val) in self.cols[col] or\\\n str(val) in self.squares[row][col]: \n return False\n else: return False\n\n\nif __name__ == '__main__':\n board = [\"..5.....6\",\\\n \"....14...\",\\\n \".........\",\\\n \".....92..\",\\\n \"5....2...\",\\\n \".......3.\",\\\n \"...54....\",\\\n \"3.....42.\",\\\n \"...27.6..\"]\n test = Solution()\n out = test.solveSudoku(board)\n print out\n"
},
{
"alpha_fraction": 0.5577639937400818,
"alphanum_fraction": 0.5726708173751831,
"avg_line_length": 22.705883026123047,
"blob_id": "8c592906a0667a36f61d5f3d82d70ba5d5e77596",
"content_id": "3e31957b1025e2cbbd13063728ae353f8883a522",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 805,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 34,
"path": "/isBalanced.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "class Treenode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \ndef isBalanced(tree):\n if tree == None:\n return True\n \n if abs(treeDepth(tree.left) - treeDepth(tree.right)) <= 1:\n left = isBalanced(tree.left)\n right = isBalanced(tree.right)\n return left and right\n else: \n return False\n\ndef treeDepth(tree):\n if tree == None:\n return 0\n lmax = treeDepth(tree.left)\n rmax = treeDepth(tree.right)\n \n return max(lmax, rmax) + 1\n \nif __name__ == '__main__':\n tree = Treenode(10)\n tree.left = Treenode(9)\n tree.left.left = Treenode(8)\n tree.right = Treenode(12)\n tree.right.right = Treenode(30)\n tree.right.right.right = Treenode(0)\n\nprint isBalanced(tree)"
},
{
"alpha_fraction": 0.5853333473205566,
"alphanum_fraction": 0.6006666421890259,
"avg_line_length": 34.619049072265625,
"blob_id": "d1d0ce37ee690131a037e4e0c5b743ba94255e34",
"content_id": "205d7dc4b8576f63c02a69462f9a718265aaf3b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1502,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 42,
"path": "/combinationSum.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Given a set of candidate numbers (C) and a target number (T), \n# find all unique combinations in C where the candidate numbers sums to T.\n\n# The same repeated number may be chosen from C unlimited number of times.\n\n# Note:\n# All numbers (including target) will be positive integers.\n# Elements in a combination (a1, a2, … , ak) must be in non-descending order.\n# The solution set must not contain duplicate combinations.\n# For example, given candidate set 2,3,6,7 and target 7, \n# A solution set is: [7] [2, 2, 3] \n\nclass Solution:\n # @param candidates, a list of integers\n # @param target, integer\n # @return a list of lists of integers\n def combinationSum(self, candidates, target):\n candidates.sort()\n self.collection = []\n temp = []\n self.dfs(candidates, target, 0, 0, temp) \n return self.collection\n \n def dfs(self, candidates, target, now, start, temp):\n if now == target: \n # Attention: do NOT directly append temp, it will change!\n self.collection.append(temp[:]) \n return\n if now > target: return\n \n for i in range(start, len(candidates)): \n temp.append(candidates[i])\n self.dfs(candidates, target, now + candidates[i], i, temp)\n temp.pop() # hmmmm... smart!\n \n \nif __name__ == '__main__':\n cand = [2,3,6,7]\n tar = 7\n test = Solution()\n out = test.combinationSum([2,3,5], 8) "
},
{
"alpha_fraction": 0.5669565200805664,
"alphanum_fraction": 0.5852174162864685,
"avg_line_length": 25.159090042114258,
"blob_id": "69305e9660d6fb405a7299836e01c4e7c89a6b1a",
"content_id": "5c53a638cdfffce51d23d741609e7383bbcedd67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1150,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 44,
"path": "/symmetricTree.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree, \n# check whether it is a mirror of itself (ie, symmetric around its center).\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \n# root.left = root.right\n# root.left.left = root.right.right\n# root.left.right = root.right.left \ndef isSymmetric(root):\n if root == None:\n return True\n return compare(root.left, root.right)\n \ndef compare(node1, node2):\n if node1 == None and node2 == None:\n return True\n \n if node1 == None and node2 != None or\\\n node2 == None and node1 != None:\n return False\n \n left, right = False, False\n if node1.val == node2.val:\n left = compare(node1.left, node2.right)\n right = compare(node1.right, node2.left)\n else: \n return False\n \n return left and right\n \nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(2)\n root.left.left = TreeNode(3)\n #root.left.right = TreeNode(4)\n root.right.right = TreeNode(3)\n #root.right.left = TreeNode(4)\n \n out = isSymmetric(root)"
},
{
"alpha_fraction": 0.516853928565979,
"alphanum_fraction": 0.5350043177604675,
"avg_line_length": 24.622222900390625,
"blob_id": "84cd53c9a0d6397b18c412bee328640fb1edd716",
"content_id": "c292d6b1445eb3673f0172a41c11b344bc233d52",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1157,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 45,
"path": "/rotateList.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a list, rotate the list to the right by k places, where k is non-negative.\n\n# For example:\n# Given 1->2->3->4->5->NULL and k = 2,\n# return 4->5->1->2->3->NULL.\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @param k, an integer\n # @return a ListNode\n def rotateRight(self, head, k):\n if head == None: return\n new, old = head, head\n count = 1\n while head.next != None:\n head = head.next\n count += 1\n\n k = k%count\n if k == 0: return old\n head.next = old\n for i in range(count-k):\n new = new.next\n if i == count-k-1: old.next = None\n else: old = old.next\n return new\n \nif __name__ == '__main__':\n head = ListNode(1)\n head.next = ListNode(2)\n head.next.next = ListNode(3)\n# head.next.next.next = ListNode(4)\n# head.next.next.next.next = ListNode(5)\n\n test = Solution()\n out = test.rotateRight(head, 1)\n while out != None:\n print out.val\n out = out.next\n \n"
},
{
"alpha_fraction": 0.3956044018268585,
"alphanum_fraction": 0.43014129996299744,
"avg_line_length": 31.947368621826172,
"blob_id": "7b3ccde8a5265cc93abf2992973e1608b0130063",
"content_id": "363d01e6fbdc83204800ab70549adbc9c3b932e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1274,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 38,
"path": "/4sum.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an array S of n integers, \n# are there elements a, b, c, and d in S such that a + b + c + d = target? \n# Find all unique quadruplets in the array which gives the sum of target.\n# For example, given array S = {1 0 -1 0 -2 2}, and target = 0.\n# A solution set is:\n# (-1, 0, 0, 1)\n# (-2, -1, 1, 2)\n# (-2, 0, 0, 2)\n\n# exceed time limit on large data set\nclass Solution:\n # @return a list of lists of length 4, [[val1,val2,val3,val4]]\n def fourSum(self, num, target):\n num.sort()\n l = len(num)\n out = set()\n \n for i in range(l-3):\n if i > 0 and num[i] == num[i-1]: continue\n for j in range(i+1, l-2):\n if j > 0 and num[j] == num[j-1]: continue\n m, n = 1, 1 \n while j+m < l-n:\n sumi = (num[i], num[j], num[j+m], num[l-n])\n if sum(sumi) == target:\n out.add(sumi)\n m += 1\n n += 1\n elif sum(sumi) > target:\n n += 1\n else: m += 1\n return list(out)\n\nif __name__ == '__main__':\n s = [1, 0, -1, 0, -2, 2]\n test = Solution()\n out = test.fourSum(s, 0)\n print out \n \n\n"
},
{
"alpha_fraction": 0.2851985692977905,
"alphanum_fraction": 0.3249097466468811,
"avg_line_length": 12.899999618530273,
"blob_id": "c6989f01de97fbdc26fb84562e87e64521b8cf54",
"content_id": "f333bc2f9bf062008d6d37b842cea88e9f701e1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 20,
"path": "/pow.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "def pow(x, n):\n x0 = x\n if n == 0:\n return 1\n \n if n == 1:\n return x\n \n i = 1\n while 2*i < n:\n x *= x\n i *= 2\n print x, i\n \n x *= pow(x0, n - i)\n \n return x\n\nif __name__ == '__main__':\n print pow(2, 11)"
},
{
"alpha_fraction": 0.4343434274196625,
"alphanum_fraction": 0.44107744097709656,
"avg_line_length": 20.285715103149414,
"blob_id": "5ed08dfe8661e9ed9e08e06b55272d9b67aac790",
"content_id": "e76fea9738c68e323118fc3d574d59d39d8c5ce7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 297,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 14,
"path": "/wordCount.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "def wordCount(s):\n dictionary = {}\n \n for i in s:\n if dictionary.get(i) == None:\n dictionary[i] = 1\n \n else: dictionary[i] += 1\n \n return dictionary\n \nif __name__ == '__main__':\n s = ['abd', 'c', 'pp','abc', 'c','pp','pp']\n cc = wordCount(s)"
},
{
"alpha_fraction": 0.5061403512954712,
"alphanum_fraction": 0.5184210538864136,
"avg_line_length": 23.276596069335938,
"blob_id": "93a8e1f4572cb10168615433b72e570c4bb4fb3e",
"content_id": "5035cdad86b8487af25e4c3953b2714459f9199e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1140,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 47,
"path": "/strStr.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Implement strStr().\n# Returns a pointer to the first occurrence of needle in haystack, \n# or null if needle is not part of haystack.\n\ndef strStr(haystack, needle):\n hlen = len(haystack)\n nlen = len(needle)\n i = 0\n\n while i <= hlen - nlen:\n overlapped = compare(haystack[i:i + nlen], needle, nlen)\n if overlapped == nlen:\n return haystack[i:]\n elif overlapped == 0:\n i += 1\n else: \n partial = matchTable(haystack[i:i+overlapped])\n i += overlapped - partial \n \n return None\n\ndef matchTable(s):\n l = len(s)\n if l <= 1: return 0 \n maxlen = 0\n \n for i in range(1,l):\n if s[0:i] == s[l-i:l]:\n maxlen = i\n \n return maxlen\n\n# assume two strings have same lengthout\ndef compare(s1, s2, length): \n count = 0\n for i in range(length):\n if s1[i] == s2[i]:\n count += 1\n else:\n break\n return count\n \nif __name__ == '__main__':\n haystack = 'BBC ABCDAB ABCDABCDABDE'\n needle = 'ABCDABE'\n \n out = strStr('mississippi', 'issip')"
},
{
"alpha_fraction": 0.5797872543334961,
"alphanum_fraction": 0.6223404407501221,
"avg_line_length": 28.736841201782227,
"blob_id": "2f1245dce02865ab97e30c67bf662658d4fe081a",
"content_id": "6c185284b9d97cf796ce87f349f781d05861b3b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 564,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 19,
"path": "/multiplyStr.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given two numbers represented as strings, \n# return multiplication of the numbers as a string.\n\n# Note: The numbers can be arbitrarily large and are non-negative.\n\nclass Solution:\n # @param num1, a string\n # @param num2, a string\n # @return a string\n def multiply(self, num1, num2):\n n1 = int(num1) if num1.isdigit() else float(num1)\n n2 = int(num2) if num2.isdigit() else float(num2)\n return str(n1*n2)\n \nif __name__ == '__main__':\n num1 = '45.4'\n num2 = '42.3'\n test = Solution()\n out = test.multiply(num1, num2)"
},
{
"alpha_fraction": 0.42105263471603394,
"alphanum_fraction": 0.4412400722503662,
"avg_line_length": 32.82926940917969,
"blob_id": "90833b854f906a3736f53e7da2ecc817d2071485",
"content_id": "4b2ff0b79215f6a9c9a97660d4d6a95e44d952ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1387,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 41,
"path": "/validSudoku.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Determine if a Sudoku is valid, according to: Sudoku Puzzles - The Rules.\n\n# The Sudoku board could be partially filled, where empty cells are filled with the character '.'.\n\n# Note:\n# A valid Sudoku board (partially filled) is not necessarily solvable. \n# Only the filled cells need to be validated.\n\nclass Solution:\n # @param board, a 9x9 2D array\n # @return a boolean\n def isValidSudoku(self, board):\n square = [[{} for j in range(3)] for i in range(3)]\n for i in range(9):\n row, col = {}, {}\n for j in range(9):\n if board[i][j] != '.':\n if board[i][j] not in row: row[board[i][j]] = [i,j]\n else: return False\n if board[i][j] not in square[i/3][j/3]:\n square[i/3][j/3][board[i][j]] = [i,j]\n else: return False\n if board[j][i] != '.':\n if board[j][i] not in col: col[board[j][i]] = [j,i]\n else: return False\n return True\n\nif __name__ == '__main__':\n board = [\"..5.....6\",\\\n \"....14...\",\\\n \".........\",\\\n \".....92..\",\\\n \"5....2...\",\\\n \".......3.\",\\\n \"...54....\",\\\n \"3.....42.\",\\\n \"...27.6..\"]\n \n test = Solution()\n out = test.isValidSudoku(board)\n print out\n"
},
{
"alpha_fraction": 0.6172680258750916,
"alphanum_fraction": 0.6172680258750916,
"avg_line_length": 28.865385055541992,
"blob_id": "a7f9510f8a202e2108f97044d9199bf5ff41fb2d",
"content_id": "b2ae321fa126cb345926d62f67eb8c20d79f4136",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1552,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 52,
"path": "/WordDictionary.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "#Design a data structure that supports the following two operations:\n\n#void addWord(word)\n#bool search(word)\n\n#search(word) can search a literal word or a regular expression string containing only letters a-z or .. A . means it can represent any one letter.\n\n#For example:\n\n#addWord(\"bad\")\n#addWord(\"dad\")\n#addWord(\"mad\")\n#search(\"pad\") -> false\n#search(\"bad\") -> true\n#search(\".ad\") -> true\n#search(\"b..\") -> true\n\nclass WordDictionary:\n # initialize your data structure here.\n def __init__(self):\n self.lt = dict()\n\n # @param {string} word\n # @return {void}\n # Adds a word into the data structure.\n def addWord(self, word):\n l = len(word)\n if l not in self.lt:\n self.lt[l] = set([word])\n else: self.lt[l].add(word)\n\n # @param {string} word\n # @return {boolean}\n # Returns if the word is in the data structure. A word could\n # contain the dot character '.' to represent any one letter.\n def search(self, word):\n l = len(word)\n if l not in self.lt: return False\n for tomatch in self.lt[l]:\n matched = self.match(tomatch, word)\n if matched == True: return True\n return False\n \n def match(self, tomatch, word):\n for i in range(len(word)):\n if word[i] != tomatch[i] and word[i] != '.': return False\n return True \n\n# Your WordDictionary object will be instantiated and called as such:\n# wordDictionary = WordDictionary()\n# wordDictionary.addWord(\"word\")\n# wordDictionary.search(\"pattern\")"
},
{
"alpha_fraction": 0.49805447459220886,
"alphanum_fraction": 0.5252918004989624,
"avg_line_length": 27.518518447875977,
"blob_id": "6269c118b785c226dfabf5a8ce4a5b88e4cb06f7",
"content_id": "f53139f9b8de197091a11d21d31e56f4456ff46c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 771,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 27,
"path": "/firstMissingPos.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given an unsorted integer array, find the first missing positive integer.\n# For example,\n# Given [1,2,0] return 3,\n# and [3,4,-1,1] return 2.\n\n# Your algorithm should run in O(n) time and uses constant space.\n\nclass Solution:\n # @param A, a list of integers\n # @return an integer\n def firstMissingPositive(self, A):\n l = len(A)\n for i in range(l):\n while A[i]-1 != i and -1 < A[i]-1 < l:\n loc = A[i]-1\n if A[loc] == A[i]: break\n A[loc], A[i] = A[i], A[loc] \n count = 0\n while count < l and A[count]-1 == count:\n count += 1\n return count+1\n\nif __name__ == '__main__':\n A = [3,4,-1,1]\n test = Solution()\n out = test.firstMissingPositive(A)\n print out\n\n"
},
{
"alpha_fraction": 0.45374879240989685,
"alphanum_fraction": 0.4917234778404236,
"avg_line_length": 24.04878044128418,
"blob_id": "14f466637ca386d53dd4ef382221b42f7bc7baa1",
"content_id": "56e14298a8d76ca90d1e19d629d12311a8ae70cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1027,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 41,
"path": "/permutationSeq.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# The set [1,2,3,...,n] contains a total of n! unique permutations.\n\n# By listing and labeling all of the permutations in order,\n# We get the following sequence (ie, for n = 3):\n\n# \"123\"\n# \"132\"\n# \"213\"\n# \"231\"\n# \"312\"\n# \"321\"\n# Given n and k, return the kth permutation sequence.\n\n# Note: Given n will be between 1 and 9 inclusive.\n\nclass Solution:\n # @return a string\n def getPermutation(self, n, k):\n if n == 1 and k == 1: return '1'\n out = ''\n tol = 1\n for i in range(1, n): tol *= i\n num = range(1, n+1)\n \n for j in range(n-1, 0, -1):\n factor = k/tol\n res = k%tol\n if res == 0:\n out += str(num.pop(factor-1))\n for i in num[::-1]: out += str(i)\n return out\n out += str(num[factor])\n num.pop(factor)\n if num == []: return out\n tol /= j\n k = res\n\nif __name__ == '__main__':\n test = Solution()\n out = test.getPermutation(3,1)\n print out\n"
},
{
"alpha_fraction": 0.40835267305374146,
"alphanum_fraction": 0.4470224380493164,
"avg_line_length": 34.91666793823242,
"blob_id": "339dbd75bf8d98321e7054626fc9171947d7f575",
"content_id": "a86eff3d1840b52ed0349298f835ac164dbe42b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1293,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 36,
"path": "/maxRectangle.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a 2D binary matrix filled with 0's and 1's, \n# find the largest rectangle containing all ones and return its area.\n\nclass Solution:\n # @param matrix, a list of lists of 1 length string\n # @return an integer\n def maximalRectangle(self, matrix):\n if matrix == []: return 0\n m = len(matrix)\n n = len(matrix[0])\n height = [[0]*(n+1) for i in range(m)]\n maxarea, area = 0, 0\n index = []\n for i in range(m):\n matrix[i].append('0')\n index = []\n for j in range(n+1):\n if matrix[i][j] == '1':\n height[i][j] = 1+height[i-1][j] if i>0 else 1\n while index != [] and height[i][j] < height[i][index[-1]]:\n curr = index.pop()\n area = height[i][curr]*(j-index[-1]-1) if index!=[] else height[i][curr]*j\n maxarea = max(maxarea, area)\n index.append(j)\n return maxarea\n\nif __name__ == '__main__':\n matrix = [['0','1','1','0','1'],\n ['1','1','0','1','0'],\n ['0','1','1','1','0'],\n ['1','1','1','1','0'],\n ['1','1','1','1','1'],\n ['0','0','0','0','0']]\n test = Solution()\n out = test.maximalRectangle(matrix)\n print out\n"
},
{
"alpha_fraction": 0.5651697516441345,
"alphanum_fraction": 0.5772179365158081,
"avg_line_length": 31.64285659790039,
"blob_id": "9f6888404f2a205d9e8186bdf5e6588770808f99",
"content_id": "71f888d145cc16880739f3af5320391798fbe2f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 913,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 28,
"path": "/divide2int.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Divide two integers without using multiplication, division and mod operator.\n\nclass Solution:\n # @return an integer\n def divide(self, dividend, divisor):\n dividend_sign = 1 if dividend > 0 else -1\n divisor_sign = 1 if divisor > 0 else -1\n dividend_abs = abs(dividend)\n divisor_abs = abs(divisor)\n \n factor = self.binarySearch(dividend_abs, divisor_abs) \n return factor if dividend_sign == divisor_sign else 0-factor\n \n def binarySearch(self, dividend, divisor): \n if divisor > dividend: return 0 \n tol = divisor\n factor = 1\n while tol < dividend - tol:\n tol += tol\n factor += factor\n \n overhead = dividend - tol\n factor += self.binarySearch(overhead, divisor) \n return factor\n \nif __name__ == '__main__':\n test = Solution()\n out = test.divide(0, 3)"
},
{
"alpha_fraction": 0.4291338622570038,
"alphanum_fraction": 0.4488188922405243,
"avg_line_length": 23.612903594970703,
"blob_id": "1660e1002b1ef549f2a8c00d3553e5cf0fcf4339",
"content_id": "3d1d23f793908bb58c97791ee91f365f7df2588e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 762,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 31,
"path": "/setZero.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a m x n matrix, \n# if an element is 0, set its entire row and column to 0. \n# Do it in place.\n\n# @param matrix, a list of lists of integers\n# RETURN NOTHING, MODIFY matrix IN PLACE.\n\ndef setZeros(matrix):\n m = len(matrix)\n n = len(matrix[0])\n \n rowzero = set()\n colzero = set()\n \n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n rowzero.add(i)\n colzero.add(j)\n \n for j in range(n):\n for i in range(m):\n if i in rowzero:\n matrix[i] = [0]*n\n if j in colzero: \n matrix[i][j] = 0\n \n\nif __name__ == '__main__':\n B = [[1,2,0],[3,4,0],[6,7,8]]\n setZeros(B)"
},
{
"alpha_fraction": 0.48917749524116516,
"alphanum_fraction": 0.49696969985961914,
"avg_line_length": 26.071428298950195,
"blob_id": "1fe739d206c1647dc36b1edf1c8976972d42c503",
"content_id": "3b75494be565213d4f8b3a09f628ec94584fe18f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1155,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 42,
"path": "/partitionPalindrome.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string s, \n# partition s such that every substring of the partition is a palindrome.\n# Return all possible palindrome partitioning of s.\n\n# For example, given s = \"aab\",\n# Return\n# [[\"aa\",\"b\"],[\"a\",\"a\",\"b\"]]\n\n# palindromes which end at position i start at walls[j] (a list of start positions)\n# all partitions end BEFORE j: par(j)\n# for k in walls[j]:\n# par(j) = par(k).append(s[k:j])\n\ndef partition(s):\n par = [[[]]]\n for i in range(1, len(s)+1):\n temp = []\n for j in range(0, i): \n if palindrome(s[j:i]): \n temp += addOnePalindrome(par[j], s[j:i])\n par.append(temp) \n return par[-1]\n\ndef addOnePalindrome(par, addon):\n new = []\n for i in range(len(par)):\n new.append(par[i] + [addon])\n return new\n \ndef palindrome(sub):\n start = 0\n end = len(sub) - 1\n while sub[start] == sub[end]:\n if end - start <= 1:\n return True\n start += 1\n end -= 1\n return False\n \nif __name__ == '__main__':\n s = 'aab'\n out = partition(s)\n \n \n "
},
{
"alpha_fraction": 0.537172794342041,
"alphanum_fraction": 0.5528795719146729,
"avg_line_length": 28.80645179748535,
"blob_id": "9b07647e13150767b5c7f0bcd2de9c77b01dc89a",
"content_id": "8841ec0ff2e3219f6c27e6c3cfbc4e461a1ecf64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 955,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 31,
"path": "/validBST.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# leecode 98, validate binary search tree\nclass Treenode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a boolean\n def isValidBST(self, tree, small, large):\n if tree == None: return True\n \n if tree.val > small and tree.val < large:\n left = self.isValidBST(tree.left, small, tree.val)\n right = self.isValidBST(tree.right, tree.val, large)\n return (left and right)\n \n else: return False\n \nif __name__ == '__main__':\n tree = Treenode(10)\n tree.left = Treenode(9)\n tree.right = Treenode(11)\n tree.left.left = Treenode(8)\n tree.left.right = Treenode(9.5)\n tree.right.right = Treenode(12)\n tree.right.left = Treenode(10.5)\n \n Out = Solution()\n print Out.isValidBST(tree, -float('inf'), float('inf'))\n \n \n \n"
},
{
"alpha_fraction": 0.550000011920929,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 30.78125,
"blob_id": "cc56b4f6af2ed164b90154d9c54fddc346795a97",
"content_id": "45d82e3263c1069627d703a410ba58a606f7ead7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1020,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 32,
"path": "/triangle.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a triangle, find the minimum path sum from top to bottom. \n# Each step you may move to adjacent numbers on the row below.\n\n# For example, given the following triangle\n# [ [2],\n# [3,4],\n# [6,5,7],\n# [4,1,8,3]]\n\n# The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11).\n# Note:\n# Bonus point if you are able to do this using only O(n) extra space, \n# where n is the total number of rows in the triangle.\n\nclass Solution:\n # @param triangle, a list of lists of integers\n # @return an integer\n def minimumTotal(self, triangle):\n if triangle == []: return 0\n minsum = triangle[-1]\n for i in range(len(triangle)-2, -1, -1):\n temp = []\n for j in range(len(triangle[i])):\n temp.append(triangle[i][j]+min(minsum[j],minsum[j+1]))\n minsum = temp\n return min(minsum)\n\nif __name__ == '__main__':\n triangle = [[2],[3,4],[6,5,7],[4,1,8,3]]\n test = Solution()\n out = test.minimumTotal(triangle)\n print out\n\n\n\n"
},
{
"alpha_fraction": 0.5727788209915161,
"alphanum_fraction": 0.5746691823005676,
"avg_line_length": 25.399999618530273,
"blob_id": "36895d6a89bef8f3c10f31a082427931252856f7",
"content_id": "6c438acc5032a6255a8cde9b257c5e4178b0b357",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 20,
"path": "/linkedListCycle.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a linked list, determine if it has a cycle in it.\n# Follow up:\n# Can you solve it without using extra space?\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a boolean\n def hasCycle(self, head):\n record = {}\n while head != None:\n record[head] = 0\n if head.next in record: return True\n head = head.next \n return False\n\n"
},
{
"alpha_fraction": 0.5688889026641846,
"alphanum_fraction": 0.5844444632530212,
"avg_line_length": 25.13725471496582,
"blob_id": "bfb56ee7e47ba7ed5373d718553a3eebf8708d85",
"content_id": "ee41e32bb1f4c8cf5002deccc38147e0b2ade0d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1350,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 51,
"path": "/connectTree.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a binary tree\n# Populate each next pointer to point to its next right node. \n# If there is no next right node, the next pointer should be set to NULL.\n# Initially, all next pointers are set to NULL.\n\n# Note:\n# You may only use constant extra space.\n# You may assume that it is a perfect binary tree \n# (ie, all leaves are at the same level, and every parent has two children).\n\n# For example,\n# Given the following perfect binary tree,\n# 1\n# / \\\n# 2 3\n# / \\ / \\\n# 4 5 6 7\n# After calling your function, the tree should look like:\n# 1 -> NULL\n# / \\\n# 2 -> 3 -> NULL\n# / \\ / \\\n# 4->5->6->7 -> NULL\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n self.next = None\n\ndef connect(root):\n if root == None or root.left == None:\n return\n root.left.next = root.right\n if root.next != None:\n root.right.next = root.next.left\n connect(root.left)\n connect(root.right)\n \nif __name__ == '__main__':\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.left.left = TreeNode(4)\n root.left.right = TreeNode(5)\n root.right = TreeNode(3)\n root.right.left = TreeNode(6)\n root.right.right =TreeNode(7)\n\n connect(root) \n \n \n\n "
},
{
"alpha_fraction": 0.40185675024986267,
"alphanum_fraction": 0.4204244017601013,
"avg_line_length": 31.904762268066406,
"blob_id": "22163ca8cbd8aa2e978963894208d4a6fd5cc7e4",
"content_id": "aee2a23ca89e556e9353598e2bf452aea2493d6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 754,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 21,
"path": "/letterCombinations.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a digit string, \n# return all possible letter combinations that the number could represent.\n\nclass Solution:\n # @return a list of strings, [s1, s2]\n def letterCombinations(self, digits):\n mapper = {'0':' ', '1':'', '2':'abc', '3':'def', '4':'ghi', '5':'jkl',\\\n '6':'mno', '7':'pqrs', '8':'tuv', '9':'wxyz'} \n comb = ['']\n for i in digits:\n temp = []\n for j in comb:\n for k in mapper[i]:\n temp.append(j + k)\n if temp != []: comb = temp\n \n return comb\n\nif __name__ == '__main__':\n test = Solution()\n out = test.letterCombinations('23')\n \n \n \n "
},
{
"alpha_fraction": 0.4913468658924103,
"alphanum_fraction": 0.4988713264465332,
"avg_line_length": 27.276596069335938,
"blob_id": "757273e16ad58272f92a42665812447ebd10f60d",
"content_id": "2c66a64046f4414e1c844f21e30a33bd344c11fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1329,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 47,
"path": "/wildcardMatch_v3.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Implement wildcard pattern matching with support for '?' and '*'.\n\n# '?' Matches any single character.\n# '*' Matches any sequence of characters (including the empty sequence).\n\n# The matching should cover the entire input string (not partial).\n\n# The function prototype should be:\n# bool isMatch(const char *s, const char *p)\n\n# Some examples:\n# isMatch(\"aa\",\"a\") -> false\n# isMatch(\"aa\",\"aa\") -> true\n# isMatch(\"aaa\",\"aa\") -> false\n# isMatch(\"aa\", \"*\") -> true\n# isMatch(\"aa\", \"a*\") -> true\n# isMatch(\"ab\", \"?*\") -> true\n# isMatch(\"aab\", \"c*a*b\") -> false\n\n# Finally got it !\nclass Solution:\n # @param s, an input string\n # @param p, a pattern string\n # @return a boolean\n def isMatch(self, s, p):\n ls, lp = len(s), len(p)\n i, j = 0, 0\n star = []\n while i < ls:\n if j<lp and (p[j] == s[i] or p[j] == '?'): \n i, j = i+1, j+1\n elif j<lp and p[j] == '*':\n star = [i, j]\n j += 1\n elif star != []: \n star[0] += 1\n i, j = star[0], star[1]+1 \n else: return False\n\n if i == ls and j == lp: return True\n elif i == ls: return p[j:] == '*'*(lp-j)\n else: return False\n\nif __name__ == '__main__':\n test = Solution()\n out = test.isMatch('hi','*?')\n print out\n"
},
{
"alpha_fraction": 0.49033817648887634,
"alphanum_fraction": 0.5398550629615784,
"avg_line_length": 29.66666603088379,
"blob_id": "9c6e1ab4a7ec164ede9dd2aa515f6a517e697557",
"content_id": "598d9776ffb9ef9e5ace34ccaa6055b3316c9215",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 828,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 27,
"path": "/trapRain.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given n non-negative integers representing an elevation map where the width of each bar is 1, # compute how much water it is able to trap after raining.\n\n# For example, \n# Given [0,1,0,2,1,0,1,3,2,1,2,1], return 6.\n\nclass Solution:\n # @param A, a list of integers\n # @return an integer\n def trap(self, A):\n l = len(A)\n if l < 3: return 0\n leftMax, rightMax, area = A[0], A[l-1] , 0\n right = []\n for j in range(1, l-1):\n rightMax = max(rightMax, A[l-1-j])\n right.append(rightMax)\n for i in range(1, l-1):\n leftMax = max(leftMax, A[i-1])\n area += max(0, min(leftMax, right[-i])-A[i])\n return area \n\nif __name__ == '__main__':\n A = [0,1,0,2,1,0,1,3,2,1,2,1] \n B = [2,0,2]\n test = Solution()\n out = test.trap(B)\n print out\n"
},
{
"alpha_fraction": 0.4026622176170349,
"alphanum_fraction": 0.4625623822212219,
"avg_line_length": 26.976743698120117,
"blob_id": "7cc4f00538a67d13446e6f26f4d09948e6baac4f",
"content_id": "b73d9d8ea1ec6c8061c5f14be6143b03ba2a3f66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1202,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 43,
"path": "/restoreIP.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a string containing only digits, \n# restore it by returning all possible valid IP address combinations.\n\n# For example:\n# Given \"25525511135\",\n\n# return [\"255.255.11.135\", \"255.255.111.35\"]. (Order does not matter)\n\nclass Solution:\n # @param s, a string\n # @return a list of strings\n def restoreIpAddresses(self, s):\n self.ans = []\n temp = ''\n self.dfs(s, temp)\n return self.ans\n \n def dfs(self, s, temp): \n sep = ''\n if temp != '': sep = temp[:-1].split('.')\n if len(sep) > 4: return\n for x in sep: \n if int(x) > 255: return\n if s == '':\n if len(sep) == 4: self.ans.append(temp[:-1])\n return\n \n temp += s[0] +'.'\n self.dfs(s[1:], temp)\n temp = temp[:-2]\n if len(s) > 1 and s[0] != '0':\n temp += s[:2] + '.'\n self.dfs(s[2:], temp)\n temp = temp[:-3]\n if len(s) > 2 and s[0] != '0':\n temp += s[:3] + '.'\n self.dfs(s[3:], temp)\n temp = temp[:-4]\n \nif __name__ == '__main__':\n s = '25525511135'\n test = Solution()\n out = test.restoreIpAddresses(\"010010\")"
},
{
"alpha_fraction": 0.30882352590560913,
"alphanum_fraction": 0.34558823704719543,
"avg_line_length": 21.70833396911621,
"blob_id": "4920804605a2075f01570841d1e24520ec1ee48a",
"content_id": "ca1d72f86e570c6ff2192f0fbe59866824df2fe5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 544,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 24,
"path": "/threesum.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# 3 sum\ndef threeSum(num):\n num.sort()\n l = len(num)\n out = []\n \n for i in range(l-2):\n j, k = 1, 1\n if i > 0 and num[i] == num[i-1]:\n continue\n while i+j < l-k:\n if num[i] + num[i+j] + num[l-k] == 0:\n out.append([num[i], num[i+j], num[l-k]])\n j += 1\n k += 1\n elif num[i] + num[i+j] + num[l-k] > 0:\n k += 1\n else: \n j += 1\n \n return out\n \nA = [0, 1, -1, 2, 3, -2, 4, 2]\nprint threeSum(A)"
},
{
"alpha_fraction": 0.5011467933654785,
"alphanum_fraction": 0.5057339668273926,
"avg_line_length": 25.78125,
"blob_id": "deb239bb25b81d84e852a06fb1585d2d60f812d4",
"content_id": "73557b77c9ee0d2aafd082e96223b9612aeba853",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 872,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 32,
"path": "/isPalindrome.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Given a singly linked list, determine if it is a palindrome.\n\n# Follow up:\n# Could you do it in O(n) time and O(1) space?\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def isPalindrome(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: bool\n \"\"\"\n if head == None: return True\n new = ListNode(0)\n new.next = head\n \n record = []\n while head != None:\n record.append(head.val)\n head = head.next\n \n newhead = new.next\n while newhead != None:\n if len(record) <=1: return True\n elif newhead.val != record[-1]: return False\n else:\n record.pop()\n newhead = newhead.next\n "
},
{
"alpha_fraction": 0.42081448435783386,
"alphanum_fraction": 0.4819004535675049,
"avg_line_length": 18.954545974731445,
"blob_id": "1bee72c3dab7bab8f40c6a0cc4a1ce777ed5c5dc",
"content_id": "a02ad0b53285794d4589c549274d2d55fe40f828",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 22,
"path": "/reverseInteger.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Reverse digits of an integer.\n\n# Example1: x = 123, return 321\n# Example2: x = -123, return -321\ndef reverse(x):\n digit = len(str(abs(x)))\n if digit == 1:\n return x\n new = 0\n xabs = abs(x)\n sign = xabs/x\n \n \n for d in range(digit-1, -1, -1):\n new += xabs//(10**d) * 10**(digit-1-d)\n xabs = xabs%(10**d)\n \n return sign*new\n \nif __name__ == '__main__':\n x = 0\n out = reverse(x)\n\n\n\n"
},
{
"alpha_fraction": 0.3739837408065796,
"alphanum_fraction": 0.37886178493499756,
"avg_line_length": 24.45833396911621,
"blob_id": "a37db5e967cd4ceece40d168cfd561f024052eb5",
"content_id": "437ff2f040ebac40d5e3a47833c11a01a8e6cdd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 615,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 24,
"path": "/validParentheses.py",
"repo_name": "ag300g/leecode",
"src_encoding": "UTF-8",
"text": "# Valid Parentheses\n\nclass Solution:\n # @return a boolean\n def isValid(self, s): \n pair = {'(':')', '[':']', '{':'}'}\n \n stk = []\n for c in s:\n # if c is left side, apptend to stk\n if pair.get(c) != None:\n stk.append(c)\n\n elif len(stk) == 0 or pair.get(stk[-1]) != c:\n return False\n else:\n stk.pop()\n \n return True if len(stk) == 0 else False \n \nif __name__ == '__main__':\n s = '[[{}]]{[()()'\n out = Solution()\n print out.isValid(s)\n "
}
] | 196 |
andersonmoura87/aulas_python | https://github.com/andersonmoura87/aulas_python | eb53e8eeef49d6e2efdd282a390ec2ab8d41747a | 3504cb7e38c5761905e03d6a628502a7486bd766 | d0f48fdbe1edd3104984577bec8b6316cb433b9d | refs/heads/main | 2023-07-28T11:54:20.618423 | 2021-09-08T13:32:02 | 2021-09-08T13:32:02 | 404,352,143 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5040000081062317,
"alphanum_fraction": 0.5640000104904175,
"avg_line_length": 15.666666984558105,
"blob_id": "9b0e45e0f1b67f848128832c5696dae1889240ac",
"content_id": "7d5151257c082cdeab8af6bcb8e4cb59387505c9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 251,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 15,
"path": "/Testes/Teste3.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "#Tabuada\n\nnum = int(input(\"Qual tabudada você quer saber? \"))\nprint(\"-\"*15)\nprint(\"TABUADA\")\nprint(\"-\"*15)\n\nnum1 = 0\n\nwhile num1 < 10:\n num1 += 1\n resultado = num * num1\n print(\"{} X {} = {}\".format(num, num1, resultado))\n \nprint(\"-\"*15)\n"
},
{
"alpha_fraction": 0.7018072009086609,
"alphanum_fraction": 0.7078313231468201,
"avg_line_length": 24.538461685180664,
"blob_id": "ad1e0861bd4d00acc348bf6888ae4c06c9938fd0",
"content_id": "fab01e9059d2f94abaa3ba7cb170de82d1a3d646",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 332,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 13,
"path": "/Basico/pratica/ex10.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# criar uma lista vazia\n# criar um loop while para pedir ao menos 5 nomes\n# cada nome digitado ser inserido na lista - append\n# Criar um loop for para exibir cada valor da lista\n\nestados = []\n#while\nwhile len(estados) < 5:\n estado = input(\"Digite o seu estado: \")\n estados.append(estado)\n\nfor item in estados:\n print(item)\n"
},
{
"alpha_fraction": 0.6200000047683716,
"alphanum_fraction": 0.6200000047683716,
"avg_line_length": 14,
"blob_id": "ebdac801bc852e08811d95036c015d75cb6597a6",
"content_id": "89d2b988f5d6cb865a19a074a42b5994f52e19d3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 155,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 10,
"path": "/aulas/aula10.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# funções - def\n\ndef conhecer_pessoa():\n nome = input(\"Qual é o seu nome: \")\n\n print(\"Muito Prazer \" + nome)\n\nprint(\"Chamar função\")\n\nconhecer_pessoa()\n"
},
{
"alpha_fraction": 0.6380952596664429,
"alphanum_fraction": 0.6380952596664429,
"avg_line_length": 14.800000190734863,
"blob_id": "da7a7b71e03991ed1be66ae0849f6163e6b6cba9",
"content_id": "6754101a4132da2da8d0c8e9bcaa902409dd34ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 317,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 20,
"path": "/aulas/aula12.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "#Manipulação de Arquivos com Python - Banco de dados\n#CRUD \n#C - Create - Criar\n#R - Read - Ler\n#U - Update - Atualizar\n#D - Delete - Deletar\n\n\"\"\"\narq = open(\"teste.txt\", \"w\") #abrir\n\narq.write(\"uma linha de codigo\") #escrevendo\n\narq.close() #fechar\n\n\"\"\"\nler = open(\"teste.txt\", \"r\")\n\nprint(ler.read())\n\nler.close()"
},
{
"alpha_fraction": 0.61623615026474,
"alphanum_fraction": 0.61623615026474,
"avg_line_length": 22.60869598388672,
"blob_id": "941d97516af5a732792961b7b60063113c0e1955",
"content_id": "e7035f1933ecf8bff92df75a28f5fd04072739d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 546,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 23,
"path": "/Basico/pratica/ex12.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# Fazer uma função que perguta para a pessoa:\n#qual a cor do carro?\n# se o carro for vermelho é a cro do amor\n# se o carro for amarelo é a cor da amizade\n# se o carro for verde cor do ciumes\n# else: seu carro tem uma cor bonita!\n\ndef cor_do_carro():\n cor = input(\"Digite a cor de um carro: \")\n\n if cor == \"vermelho\":\n print(\"Cor do amor\")\n\n elif cor == \"amarelo\":\n print(\"Cor da amizade\")\n\n elif cor == \"verde\":\n print(\"Cor da inveja\")\n\n else:\n print(\"Seu carro tem uma cor bonita!\")\n\ncor_do_carro()"
},
{
"alpha_fraction": 0.6190476417541504,
"alphanum_fraction": 0.6190476417541504,
"avg_line_length": 20,
"blob_id": "ed618ac937b5b14d9587e9eeeb317ae5e7fa1eda",
"content_id": "052c4cf11e476766f222c5714eddc90cd0002e93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/Basico/aula01.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "print(\"Olà, mundo!\")\n"
},
{
"alpha_fraction": 0.6779026389122009,
"alphanum_fraction": 0.6966292262077332,
"avg_line_length": 23.090909957885742,
"blob_id": "9263d109170d455f1c2077436589a0a1878f6212",
"content_id": "7c8d53be44dd91937e1571f0397cc4354c4a51a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 267,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 11,
"path": "/Basico/aula03.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# tipos de dados\n\nnome = type(\"Anderson\") #sting = \"texto\"\nidade = type(34) #inteiro = \"numero\"\ncasado = type(False) #booleano = \"verdadeiro ou falso\"\ntamanho = type(73.5) #float = \"numeros com casas decimais\"\n\nprint(nome)\nprint(idade)\nprint(casado)\nprint(tamanho)\n\n\n"
},
{
"alpha_fraction": 0.7452229261398315,
"alphanum_fraction": 0.7452229261398315,
"avg_line_length": 21.428571701049805,
"blob_id": "984a0e0d81e79b990f3d3486258ae8f0309e8784",
"content_id": "c13be95fdd6404fa8ac7567fb636717dcb0bcc0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 160,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 7,
"path": "/Basico/pratica/ex11.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# fazer um programa, usando funções\n# perguntando um numero \n# perguntando outro numero\n# exibir \"o resultado da soma é: \"\n# inserir um loop while true\n\ndef "
},
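The ex11.py entry above stops at a bare `def`, so the file is a syntax error as recorded. A sketch of one possible completion that follows its comments; the function name `somar` and the exact prompts are assumptions, not from the repo:

```python
def somar():
    # ask for two numbers, as the exercise statement requires
    num1 = int(input("Digite um numero: "))
    num2 = int(input("Digite outro numero: "))
    print("o resultado da soma é: " + str(num1 + num2))

# "inserir um loop while true" per the exercise statement
while True:
    somar()
```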
{
"alpha_fraction": 0.6630434989929199,
"alphanum_fraction": 0.6630434989929199,
"avg_line_length": 22.851852416992188,
"blob_id": "61cac9124c8f0f001622b28abed7dd0103dbc2b0",
"content_id": "6c71f03d7babaa8eed62428ba899d4e2b194d0f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 656,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 27,
"path": "/Basico/pratica/ex03.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# simulação de alimentação de dados\n\nnome = input(\"Digite o nome, \")\nidade = input(\"Digite a idade, \")\ncasado = input(\"Você é casado, \")\ntamanho = input(\"Digite o seu tamanho, \")\n\nprint(type(nome))\nprint(type(idade))\nprint(type(casado))\nprint(type(tamanho))\n##### Errei ######\n\n##### Explicação Correta #####\nnome = input(\"Digite o nome: \")\nidade = int(input(\"Digite a idade: \"))\ncasado = bool(input(\"Você é casado: \"))\ntamanho = float(input(\"Digite o seu tamanho: \"))\n\nprint(type(nome))\nprint(type(idade))\nprint(type(casado))\nprint(type(tamanho))\n\n##### Explicações Extras do Prof. Nicolas #####\n# print(test)\n# teste = False # case-sensitive\n"
},
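One caveat about the "Explicação Correta" block in ex03.py: `bool(input(...))` is `True` for any non-empty string, even `"False"`. A small sketch (my addition, not from the repo) of an explicit parse instead:

```python
# bool("False") evaluates to True, so compare the text explicitly.
resposta = input("Você é casado (s/n): ").strip().lower()
casado = resposta in ("s", "sim")
print(type(casado), casado)
```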
{
"alpha_fraction": 0.5195071697235107,
"alphanum_fraction": 0.5605749487876892,
"avg_line_length": 18.280000686645508,
"blob_id": "b2c38ea3397901a42dc6a00b7259e9f66b50d4ee",
"content_id": "4b4944bc576bd0eaf7db3c10415c50304e7b0be0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 500,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 25,
"path": "/Basico/aula05.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "### Condições ###\n\n# se = if\n#else = \"se não\"\n#espaçamento = \"identação\"\n\n'''\nif (2 > 1): \n print(\"2 é maior que 1\")\nelse: \n print(\"não é maior do que 1\")\n'''\nnum1 = int(input(\"Digite um numero: \"))\nnum2 = int(input(\"Digite um numero: \"))\n\nif (num1 > num2):\n print(\"{} é maior que {}\".format(num1, num2))\nelif (num1 == num2):\n print(\"{} é igual que {}\".format(num1, num2))\nelse:\n print(\"{} não é maior que {}\".format(num1, num2))\n\n#tarefa\nBrasil = 5\nBelgica = 10\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.45787546038627625,
"alphanum_fraction": 0.5091575384140015,
"avg_line_length": 29.27777862548828,
"blob_id": "e81c287fa87ef7214ab9a1a75dae777819aa9607",
"content_id": "116a1a201126e91ee2606f97ad2755f3a39527bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 547,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 18,
"path": "/Basico/pratica/ex05.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# Tabuada\n\nnum = int(input(\"Qual tabudada você quer saber? \"))\nprint(\"-\"*15)\nprint(\"TABUADA\")\nprint(\"-\"*15)\n\nprint(\"{} X 1 = {}\".format(num, num * 1))\nprint(\"{} X 2 = {}\".format(num, num * 2))\nprint(\"{} X 3 = {}\".format(num, num * 3))\nprint(\"{} X 4 = {}\".format(num, num * 4))\nprint(\"{} X 5 = {}\".format(num, num * 5))\nprint(\"{} X 6 = {}\".format(num, num * 6))\nprint(\"{} X 7 = {}\".format(num, num * 7))\nprint(\"{} X 8 = {}\".format(num, num * 8))\nprint(\"{} X 9 = {}\".format(num, num * 9))\nprint(\"{} X 10 = {}\".format(num, num * 10))\nprint(\"-\"*15)\n\n"
},
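ex05.py builds the tabuada with ten separate `print` calls; once loops are introduced (aula08/aula09 below), `range` produces the same table. A sketch assuming the same output format:

```python
num = int(input("Qual tabuada você quer saber? "))
print("-" * 15)
print("TABUADA")
print("-" * 15)
for i in range(1, 11):  # i = 1..10, one line per product
    print("{} X {} = {}".format(num, i, num * i))
print("-" * 15)
```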
{
"alpha_fraction": 0.5770925283432007,
"alphanum_fraction": 0.5881057381629944,
"avg_line_length": 10.868420600891113,
"blob_id": "9b5e915125da8eff6f17f94e5697948b7bd1fe82",
"content_id": "542580804aac34d0fdbfbca7446ee3f57205b230",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 465,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 38,
"path": "/Basico/aula04.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "##### operadores de comparação #####\n\n\"\"\" #comentarios_com_varias_linhas_ajudam a enteder e explicar pontos sem alterar o print\nigual: \"==\"\nmaior: \">\"\nmenor: \"<\"\nmaior ou igual: \">=\"\nmenor ou igual: \"<=\"\ndiferente: \"!=\"\n\nprint(3 >= 2)\n\"\"\"\n\n\n#concatenação\n\n\"\"\"\nnum = int(input(\"Digite um numero: \"))\n\n\nprint(\"o numero é: \"+str(num))\n\n\"\"\"\n\n# operadores aritiméticos\n\n\n\"\"\"\n\nsoma: \"+\"\nsubtração: \"-\"\nmultiplicação: \"*\"\ndivisão: \"/\"\n\n\"\"\"\n\n\nprint(16/8)\n\n\n\n"
},
{
"alpha_fraction": 0.6649484634399414,
"alphanum_fraction": 0.7061855792999268,
"avg_line_length": 23.125,
"blob_id": "f269cf2b2cb46fe66cc18690f18b536fcddad9e8",
"content_id": "934c13095cb9a40b9a82a6da7f5e8c9e1342aaa8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 8,
"path": "/aulas/aula11.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "\ndef chamada(nome_digitado1, nome_digitado2):\n print(nome_digitado1)\n print(nome_digitado2)\n\nnome1 = input(\"Digite seu nome: \")\nnome2 = input(\"Digite outro nome: \")\n\nchamada(nome1, nome2)\n"
},
{
"alpha_fraction": 0.6052631735801697,
"alphanum_fraction": 0.6052631735801697,
"avg_line_length": 26.625,
"blob_id": "5ed44f43dcc8abca86ff96d55153d1e900c9539c",
"content_id": "f8e28ad5a000328fd154eb5df93c913b680ca777",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 232,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 8,
"path": "/Basico/pratica/ex06.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "nome = input(\"Digite o nome: \")\n\nif(nome == \"Anderson\"):\n print(\"Ola Anderson, tudo bem?\")\nelif (nome == \"Nicolas\"):\n print(\"Bom dia Professor, tudo bem?\")\nelse:\n print(\"Saudações, é um prazer tê-lo aqui \"+nome)\n \n\n\n"
},
{
"alpha_fraction": 0.6098654866218567,
"alphanum_fraction": 0.6636771559715271,
"avg_line_length": 10.736842155456543,
"blob_id": "1e066df0457229b4f644894bbe8552d427c717dc",
"content_id": "190b83ae929a61c6778c4b49c7d920b5fe92a764",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 19,
"path": "/Basico/aula06.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# lista\n\nnum = [2, 4, 8, 5, 7]\n\n# acessando item: primeira posição = 0\nprint(num[1])\n\n# adicionando item\nnum.append(15)\n\n# remover item da lista\nprint(num)\nnum.remove(8)\nprint(num)\n\n# substituir item\nnum[1] = 8\n\nprint(num)\n"
},
{
"alpha_fraction": 0.6582278609275818,
"alphanum_fraction": 0.6582278609275818,
"avg_line_length": 14.800000190734863,
"blob_id": "ef65dabb0cc4c55ad39d8cee3c83fdb052a6a3fa",
"content_id": "ad916e8b8e4034834b61f919c63727e34404e628",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 79,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/Basico/aula02.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "#nome = \"Anderson\"\n#print(nome)\n\nnome = input(\"digite seu nome: \")\nprint(nome)\n"
},
{
"alpha_fraction": 0.7920792102813721,
"alphanum_fraction": 0.7920792102813721,
"avg_line_length": 39.599998474121094,
"blob_id": "b551cddb99e6eaa0a5693d1e364693bfee6412b4",
"content_id": "9413c59c382f8c08eff6fb6a85d615228f03a26c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 209,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 5,
"path": "/Basico/pratica/ex14.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# fazer uma funçao de somar\n#solicitar os dois numeros\n# criar uma variavel da função\n# executar a função num loop e usando tratamento de erro\n# exibir o resultado da soma com concatenação e tudo bonito"
},
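ex14.py above contains only the exercise statement. A sketch of an implementation matching those comments, reusing the `somar` shape from ex013.py elsewhere in this repo and the try/except loop idea from Basico/aula10.py; names and prompts are assumptions:

```python
def somar(numero1, numero2):
    return numero1 + numero2

while True:
    try:
        num1 = int(input("Digite o primeiro numero: "))
        num2 = int(input("Digite o segundo numero: "))
    except ValueError:
        print("Digite apenas numeros")   # bad input: ask again
        continue
    print("O resultado da soma é: " + str(somar(num1, num2)))
    break
```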
{
"alpha_fraction": 0.6235741376876831,
"alphanum_fraction": 0.6235741376876831,
"avg_line_length": 16.46666717529297,
"blob_id": "735c0d4df88d3af9c83e6263507e9d4f2c3bf659",
"content_id": "f3a8abf4059b7f38ce3d0be2d8dc08eba6804742",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 266,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 15,
"path": "/Basico/aula07.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# dicionarios\n\n# Key Valor=value\ncapital = {\"São Paulo\" : \"SP\", \"Rio de Janeiro\" : \"RJ\"}\n\n# print(type(capital))\n\nprint(capital[\"São Paulo\"])\n\ncapital[\"Minas Gerais\"] = \"Belo Horizonte\"\n\nprint(capital)\n\ncapital.pop(\"São Paulo\")\nprint(capital)\n\n"
},
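aula07.py covers indexing, insertion and `pop`; the other everyday dict operations are iteration and safe lookup. A small sketch (my addition) using `items()` and `get`:

```python
capital = {"São Paulo": "SP", "Rio de Janeiro": "RJ"}

for estado, sigla in capital.items():   # iterate key/value pairs
    print(estado, "->", sigla)

# get() returns a default instead of raising KeyError
print(capital.get("Bahia", "não cadastrado"))
```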
{
"alpha_fraction": 0.5751072764396667,
"alphanum_fraction": 0.6094420552253723,
"avg_line_length": 13.625,
"blob_id": "de85d52417842c6d84a1103a107307fd3a4bdee4",
"content_id": "2330d6af6edcfb552483c3984e546c0a5e7ee4d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 16,
"path": "/Testes/Teste2.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "lista_de_nomes = []\n\nprint(\"-\"*15)\nprint(\"Lista de Chamada\")\nprint(\"_\"*15)\n\nnum = 0\n\nwhile num < 5:\n num += 1\n nome = input(\"Digite o seu nome: \")\n lista_de_nomes.append(nome)\n\n\nprint(lista_de_nomes)\nprint(lista_de_nomes[3])"
},
{
"alpha_fraction": 0.7398945689201355,
"alphanum_fraction": 0.7398945689201355,
"avg_line_length": 26.047618865966797,
"blob_id": "d46ce1451b5d90dcbb04e51e4752f604615b8039",
"content_id": "4818fb84813291d0506b90ef744b6580eb20b484",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 570,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 21,
"path": "/Testes/Teste.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# Criar um dicionario vazio\n# Solicite estado e capital do estado do Usuario\n# adicione esses valores ao dicionario\n# Estado Key e a capital Value\n# Pegue o estado digitado e buscar no dicionario\n# Exibir a capital \n\n#Capital: capital\n\n# dicionario vazio\nadress = {} #ok\n\n# Solicitar Estado e Capital - OK\nestado = input(\"Digite o seu estado: \")\ncapital = input(\"Digite a capital: \")\n\n# adicione valores ao dicionario - Errei uma parte\nadress[estado] = capital\n\n# pegue o estado digitado e buscar no dicionario / Exibir capital\nprint(\"A capital é: \" + adress[estado])\n\n"
},
{
"alpha_fraction": 0.5444839596748352,
"alphanum_fraction": 0.5516014099121094,
"avg_line_length": 14.61111068725586,
"blob_id": "40ad066954ce693fa1a27f8cba47720048b4546c",
"content_id": "98f6b05510847b6a4a40dc2581f1111a0e232cf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 281,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 18,
"path": "/Basico/aula10.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# tratamentos de erros\n\ndef somar_cinco():\n num = int(input(\"Digite um numero: \"))\n print(\"{} + 5 = {}\".format(num, num + 5))\n\n\nwhile True:\n try:\n somar_cinco()\n\n except:\n print(\"Digite apenas numeros\")\n\n finally:\n break\n\nprint(\"Chegou no fim\")\n"
},
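Note that in Basico/aula10.py the `finally: break` runs whether or not an exception was raised, so the `while True` makes exactly one pass even on bad input. If the intent is to re-ask until the number parses, `else: break` is the usual pattern; a sketch:

```python
def somar_cinco():
    num = int(input("Digite um numero: "))
    print("{} + 5 = {}".format(num, num + 5))

while True:
    try:
        somar_cinco()
    except ValueError:
        print("Digite apenas numeros")  # loop again and re-ask
    else:
        break  # reached only when no exception occurred

print("Chegou no fim")
```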
{
"alpha_fraction": 0.6057347655296326,
"alphanum_fraction": 0.6379928588867188,
"avg_line_length": 16.808509826660156,
"blob_id": "2b17dd4018eff956b587bee57743e3fa2115f0d4",
"content_id": "7b52df89ea6fa4f9916003c4f77df19435674d88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 47,
"path": "/Testes/extra.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "\"\"\"\n\nnum1 = int(input(\"Número um: \"))\nnum2 = int(input(\"Número dois: \"))\nnum3 = int(input(\"Número três: \"))\n\nif num1 > num2 and num3:\n print (\"Número um é o maior numero!\")\nelif num2 > num1 and num3:\n print (\"Número dois é o maior numero\")\nelif num3 > num1 and num2:\n print (\"Número três é o maior numero\")\n\nmaior = 5\nmenor = 10\n\nprint ('Maior: %d ' %maior)\nprint ('Menor: %d ' %menor)\n\n\"\"\"\nfrom tkinter import *\n\nroot = Tk()\nroot.geometry(\"500x400\") \na = Label(root, text =\"Digite o seu email\")\nb = Button(root, text=\"ENTRAR\")\ne = Entry(root)\na.pack()\nb.pack()\ne.pack()\n\nroot.mainloop()\n\n# Python tkinter hello world program\n \nfrom tkinter import *\n\nroot = Tk()\nroot.geometry(\"500x400\") \na = Label(root, text =\"Digite o seu email\")\nb = Button(root, text=\"ENTRAR\")\ne = Entry(root)\na.pack()\ne.pack()\nb.pack()\n\nroot.mainloop()\n"
},
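The commented-out block at the top of extra.py has a classic pitfall: `if num1 > num2 and num3:` tests `num1 > num2` and then the truthiness of `num3`, not `num1 > num3`. A corrected sketch (my addition; ties ignored for brevity):

```python
num1 = int(input("Número um: "))
num2 = int(input("Número dois: "))
num3 = int(input("Número três: "))

# Each comparison must be spelled out (or use max()).
if num1 > num2 and num1 > num3:
    print("Número um é o maior numero!")
elif num2 > num1 and num2 > num3:
    print("Número dois é o maior numero")
else:
    print("Número três é o maior numero")

print("Maior:", max(num1, num2, num3))
```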
{
"alpha_fraction": 0.6186046600341797,
"alphanum_fraction": 0.6558139324188232,
"avg_line_length": 25.75,
"blob_id": "e1f16ba65667fa64ecdb2c7c570c5a978884da42",
"content_id": "e7a4d60805e50168c054dd00883f4620e5a87820",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 8,
"path": "/Basico/pratica/ex013.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "\ndef somar(numero1, numero2):\n soma = numero1 + numero2\n print(\"O resultado é: \" + str(soma))\n\nnum1 = int(input(\"Digite o primeiro numero: \"))\nnum2 = int(input(\"Digite o segundo numero: \"))\n\nsomar(num1, num2)\n"
},
{
"alpha_fraction": 0.541218638420105,
"alphanum_fraction": 0.5722819566726685,
"avg_line_length": 21.97222137451172,
"blob_id": "2e0ae328ecce3ef99b8cf554ad8ede0fd750c8ca",
"content_id": "70dce07f5deb770d961435db76a18b89dcaedf4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 853,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 36,
"path": "/Projetos/calculadora.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "def mais(num1, num2):\n result = num1 + num2\n print(\"O resultado é: \" +str(result))\n\ndef menos(num1, num2):\n result = num1 - num2\n print(\"O resultado é: \" +str(result))\n\ndef vezes(num1, num2):\n result = num1 * num2\n print(\"O resultado é: \" +str(result))\n\ndef dividir(num1, num2):\n result = num1 / num2\n print(\"O resultado é: \" +str(result))\n\nwhile True:\n num1 = int(input(\"Digite o primeiro numero: \"))\n num2 = int(input(\"Digite o segundo numero: \"))\n\n conta = input(\"Qual operação você deseja fazer? \")\n\n if conta == \"adição\":\n mais(num1 , num2)\n \n elif conta == \"subtração\":\n menos(num1 , num2)\n \n elif conta == \"multiplicação\":\n vezes(num1 , num2)\n \n elif conta == \"divisão\":\n dividir(num1 , num2)\n\n else:\n print(\"Operação invalida\")\n\n \n"
},
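calculadora.py raises `ZeroDivisionError` on `dividir(x, 0)`, crashes on non-numeric input, and its `while True` has no exit. A sketch of those three guards; only the division branch is shown, and the `'sair'` option is an assumption:

```python
def dividir(num1, num2):
    if num2 == 0:
        print("Não é possível dividir por zero")
        return
    print("O resultado é: " + str(num1 / num2))

while True:
    conta = input("Qual operação você deseja fazer? (ou 'sair') ")
    if conta == "sair":
        break
    try:
        num1 = int(input("Digite o primeiro numero: "))
        num2 = int(input("Digite o segundo numero: "))
    except ValueError:
        print("Digite apenas numeros")
        continue
    if conta == "divisão":
        dividir(num1, num2)
    else:
        print("Operação invalida")
```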
{
"alpha_fraction": 0.48507463932037354,
"alphanum_fraction": 0.5149253606796265,
"avg_line_length": 9.307692527770996,
"blob_id": "4c98876c6f662e025f44c66d79473c9397eb5fcc",
"content_id": "e781f79be84ef30b629d17c9709e8dc0f5eb3bba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 13,
"path": "/Basico/aula08.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# loops\n# loop: for\n# loop: while\n\n# while = \"enquanto\"\n\nnum = 0\n\nwhile (num < 10):\n num += 0\n print(num)\n \nprint(\"Saiu...\")\n"
},
{
"alpha_fraction": 0.5675895810127258,
"alphanum_fraction": 0.5749185681343079,
"avg_line_length": 65.37837982177734,
"blob_id": "2c9be56359419fefdfb890276ebc59aaa8f17c6d",
"content_id": "7b0f00fcd0ddbdf52fcc4fc5a4ae714b4309dc3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2460,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 37,
"path": "/Testes/anaconda.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# INICIO DESCARGA FERTILIZANTES ETC\n barge_index_fertETC = -1\n if (~manutencao_carregamento_barcFertETC and\n collections.Counter(lista_abertura_posicao_barcacas[1])[\n 'Cheia ETC Fert.'] > 0 and barcacas_descarga_fertETC == 1):\n barcacas_descarga_fertETC = 1\n barge_index_fertETC = lista_abertura_posicao_barcacas[1].index('Cheia ETC Fert.')\n lista_abertura_posicao_barcacas[1][barge_index_fertETC] = 'Em Descarga ETC Fert.'\n # print('INÍCIO DE DESCARGA BARCAÇA FERTILIZANTE' , dt_simulacao )\n\n if (~manutencao_carregamento_barcFertETC and\n collections.Counter(lista_abertura_posicao_barcacas[1])[\n 'Em Descarga ETC Fert.'] > 0 and barcacas_descarga_fertETC):\n for i in lista_abertura_posicao_barcacas_aux[2:]:\n if (lista_abertura_posicao_barcacas[lista_abertura_posicao_barcacas_aux.index(i)][\n barge_index_fertETC] > 0):\n lista_descarga_fertilizantesETC[0].append(dt_simulacao)\n lista_descarga_fertilizantesETC[1].append(min(taxa_descarga_fertETC,\n lista_abertura_posicao_barcacas[\n lista_abertura_posicao_barcacas_aux.index(i)][\n barge_index_fertETC]))\n lista_descarga_fertilizantesETC[2].append(\n lista_abertura_posicao_barcacas_aux[lista_abertura_posicao_barcacas_aux.index(i)])\n lista_descarga_fertilizantesETC[3].append(nome_cenario)\n\n lista_abertura_posicao_barcacas[lista_abertura_posicao_barcacas_aux.index(i)][\n barge_index_fertETC] -= min(taxa_descarga_fertETC, lista_abertura_posicao_barcacas[\n lista_abertura_posicao_barcacas_aux.index(i)][barge_index_fertETC])\n\n break\n\n if verificarCargaBarcacaRetorno(barge_index_fertETC) == 0:\n barcacas_descarga_fertETC = 0\n print('Indice da Barcaça Fim Carregamento:'barge_index_fertETC')\n # print('DESCARGA BARCAÇA FERTILIZANTE REALIZADA' , dt_simulacao )\n lista_abertura_posicao_barcacas[1][barge_index_fertETC] = 'Vazia ETC'\n # FIM DESCARGA FERTILIZANTES ETC\n"
},
{
"alpha_fraction": 0.6583850979804993,
"alphanum_fraction": 0.6583850979804993,
"avg_line_length": 24.421052932739258,
"blob_id": "53ab15bfbf437e65420fca6d54a3f2f271d43e2e",
"content_id": "dc71c64182734643bbb309a37122d63be6f1bcb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 485,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 19,
"path": "/Basico/pratica/ex07p1.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "nomes_conhecidos = [\"Anderson\", \"Nicolas\"]\n\nnome = input(\"Digite seu nome: \")\n\nif (nome in nomes_conhecidos): \n print(\"Ola bem vindo de volta {}\".format(nome))\n\nelse:\n print(\"Ola {} é um prazer te conhecer\".format(nome))\n nomes_conhecidos.append(nome)\n\nnome = input(\"Digite seu nome: \")\n\nif (nome in nomes_conhecidos): \n print(\"Ola bem vindo de volta {}\".format(nome))\n\nelse:\n print(\"Ola {} é um prazer te conhecer\".format(nome))\n nomes_conhecidos.append(nome)\n"
},
{
"alpha_fraction": 0.679425835609436,
"alphanum_fraction": 0.7081339955329895,
"avg_line_length": 15,
"blob_id": "438f66db8cb30d395b2ed4115fe29d6349e416b4",
"content_id": "c9c5c4373b6e6d50ab079ec535b164edc113ddd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 209,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 13,
"path": "/Basico/aula06p2.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# revisando listas\n\nemails = []\n\nemail1 = input(\"Digite seu E-mail: \")\nemail2 = input(\"Digite seu E-mail: \")\n\nemails.append(email1)\nemails.append(email2)\n\nprint(emails[0])\nemails.remove(email2)\nprint(emails)\n\n"
},
{
"alpha_fraction": 0.5839753746986389,
"alphanum_fraction": 0.597842812538147,
"avg_line_length": 22.178571701049805,
"blob_id": "63e64258c1fe123cef1e16d58bf0f4009dcfdaca",
"content_id": "7eaad08594118d04bf460cf4f67f258717fb8cee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 654,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 28,
"path": "/aulas/aula13.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# Bibliotecas no Python\n\nfrom random import randint\n\nnumero_aleatorio = randint(1, 10)\n\ncontador = 0\n\nwhile True:\n contador += 1\n\n if contador < 4:\n tentativa = int(input(\"Digite um numero de 1 a 10: \"))\n\n if tentativa == numero_aleatorio:\n print(\"Parabéns, você acentou | tentativa: \"+str(contador))\n exit()\n\n elif numero_aleatorio > tentativa:\n print(\"O numero é maior! Tente novamente!\")\n \n elif numero_aleatorio < tentativa:\n print(\"O numero é menor! Tente novamente!\")\n \n else:\n break\n\nprint(\"Você perdeu | o numero era: \"+str(numero_aleatorio))\n"
},
{
"alpha_fraction": 0.5736433863639832,
"alphanum_fraction": 0.5968992114067078,
"avg_line_length": 20.25,
"blob_id": "0096a1645323ea76e03a1ad1ab9901e355d8dce9",
"content_id": "53718e93efe53d834c6a083d03cd04f03f6247ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 12,
"path": "/Testes/Teste4.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# teste calculadora\n\ndef somar():\n num1 = int(input(\"Digite um numero: \"))\n num2 = int(input(\"digite um numero: \"))\n\n soma = num1 + num2 \n\n print(\"O resulatdados da soma de {} com {} é: {}\".format(num1, num2, soma))\n\n# chamar a função\nsomar() "
},
{
"alpha_fraction": 0.7338129281997681,
"alphanum_fraction": 0.7338129281997681,
"avg_line_length": 25.769229888916016,
"blob_id": "e3056f6b83754263cc5a0440776c68236ab0332e",
"content_id": "d945fdf49160ee18e179744e678313df57a7f6e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 706,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 26,
"path": "/Basico/pratica/ex09.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# Criar um dicionario vazio\n# capturar email e senha\n# Adicione os dados no dicionario\n# Pedir para o usuario digitar tudo \n# Criar uma condição que vai verificar se os valores estão corretos\n\n# dicionario de usuarios \nusers = {}\n\n# informações de cadastro\nemail = input(\"Digite seu e-mail: \")\nsenha = input(\"Digite sua senha: \")\n\n# salvando as informações de cadastro no dicionario\nusers[email] = senha\n\n# capturando as informações de login\nemail_login = input(\"Digite seu e-mail: \")\nsenha_login = input(\"Digite sua senha: \")\n\n# Verificar se o usario existe\nif email_login in users and senha_login == users[email_login]:\n print(\"Login ok!\")\n\nelse:\n print(\"Informações de login invalidas\")"
},
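In ex09.py the `in` test plus indexing can be collapsed with `dict.get`, which returns `None` for an unknown key instead of raising `KeyError`. A sketch of the same check (the sample account is hypothetical):

```python
users = {"ana@example.com": "1234"}

email_login = input("Digite seu e-mail: ")
senha_login = input("Digite sua senha: ")

# get() yields None for unregistered e-mails, so one comparison suffices
if users.get(email_login) == senha_login:
    print("Login ok!")
else:
    print("Informações de login invalidas")
```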
{
"alpha_fraction": 0.603210985660553,
"alphanum_fraction": 0.6238532066345215,
"avg_line_length": 14.571428298950195,
"blob_id": "45a030243b81c92c3f5a86d8f0e34986f6fbdc5b",
"content_id": "cff1cbd96893251e8834eadb4c228115de7d8ead",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 440,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 28,
"path": "/Basico/aula09.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# Loop For - percorrer coisas\n\nnomes = [\"Paulo\", \"Maria\", \"João\", \"Fernando\"]\n\n# 1°valor = item 2° valore Objeto completo\nfor item in nomes:\n print(item)\n\n\"\"\" jeito trabalhoso\nprint(nomes[0])\nprint(nomes[1])\nprint(nomes[2])\nprint(nomes[3])\n\"\"\"\n\n\n\"\"\"\n# Pelo while\n\nnomes = [\"Paulo\", \"Maria\", \"João\", \"Fernando\"]\n\ncontador = 0\n\nwhile contador < len(nomes):\n print(nomes[contador])\n\n contador += 1 # contador = contador + 1\n\"\"\"\n"
},
{
"alpha_fraction": 0.663847804069519,
"alphanum_fraction": 0.6955602765083313,
"avg_line_length": 20.545454025268555,
"blob_id": "417afa7c00396bbd08f4045fb83edebbd3d7acc1",
"content_id": "d6b0577c6b63622b81464167e457e0d19b34fdce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 473,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 22,
"path": "/Basico/pratica/ex07p2.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "lista_de_nomes = []\n\nprint(\"-\"*15)\nprint(\"Lista de Chamada\")\nprint(\"_\"*15)\n\nnome1 = input(\"Digite o seu nome: \")\nnome2 = input(\"Digite o seu nome: \")\nnome3 = input(\"Digite o seu nome: \")\nnome4 = input(\"Digite o seu nome: \")\nnome5 = input(\"Digite o seu nome: \")\n\n# adicionar itens\nlista_de_nomes.append(nome1)\nlista_de_nomes.append(nome2)\nlista_de_nomes.append(nome3)\nlista_de_nomes.append(nome4)\nlista_de_nomes.append(nome5)\n\nprint(lista_de_nomes)\n\nprint(lista_de_nomes[3])"
},
{
"alpha_fraction": 0.6978417038917542,
"alphanum_fraction": 0.7002398371696472,
"avg_line_length": 17.954545974731445,
"blob_id": "2f4caee1a2376958a0500f6722ffaa01416a4ff7",
"content_id": "a9474112020e718666093008c3c4949bf2d80088",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 420,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 22,
"path": "/Basico/pratica/ex02.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# nomes de variaveis proibidas\n# 1teste = \"valor\"\n#@teste = \"valor\"\n#t%este = \"valor\"\n#profissão= \"valor\"\n\n\nnome= input(\"Digite seu nome: \")\nidade= input(\"Digite sua idade: \")\nprofissao= input(\"Digite sua profissão: \")\n\nprint(nome)\nprint(idade)\nprint(profissao)\n\nnome= input(\"Digite seu nome: \")\nidade= input(\"Digite sua idade: \")\nprofissao= input(\"Digite sua profissão: \")\n\nprint(nome)\nprint(idade)\nprint(profissao)\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 23,
"blob_id": "dae5955f304a7bb2f9740b8db14bdf29e0a19c4d",
"content_id": "2ebb1d4d8da07e75b4d51b76679b0b770883d46e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 54,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/Basico/pratica/ex01.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "print(\"olà, mundo!\")\nprint(\"instalação ubuntu\")\n\n\n\n"
},
{
"alpha_fraction": 0.6102941036224365,
"alphanum_fraction": 0.6397058963775635,
"avg_line_length": 21.66666603088379,
"blob_id": "6252b6dbe47d7dc52b21e0a5785c1cdd72d20ac9",
"content_id": "9055d02215da2e32b4bafe77cd8e631ad5d402cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 137,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 6,
"path": "/Basico/pratica/ex04.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "num1 = int(input(\"digite um numero: \"))\nnum2 = int(input(\"digite um numero: \"))\n\nsoma = num1 + num2\n\nprint(\"O resultado é: \"+str(soma))\n"
},
{
"alpha_fraction": 0.7253788113594055,
"alphanum_fraction": 0.7291666865348816,
"avg_line_length": 28.38888931274414,
"blob_id": "d997752a63c520f221afa1458638020de0f760fe",
"content_id": "4d4e0948767671923ed206b7c9642c0db2b8d8dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 528,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 18,
"path": "/Basico/pratica/ex08.py",
"repo_name": "andersonmoura87/aulas_python",
"src_encoding": "UTF-8",
"text": "# criar lista com 5 cores - x\n# exibir a lista completa - exibir lista completa x\n# exibir um item da lista x\n# remover um item da lista - exibir lista completa x\n# adicionar uma cor nova - exibir lista completa x\n\nLista_de_cores = [\"Amarelo\", \"Verde\", \"Azul\", \"Marrom\", \"Vermelho\"]\n\nprint(Lista_de_cores)\n\nprint(Lista_de_cores[3]) #exibir a cor marrom\n\nLista_de_cores.remove(\"Azul\") #remover a cor azul\nprint(Lista_de_cores) #exibir lista completa\n\nLista_de_cores.append(\"Branco\") #adicionar uma nova cor\n\nprint(Lista_de_cores)"
}
] | 37 |
RexTitanium/Python_Program | https://github.com/RexTitanium/Python_Program | 53fbb44dcaed2aaeffbbc7e6b55474bce452696d | 3bc8c28980f7f616ea1e32be14f103d651240696 | f49fc52a9a59b489c38da912af7a4f89275fb787 | refs/heads/master | 2023-08-17T06:49:05.188746 | 2021-09-30T15:21:30 | 2021-09-30T15:21:30 | 412,117,417 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5851648449897766,
"alphanum_fraction": 0.5879120826721191,
"avg_line_length": 26.200000762939453,
"blob_id": "2d76bddc78a04644c0e18f483f2bf849d93a7282",
"content_id": "2231baeed585a7bdec7e8c8cece4a98e188982b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1092,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 40,
"path": "/bollywood.py",
"repo_name": "RexTitanium/Python_Program",
"src_encoding": "UTF-8",
"text": "import random;\n\ndef bollywood(count,bolly):\n for i in range(count): \n print (bolly[i],end=\" \")\n\n\nmovieList =[\"The Martian\",\"Interstellar\",\"Slumdog Millionaire\",\"Hot Tub Time Machine\",\"Frozen\"]\nmovieLetters = list(random.choice(movieList).lower())\nguessLetters=[]\nbolly = list(\"BOLLYWOOD\")\ncount = 9\nfor i in movieLetters :\n if i != \" \":\n guessLetters.append(\"_\")\n else :\n guessLetters.append(\" \")\n\n\nwhile(True) :\n bollywood(count,bolly)\n if count == 0:\n print (\"Looks like you could not guess the Movie and your lives are over too\")\n break\n print(\"\\n\\n\")\n for i in guessLetters :\n print (i.upper(), end=\" \")\n print(\"\\n\\n\")\n if(guessLetters == movieLetters):\n print (\"Successfully Guessed\")\n break\n letter=input(\"Guess the letter:\")\n if letter == \"quit\":\n break\n for i in range(len(movieLetters)) :\n if letter not in movieLetters:\n count= count - 1\n break\n if letter == movieLetters[i] and letter != \" \":\n guessLetters[i] = movieLetters[i]\n "
}
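In bollywood.py a wrong letter costs a life on every repeated guess, because past guesses are not remembered. A sketch (the helper name and structure are my own, not from the repo) that tracks tried letters in a set so duplicates are free:

```python
tried = set()

def apply_guess(letter, movie_letters, guess_letters, count):
    """Apply one guess; return the remaining life count."""
    if letter in tried:
        return count              # repeated guess: no penalty
    tried.add(letter)
    if letter not in movie_letters:
        return count - 1          # wrong letter costs one life
    for i, ch in enumerate(movie_letters):
        if ch == letter and ch != " ":
            guess_letters[i] = ch
    return count

movie = list("frozen")
blanks = ["_" if c != " " else " " for c in movie]
print(apply_guess("o", movie, blanks, 9), blanks)
```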
] | 1 |
OpiumSmoke/django-tutorial-todolist | https://github.com/OpiumSmoke/django-tutorial-todolist | 5314c2dfa8e6fd4303c063f7184ac5be96e08bfb | 722d75803fdac3e360fe3fc9e76f501fe441f4fc | 070fcc589f91d5868125ac55cae3c64cd65844a9 | refs/heads/master | 2020-04-29T03:53:39.293779 | 2018-02-11T14:23:47 | 2018-02-12T11:57:56 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7247244119644165,
"alphanum_fraction": 0.7360630035400391,
"avg_line_length": 43.08333206176758,
"blob_id": "4eba04f71e942de69b8a5e693405a80a1780a1bf",
"content_id": "0a338878bdb63e4a56e2ea2828a48d74c431a0fa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3175,
"license_type": "permissive",
"max_line_length": 265,
"num_lines": 72,
"path": "/README.md",
"repo_name": "OpiumSmoke/django-tutorial-todolist",
"src_encoding": "UTF-8",
"text": "### :coffee: Motivation\n\nThis project is a simple tutorial web application managing personal todo list.\n\nIt's written on top of [Django framework](https://docs.djangoproject.com/) and it's main purpose is educating myself and other people who are not familiar with many things in Django. Notice that this project is far from an web application with some serious quality.\n\n### :coffee: A Starting Point\n\nThis project is based on [this excellent tutorial video](https://www.youtube.com/watch?v=2yXfUPwlZTw&t=218s). (You can find the tutorial project associated with the video [here](https://github.com/bradtraversy/django-todolist))\n\n### :coffee: Demonstrated Features\n\nThe original todo list application from the video was concise, but too simple, so I started to add a few extras to educate myself as follows:\n\n- Support for Django 2.0\n- Complete CRUD functions for models\n- Adding another model called Category to demonstrate many-to-many relations among models\n- Basic styling and frontend scripting\n- Adding extra shell scripts to help minor issues\n - Making Django shell a little more convenient\n - Quickly adding test data to the DB \n\n### :coffee: How To Run It By Yourself\n\n1. Install or set up prerequisites (Mentioning installation steps for these prerequisites is out of scope of this readme) \n - Python 3 **(Notice that Python 2.7 won't work for Django 2.0)**\n - Django - [Refer to the official installation guide](https://docs.djangoproject.com/en/2.0/intro/install/)\n - Database backend software such as Sqlite3, or MySQL, etc.\n - This project uses Sqlite3, but also has been tested with MySQL.\n1. Clone the project to your local system \n - `git clone https://github.com/suewonjp/django-tutorial-todolist.git` \n - `cd django-tutorial-todolist` \n1. Run the DB migrations \n - `./manage.py migrate`\n1. Add test data into the DB \n - python [tools/populate-test-models.py](tools/populate-test-models.py)\n1. Confirm it works \n - `./manage.py runserver`\n - Access the application in your browser (The url is `localhost:8000` by default)\n\n\n> **TIP**\n>\n> You can invoke Django shell by executing [tools/shell.sh](tools/shell.sh). The shell script will import model classess and others for you. \n> It's much more convenient rather than using `./manage.py shell`\n\n### :coffee: Todo\n\n- Adding unit test\n- Demonstration of how to use Django's Form class when dealing with HTML forms\n\n### :copyright: COPYRIGHT/LICENSE/DISCLAIMER\n\n Copyright (c) 2018 Suewon Bahng, [email protected]\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \n http://www.apache.org/licenses/LICENSE-2.0\n \n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n### :busts_in_silhouette: CONTRIBUTORS\nSuewon Bahng \n\n* * *\nUpdated by Suewon Bahng ( Jan 2018 )\n\n"
},
{
"alpha_fraction": 0.6745098233222961,
"alphanum_fraction": 0.6745098233222961,
"avg_line_length": 41.44444274902344,
"blob_id": "2bc0f8525f69bb74d54434d9c456d0280dd2d1ce",
"content_id": "c12b75760788f124b71be20b48da61e6a3bc8081",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 765,
"license_type": "permissive",
"max_line_length": 95,
"num_lines": 18,
"path": "/todos/urls.py",
"repo_name": "OpiumSmoke/django-tutorial-todolist",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\napp_name = 'todos'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('details/<int:id>', views.details, name='detail'),\n path('add', views.add, name='add'),\n path('update/<int:id>', views.update, name='update'),\n path('delete/<int:id>', views.delete, name='delete'),\n path('category', views.category_index, name='category/index'),\n path('category/add', views.category_add, name='category/add'),\n path('category/details/<int:category_id>', views.category_detail, name='category/details'),\n path('category/update/<int:category_id>', views.category_update, name='category/update'),\n path('category/delete/<int:category_id>', views.category_delete, name='category/delete'),\n]\n\n"
},
{
"alpha_fraction": 0.7165775299072266,
"alphanum_fraction": 0.7201426029205322,
"avg_line_length": 31.941177368164062,
"blob_id": "a2e599d4c28c60434e386d4983aedef84cb03207",
"content_id": "0818ef1f89d94f4323f5398c8e786cd00654902a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 561,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 17,
"path": "/tools/.pythonrc.py",
"repo_name": "OpiumSmoke/django-tutorial-todolist",
"src_encoding": "UTF-8",
"text": "import os, sys, importlib\nfrom django.test.utils import setup_test_environment\n\nsetup_test_environment()\n\nprj = importlib.__import__(os.environ['DJANGO_SETTINGS_MODULE'].split('.')[0], fromlist=('settings',))\nsettings = prj.settings\n\napp_names = [ app.split('.')[0] for app in settings.INSTALLED_APPS if not app.startswith('django') ]\napps = {}\nfor app in app_names:\n apps[app] = importlib.__import__(app)\n\n### The code so far is general and reusable.\n### Add project specific code from here.\nfrom todos.models import Todo\nfrom todos.models import Category\n\n"
},
{
"alpha_fraction": 0.616258442401886,
"alphanum_fraction": 0.6219382286071777,
"avg_line_length": 35.10256576538086,
"blob_id": "a0dec77f04b0037a06cc22fd8f0c411b802077c3",
"content_id": "412889aecbb849a6518234cae192b04a435e57d0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2817,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 78,
"path": "/tools/populate-test-models.py",
"repo_name": "OpiumSmoke/django-tutorial-todolist",
"src_encoding": "UTF-8",
"text": "import os, sys, django\n\ndef add_test_todos():\n print('Populating Todo objects...')\n\n if not Todo.objects.filter(title='My 1st Todo'):\n todo = Todo(title='My 1st Todo', text='Reading...')\n todo.save()\n if not Todo.objects.filter(title='My 2nd Todo'):\n todo = Todo(title='My 2nd Todo', text='Playing with my dog...')\n todo.save()\n if not Todo.objects.filter(title='My 3rd Todo'):\n todo = Todo(title='My 3rd Todo', text='Eating...')\n todo.save()\n if not Todo.objects.filter(title='My 4th Todo'):\n todo = Todo(title='My 4th Todo', text='Sleeping...')\n todo.save()\n if not Todo.objects.filter(title='My 5th Todo'):\n todo = Todo(title='My 5th Todo', text='Debugging...')\n todo.save()\n\ndef add_test_categories():\n print('Populating Category objects...')\n\n if not Category.objects.filter(name='work'):\n category = Category(name='work')\n category.save()\n if not Category.objects.filter(name='life'):\n category = Category(name='life')\n category.save()\n if not Category.objects.filter(name='hobby'):\n category = Category(name='hobby')\n category.save()\n if not Category.objects.filter(name='study'):\n category = Category(name='study')\n category.save()\n\ndef add_relations():\n print('Populating relations between objects...')\n\n category = Category.objects.filter(name='work').get()\n if not category.todos.all():\n category.todos.add(Todo.objects.filter(title='My 5th Todo').get())\n\n category = Category.objects.filter(name='life').get()\n if not category.todos.all():\n category.todos.add(Todo.objects.filter(title='My 3rd Todo').get())\n category.todos.add(Todo.objects.filter(title='My 4th Todo').get())\n\n category = Category.objects.filter(name='hobby').get()\n if not category.todos.all():\n category.todos.add(Todo.objects.filter(title='My 1st Todo').get())\n category.todos.add(Todo.objects.filter(title='My 2nd Todo').get())\n\n category = Category.objects.filter(name='study').get()\n if not category.todos.all():\n category.todos.add(Todo.objects.filter(title='My 1st Todo').get())\n\nif __name__ == \"__main__\":\n sys.path.append(os.path.abspath(os.path.curdir))\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"todolist.settings\")\n django.setup()\n\n from todos.models import Todo\n from todos.models import Category\n\n if Todo.objects.all():\n print('You have data in the db.')\n print('Executing this script may mess with the existing data.')\n print('Still want to proceed? (y/n)')\n y = input().lower()\n if y != 'y' and y != 'yes':\n print('Aborting...')\n sys.exit()\n\n add_test_todos()\n add_test_categories()\n add_relations()\n\n"
},
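Each filter-then-create pair in populate-test-models.py can be written with Django's `Model.objects.get_or_create`, which performs the lookup and the insert in one call and returns `(object, created)`. A sketch against the same `Todo` model:

```python
# Idempotent insert: only saves when no Todo with this title exists.
todo, created = Todo.objects.get_or_create(
    title='My 1st Todo',
    defaults={'text': 'Reading...'},
)
if created:
    print('inserted:', todo.title)
```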
{
"alpha_fraction": 0.6024590134620667,
"alphanum_fraction": 0.6237704753875732,
"avg_line_length": 32.86111068725586,
"blob_id": "b904bb2a8e31c9a3be36976261bdb14ebd4acfdf",
"content_id": "a6ce4658a0cc3df9f683f02f21c045caea892384",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1220,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 36,
"path": "/tools/mycli.sh",
"repo_name": "OpiumSmoke/django-tutorial-todolist",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n### Reusable shell script to access MySQL database.\n### It collects information required to access the DB from settings.py in your project.\n### Additional dependencies other than Python and Django:\n### - GNU Awk (gawk)\n### - mycli(MySQL client written by Python - https://www.mycli.net/) required\n\nhostScript=${0##*/}\n\n[ -x manage.py ] || {\n printf \"[ %s ] \\e[1;31m ERROR \\e[0m Can't find manage.py\\n\" \"${hostScript}\"\n exit 1\n}\n\nprjname=$( gawk '/DJANGO_SETTINGS_MODULE/ { match($0, /\"([[:alnum:]_]*)\\.settings/, c); print c[1]; }' manage.py )\n\ndb=$( python -c \"from $prjname import settings; print(settings.DATABASES['default']['NAME'])\" )\n[ $? = 0 ] || {\n printf \"[ %s ] \\e[1;31m ERROR \\e[0m Can't acquire db name\\n\" \"${hostScript}\"\n exit 1\n}\n\nuser=$( python -c \"from $prjname import settings; print(settings.DATABASES['default']['USER'])\" )\n[ $? = 0 ] || {\n printf \"[ %s ] \\e[1;31m ERROR \\e[0m Can't acquire user name\\n\" \"${hostScript}\"\n exit 1\n}\n\npw=$( python -c \"from $prjname import settings; print(settings.DATABASES['default']['PASSWORD'])\" )\n[ $? = 0 ] || {\n printf \"[ %s ] \\e[1;31m ERROR \\e[0m Can't acquire password\\n\" \"${hostScript}\"\n exit 1\n}\n\nmycli -h localhost -u$user -p$pw \"$db\"\n\n"
},
{
"alpha_fraction": 0.6521739363670349,
"alphanum_fraction": 0.6643478274345398,
"avg_line_length": 26.33333396911621,
"blob_id": "ddeb92f6073d68c49e7a99f146f09ca5f93a3b58",
"content_id": "6523eb13204bf00ff7333801a6f6dd0779d4edcf",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 575,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 21,
"path": "/tools/shell.sh",
"repo_name": "OpiumSmoke/django-tutorial-todolist",
"src_encoding": "UTF-8",
"text": "#!/bin/sh\n\n### Shell script to make Django Python shell a little more convenient.\n### Inside the shell, you can access the following objects by default without importing them.\n### - settings\n### - apps dictionary ( Python dictionary for your own apps )\n### - models (Todo, Category) \n### See .pythonrc.py for details\n\nhostScript=${0##*/}\n\n[ -x manage.py ] || {\n printf \"[ %s ] \\e[1;31m ERROR \\e[0m Can't find manage.py\\n\" \"${hostScript}\"\n exit 1\n}\n\nscriptDir=${0%/*}\n\ncd \"$scriptDir/..\" || exit\n\nPYTHONSTARTUP=\"$scriptDir/.pythonrc.py\" python manage.py shell -i python\n\n"
},
{
"alpha_fraction": 0.6043148636817932,
"alphanum_fraction": 0.6097676753997803,
"avg_line_length": 32.736000061035156,
"blob_id": "e09b22cb9a1c8b4362928390a8a2b37e0a2de927",
"content_id": "15ee00d95be41db6bba923f56b7b5f1076b60e82",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4218,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 125,
"path": "/todos/views.py",
"repo_name": "OpiumSmoke/django-tutorial-todolist",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.urls import reverse\nfrom django.contrib import messages\n# from django.http import HttpResponse\nfrom .models import Todo, Category\n\ndef index(request):\n todos = Todo.objects.all()[:10]\n context = {\n 'todos':todos\n }\n return render(request, 'index.html', context)\n\ndef details(request, id):\n todo = get_object_or_404(Todo, pk=id)\n context = {\n 'todo':todo,\n 'categories':todo.category_set.all()\n }\n return render(request, 'details.html', context)\n\ndef add(request):\n if (request.method == 'POST'):\n title = request.POST['title']\n text = request.POST['text']\n todo = Todo(title=title, text=text)\n todo.save()\n categories = request.POST.getlist('category-select')\n if categories:\n for id in categories:\n category = Category.objects.get(id=id)\n todo.category_set.add(category)\n messages.success(request, 'Todo Added : %s' % todo.title)\n return redirect(reverse('todos:index'))\n else:\n categories = Category.objects.all()\n context = {\n 'categories':categories\n }\n return render(request, 'add.html', context)\n\ndef update(request, id):\n todo = get_object_or_404(Todo, pk=id)\n if (request.method == 'POST'):\n todo.title = request.POST['title']\n todo.text = request.POST['text']\n categories = request.POST.getlist('category-select')\n todo.category_set.clear()\n if categories:\n for cid in categories:\n category = Category.objects.get(id=cid)\n todo.category_set.add(category)\n todo.save()\n messages.success(request, 'Todo Updated : %s' % todo.title)\n return redirect(reverse('todos:detail', args=[ id ]))\n else:\n attached_categories = todo.category_set.all()\n categories = [(True,c) if attached_categories.filter(id=c.id) else (False, c) for c in Category.objects.all() ]\n\n context = {\n 'todo':todo,\n 'categories':categories\n }\n return render(request, 'edit.html', context)\n\ndef delete(request, id):\n if (request.method == 'POST'):\n todo = get_object_or_404(Todo, pk=id)\n messages.success(request, 'Todo Deleted : %s' % todo.title)\n todo.delete()\n\n return redirect(reverse('todos:index'))\n\ndef category_index(request):\n categories = Category.objects.all()\n context = {\n 'categories':categories\n }\n return render(request, 'category/index.html', context)\n\ndef category_add(request):\n if (request.method == 'POST'):\n name = request.POST['name']\n category = Category(name=name)\n category.save()\n messages.success(request, 'Category Added : %s' % category.name)\n return redirect(reverse('todos:index'))\n else:\n categories = Category.objects.all()\n context = {\n 'categories':categories\n }\n return render(request, 'category/add.html', context)\n\ndef category_detail(request, category_id):\n category = get_object_or_404(Category, pk=category_id)\n context = {\n 'category':category,\n 'todos':category.todos.all()\n }\n return render(request, 'category/details.html', context)\n\ndef category_update(request, category_id):\n category = get_object_or_404(Category, pk=category_id)\n if (request.method == 'POST'):\n category.name = request.POST['name']\n category.save()\n messages.success(request, 'Category Updated : %s' % category.name)\n return redirect(reverse('todos:category/details', args=[ category_id ]))\n else:\n categories = Category.objects.all()\n context = {\n 'categories':categories,\n 'category':category\n }\n return render(request, 'category/edit.html', context)\n\n\ndef category_delete(request, category_id):\n if (request.method == 'POST'):\n category = 
get_object_or_404(Category, pk=category_id)\n messages.success(request, 'Category Deleted : %s' % category.name)\n category.delete()\n\n return redirect(reverse('todos:index'))\n\n"
},
{
"alpha_fraction": 0.6512096524238586,
"alphanum_fraction": 0.6612903475761414,
"avg_line_length": 28.117647171020508,
"blob_id": "45ca7ba53c0ed31d5fbd692b40fecd60274619e8",
"content_id": "4dcc8af370dc262e5d7001a5d2ac2be84cd9df1c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 496,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 17,
"path": "/todos/models.py",
"repo_name": "OpiumSmoke/django-tutorial-todolist",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.utils import timezone\n\nclass Todo(models.Model):\n title = models.CharField(max_length=200)\n text = models.TextField()\n created_at = models.DateTimeField(default=timezone.now, blank=True)\n\n def __str__(self):\n return '%s: %s' % (self.id, self.title)\n\nclass Category(models.Model):\n name = models.CharField(max_length=50)\n todos = models.ManyToManyField(Todo)\n\n def __str__(self):\n return '%s: %s' % (self.id, self.name)\n\n"
}
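Because models.py places the `ManyToManyField` on `Category`, the forward accessor is `category.todos` and the auto-generated reverse accessor is `todo.category_set` (the one views.py uses). A sketch of both directions, assuming the test data from populate-test-models.py and a Django shell such as the one tools/shell.sh opens:

```python
from todos.models import Todo, Category

# Forward: Category -> Todos (the field lives on Category)
work = Category.objects.get(name='work')
for todo in work.todos.all():
    print(todo.title)

# Reverse: Todo -> Categories via the generated category_set manager
todo = Todo.objects.get(title='My 5th Todo')
todo.category_set.add(work)        # attach from the Todo side
print(todo.category_set.count())   # query back
```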
] | 8 |
LucienXian/NBA_search_engine | https://github.com/LucienXian/NBA_search_engine | 6b9fa9d69c45176ad2de5953f565fcaba4028661 | 3232a22c556ca22f3a92daa745b35b327f3f4471 | 5c4884eaf264b42a9dc6dbae78a9fdae0cb0ee49 | refs/heads/master | 2020-03-23T23:44:30.139655 | 2018-07-24T12:17:10 | 2018-07-24T12:17:10 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5358114838600159,
"alphanum_fraction": 0.5583871603012085,
"avg_line_length": 37.045684814453125,
"blob_id": "6d8df4e502427150d3436e86c26134f5d5d33963",
"content_id": "ac57f4e210bccc9d032a9cc87fb39152e1b1c833",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 38374,
"license_type": "no_license",
"max_line_length": 188,
"num_lines": 985,
"path": "/search/static/assets/js/search.js",
"repo_name": "LucienXian/NBA_search_engine",
"src_encoding": "UTF-8",
"text": "$(function() {\n\t$.ajaxSetup({\n\t\theaders: {\n\t\t\t'X-CSRF-Token':$('meta[name=\"_token\"]').attr('content')\n\t\t}\n\t})\n})\n\n\n$(document).ready(function(){\n\t$('.shader-box').hide();\n\t$('.search-keyword').hide();\n\t$('#main').hide();\n\t$('.loading-box').hide();\n\n\tvar search_keyword_num = 0;\n\tvar search_word_array = new Array();\n\tvar news_array = new Array();\n\tvar news_in_box = 5;\n\tvar select_page = -1;\n\tvar Max_length = -1;\n\tvar last_page = -1;\n\tvar search_tab = '全部';\n\tvar search_order_tab = '相关度';\n\tvar time_condition = '';\n\tvar label_search_word = '';\n\tvar star_info_array = new Array();\n\tvar StarisDetailDataHide;\n\tvar select_star = 0;\n\tvar team_info_array = new Array();\n\tvar star_info_data_array = new Array();\n var\tteam_info_data_array = new Array();\n\tvar TeamisDetailDataHide;\n\tvar select_team = 0;\n\tvar search_type = 0;\n\n\tvar isSearching = false;\n\n\n\t$(\".dropdown-presentation\").mouseenter(function(){\n \t\t$(this).find('text').css('color','#000')\t\n })\n\n $(\".dropdown-presentation\").mouseleave(function(){\n \t\t$(this).find('text').css('color','#fff')\t\n })\n\n $(\".dropdown-presentation\").click(function(){\n \t\t$('#dropdownMenu1>text').text($(this).text());\n \t\tsearch_tab = $(this).find('text').text();\n })\n\n $(\".dropdown-presentation-order\").mouseenter(function(){\n \t\t$(this).find('text').css('color','#000')\t\n })\n\n $(\".dropdown-presentation-order\").mouseleave(function(){\n \t\t$(this).find('text').css('color','#fff')\t\n })\n\n $(\".dropdown-presentation-order\").click(function(){\n \t\tif (search_order_tab != $(this).find('text').text()) {\n \t\t\tsearch_order_tab = $(this).find('text').text();\n \t\t\t$('#dropdownMenu2 > text').text(search_order_tab);\n \t\t\tif (search_order_tab == '时间')\n \t\t\t\tnews_array = news_array.sort(time_cmp); // [1, 2, 3]\n \t\t\telse \n \t\t\t\tnews_array = news_array.sort(rel_cmp); // [1, 2, 3]\n \t\t}\n \t\t$('.news-detail').empty();\n \t\t$('.change-page-box').empty();\n \t\tputNews();\n })\n\n //排序函数\n function time_cmp(a, b){\n \tif (a['_source']['my_time'] > b['_source']['my_time']) return -1\n \telse return 1\n }\n\n\tfunction rel_cmp(a, b){\n \tif (a['_score'] > b['_score']) return -1\n \telse return 1\n }\n\n\n $('.fa-search').mouseenter(function(){\n \t\t$(this).css('background-color','rgba(255, 255, 255, 0.2)')\t\n })\n\n $('.fa-search').mouseleave(function(){\n \t\t$(this).css('background-color','rgba(255, 255, 255, 0)')\t\n })\n\n $('.search-box').keydown(function(event) {\n \t\tif (event.which == 13) {\n \t\t\t$('.fa-search').click();\n \t\t}\n })\n\n\n $('.fa-search').click(function(){\n \t\tvar search_origin_word = $('.search-box').val();\n \t\tif (( ($('.search-box').val().length > 0 && $.trim(search_origin_word) != '') || search_type > 0) && (!isSearching) ) {\n \t\t\t\n\t \t\t$(this).css('background-color','rgba(255, 255, 255, 0.2)')\t\n\t \t\t$('#header > div.content').animate({'margin-top':'8rem'});\n\t \t\t$('.nba-logo').hide();\n\t \t\t$('#main').show();\n\t \t\tsetTimeout(function(){\n\t\t\t\t\t$('.shader-box').show(); 
\n\t\t\t\t\t$('.search-keyword').show();\n\t\t\t\t},300);\n\n\t\t\t\t//清空\n\t\t\t\t$('.change-page-box').empty();\n\t\t\t\t$('.news-box-title').hide();\n\t\t\t\t$('.news-detail').empty();\n\t\t\t\t$('.star-box').empty();\n\t\t\t\t$('.team-box').empty();\n\n\t\t\t\t$('.star-box-title').hide();\n\t\t\t\t$('.team-box-title').hide();\n\t\t\t\t$('.relative-star-box').empty();\n\t\t\t\t$('.relative-team-box').empty();\n\n\t\t\t\t$('.star-divide-line').hide();\n\t\t\t\t$('.team-divide-line').hide();\n\n\t\t\t\t$('.change-button-skip-box > div:nth-child(2)').hide();\n\t\t\t\t$('.research').remove();\n\t\t\t\tsearch_order_tab = '相关度';\n\t\t\t\t\n \t\t\t$('#dropdownMenu2 > text').text(search_order_tab);\n \t\t\tvar recent_res = 0;\n \t\t\tvar search_word;\n \t\t\tvar search_source = new Array();\n \t\t\tvar search_label;\n \t\t\tvar search_star = new Array();\n \t\t\tvar search_team = new Array();\n\t\t\t\tvar search_recent;\n\n \t\t\tif (search_type == 0) {\n \t\t\t\t$('.condition-box').remove();\n \t\t\t\tsearch_keyword_num = 0;\n \t\t\t\tsearch_word_array = [];\n \t\t\t\t$('.search-container').empty();\n\n \t\t\t\tsearch_keyword_num = search_keyword_num + 1;\n\t\t\t\t\tAddKeyword(search_tab, search_keyword_num, true);\n\t\t\t\t\tsearch_word_array.push(['tab', search_tab]);\n\n\t\t\t\t\tsearch_word = $.trim(search_origin_word);\n\t\t\t\t\tsearch_keyword_num = search_keyword_num + 1;\n\t\t\t\t\tAddKeyword(search_word, search_keyword_num, true);\n\t\t\t\t\tsearch_word_array.push(['key', search_word]);\n\n\t\t\t\t\ttime_condition = '';\n\n\t\t\t\t\tsearch_label_map = {'全部':0, '新闻':1, '球队':2, '球星':3};\n\t\t\t\t\tsearch_label = search_label_map[search_tab];\n\t\t\t\t\trecent_res = 0;\n\n\t\t\t\t\tsearch_source.push('all');\n\n \t\t\t} else if (search_type == 1){\n \t\t\t\ttime_label_map = {'近一天':1, '近一周': 2, '近一月': 3, '': 0};\n \t\t\t\tsearch_label_map = {'全部':0, '新闻':1, '球队':2, '球星':3, '标签': 4};\n \t\t\t\trecent_res = time_label_map[time_condition];\n \t\t\t\tfor (var i=0; i < search_word_array.length; i++) {\n \t\t\t\t\tif (search_word_array[i][0] == 'tab') \n \t\t\t\t\t\tsearch_label = search_label_map[search_word_array[i][1]]\n \t\t\t\t\telse if (search_word_array[i][0] == 'key')\n \t\t\t\t\t\tsearch_word =search_word_array[i][1]\n \t\t\t\t\telse if (search_word_array[i][0] == 'source') \n \t\t\t\t\t\tsearch_source.push(search_word_array[i][1]);\n \t\t\t\t\telse if (search_word_array[i][0] == 'star')\n \t\t\t\t\t\tsearch_star.push(search_word_array[i][1]);\n \t\t\t\t\telse if (search_word_array[i][0] == 'team')\n \t\t\t\t\t\tsearch_team.push(search_word_array[i][1]);\n \t\t\t\t}\n \t\t\t} else if (search_type == 2) {\n \t\t\t\t$('.search-box').val('');\n\t\t\t\t\t$('.condition-box').remove();\n\n \t\t\t\tsearch_keyword_num = 0;\n \t\t\t\tsearch_word_array = [];\n \t\t\t\t$('.search-container').empty();\n\n \t\t\t\tsearch_keyword_num = search_keyword_num + 1;\n\t\t\t\t\tAddKeyword('标签', search_keyword_num, true);\n\t\t\t\t\tsearch_word_array.push(['tab', '标签']);\n\n\t\t\t\t\tsearch_word = label_search_word; \n\t\t\t\t\tlabel_search_word = '';\n\t\t\t\t\tsearch_keyword_num = search_keyword_num + 1;\n\t\t\t\t\tAddKeyword(search_word, search_keyword_num, true);\n\t\t\t\t\tsearch_word_array.push(['key', search_word]);\n\n\t\t\t\t\ttime_condition = '';\n\n\t\t\t\t\tsearch_label = 4;\n\t\t\t\t\trecent_res = 0;\n\n\t\t\t\t\tsearch_source.push('all');\n \t\t\t}\n \t\t\tstar_info_array = []\n \t\t\tteam_info_array = []\n\t\t\t\tstar_info_data_array = []\n \t\t\tteam_info_data_array = []\n\t\t\t\t\n 
\t\t\tselect_star = 0\n\t\t\t\t//发送数据给后台\t\n\t\t\t\t\n\t\t\t\tconsole.log(\"search word: \" + search_word);\n\t\t\t\tconsole.log(\"search label: \" + search_label);\n\t\t\t\tconsole.log(\"search source: \" + search_source);\n\t\t\t\tconsole.log(\"search recent: \" + recent_res);\n\t\t\t\tconsole.log(\"search star: \" + search_star);\n\t\t\t\tconsole.log(\"search team: \" + search_team);\n\n\t\t\t\tvar csrf_token = Cookies.get('csrftoken');\n\t\t\t\t$('.loading-box').show();\n\t\t\t\tisSearching = true;\n\t\t\t\t$.ajax({\n\t\t\t\t\ttype: \"POST\",\n\t\t\t\t\turl: \"http://127.0.0.1:8000/search/index\",\n\t\t\t\t\tdata:\n\t\t\t\t\t{\n\t \t'keyword': search_word,\n \t'source':search_source, \n \t'recent':recent_res,\n \t'label': search_label,\n \t\t\t\t'star': search_star,\n \t\t\t\t'team': search_team,\n\t\t\t\t\t\t'csrfmiddlewaretoken': csrf_token,\n\t\t\t\t\t },\n\t\t\t\t\tsuccess: function(data,status,request){\n\t\t\t\t\t\tconsole.log(data);\n\t\t\t\t\t\t\t\n\t\t\t\t\t\tif (search_tab == '全部' || search_tab == '新闻') {\n\t\t\t\t\t\t\tdata_source_count = data.result.source_list;\n\t\t\t\t\t\t\tdata_recent_list = data.result.recent_list;\n\t\t\t\t\t\t\ttime_count = [data_recent_list['count_recent_1day'], data_recent_list['count_recent_7day'], data_recent_list['count_recent_30day']];\n\t\t\t\t\t\t\tsource_count = [data_source_count['count_source_souhu'], data_source_count['count_source_sina'], data_source_count['count_source_wangyi'], data_source_count['count_source_hupu']]\n\n\t\t\t\t\t\t\tnews_array = data.result.data_list;\n\t\t\t\t\t\t\tfor (var i=0; i<news_array.length; i++) {\n\t\t\t\t\t\t\t\tnewtime = news_array[i]['_source']['my_time'];\n\t\t\t\t\t\t\t\tvar date=new Date(newtime.replace(/-/g, '/'));\n\t\t\t\t\t\t\t\tnews_array[i]['_source']['my_time'] = date;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tnews_array = news_array.sort(rel_cmp); // [1, 2, 3]\n\t\t\t\t\t\t\t$('#news-total-count').text('总共为您找到相关结果' + data.result.recent_list['count_recent_all'] + '个')\n\t\t\t\t\t\t}\n\t\t\t\t\t\t$('.loading-box').hide();\n\t\t\t\t\t\t\n\t\t\t\t\t\tif (search_tab == '全部' && search_type != 2) {\n\t\t\t\t\t\t\tif ( \"player_list\" in data.result) {\n\t\t\t\t\t\t\t\tstar_info_array =data.result.player_list;\n\t\t\t\t\t\t\t\tstar_info_data_array = data.result.player_data_list;\n\t\t\t\t\t\t\t\tif (star_info_array.length > 0)\n\t\t\t\t\t\t\t\t\tputStar(true);\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (\"team_list\" in data.result){\n\t\t\t\t\t\t\t\tteam_info_array = data.result.team_list;\n\t\t\t\t\t\t\t\tteam_info_data_array = data.result.team_data_list;\n\t\t\t\t\t\t\t\tif (team_info_array.length > 0)\n\t\t\t\t\t\t\t\t\tputTeam(true);\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tputNews();\n\t\t\t\t\t\t\tif (search_type != 1) {\n\t\t\t\t\t\t\t\tgenerateTimeCondition('时间', ['近一天','近一周','近一月'], time_count, false)\n\t\t\t\t\t\t\t\tgenerateCondition('来源', ['搜狐体育','新浪体育','网易体育','虎扑体育'], 'source', source_count, false)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tstarnameList = new Array();\n\t\t\t\t\t\t\tif (star_info_array.length > 0) {\n\t\t\t\t\t\t\t\tfor (var i=0; i<star_info_array.length; i++)\n\t\t\t\t\t\t\t\t\tstarnameList.push(star_info_array[i]['_source']['中文名']);\n\t\t\t\t\t\t\t\tif (search_type != 1) {\n\t\t\t\t\t\t\t\t\tgenerateCondition('球星', starnameList, 'star', [], true);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tteamnameList = new Array();\n\t\t\t\t\t\t\tif (team_info_array.length > 0) {\n\t\t\t\t\t\t\t\tfor (var i=0; i<team_info_array.length; 
i++)\n\t\t\t\t\t\t\t\t\tteamnameList.push(team_info_array[i]['_source']['name']);\n\t\t\t\t\t\t\t\tif (search_type != 1) {\n\t\t\t\t\t\t\t\t\tgenerateCondition('球队', teamnameList, 'team', [], true);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t} else if (search_tab == '球队') {\n\t\t\t\t\t\t\tteam_info_array = data.result.team_list;\n\t\t\t\t\t\t\tteam_info_data_array = data.result.team_data_list;\n\t\t\t\t\t\t\tif (team_info_array.length > 0) {\n\t\t\t\t\t\t\t\tputTeam(false);\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t$('.team-box-title').show();\n\t\t\t\t\t\t\t\t$('.team-box').append('<text style=\"font-size: 0.8rem; color:rgb(200, 200, 200);\">没有找到相关球队</text>');\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tteamnameList = new Array();\n\t\t\t\t\t\t\tif (team_info_array.length > 0) {\n\t\t\t\t\t\t\t\tfor (var i=0; i<team_info_array.length; i++)\n\t\t\t\t\t\t\t\t\tteamnameList.push(team_info_array[i]['_source']['name']);\n\t\t\t\t\t\t\t\tif (search_type != 1) {\n\t\t\t\t\t\t\t\t\tgenerateCondition('球队', teamnameList, 'team', [], true);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if (search_tab == '球星') {\n\t\t\t\t\t\t\tstar_info_array = data.result.player_list;\n\t\t\t\t\t\t\tstar_info_data_array = data.result.player_data_list;\n\t\t\t\t\t\t\tif (star_info_array.length > 0) {\n\t\t\t\t\t\t\t\tputStar(false);\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t$('.star-box-title').show();\n\t\t\t\t\t\t\t\t$('.star-box').append('<text style=\"font-size: 0.8rem; color:rgb(200, 200, 200);\">没有找到相关球星</text>');\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tstarnameList = new Array();\n\t\t\t\t\t\t\tif (star_info_array.length > 0) {\n\t\t\t\t\t\t\t\tfor (var i=0; i<star_info_array.length; i++)\n\t\t\t\t\t\t\t\t\tstarnameList.push(star_info_array[i]['_source']['中文名']);\n\t\t\t\t\t\t\t\tif (search_type != 1) { \n\t\t\t\t\t\t\t\t\tgenerateCondition('球星', starnameList, 'star', [], true);\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else if (search_tab == '新闻' || search_type == 2) {\n\t\t\t\t\t\t\tputNews();\n\t\t\t\t\t\t\tif (search_type != 1) {\n\t\t\t\t\t\t\t\tgenerateTimeCondition('时间', ['近一天','近一周','近一月'], time_count, false)\n\t\t\t\t\t\t\t\tgenerateCondition('来源', ['搜狐体育','新浪体育','网易体育','虎扑体育'], 'source', source_count, false)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t$('.navigator-bar').append('<button class=\"research\">筛选</button>')\n\n\t\t\t\t\t\t$('.research').click(function(){\n\t\t\t\t\t\t\tsearch_type = 1;\n\t\t\t\t\t\t\t$('.fa-search').click();\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tsearch_type = 0;\n\t\t\t\t\t\tisSearching = false;\n\t\t\t\t\t}\n\t });\n\t\t\t\n \t\t}\n })\n\n function putNews() {\n \t\t$('.news-box-title').show();\n \t\t$('.change-button-skip-box > div:nth-child(2)').show();\n \t\t//返回数据\n \t\t\n\t\t\tlast_page = Math.ceil(news_array.length / news_in_box);\n\t\t\tMax_length = Math.floor(Math.log10(last_page)+1);\n\n\t\t\t$('#total-page-button').text(\"(共\" + last_page + \"页)\")\n\n \t\tif (news_array.length/news_in_box > 5) {\n \t\t\tfor (var i=1; i<5; i++) \n\t \t\t\tgenerateChangeButton(i);\n\t \t\tgenerateRightButton()\n\t \t\tselect_page = 1;\n\t \t\t$('#page-1').css('background-color', 'rgba(255, 255, 255, 0.3)')\n \t\t} else {\n \t\t\tfor (var i=1; i<news_array.length / news_in_box+1; i++) \n \t\t\t\tgenerateChangeButton(i);\n\n \t\t\tselect_page = 1;\n \t\t\t$('#page-1').css('background-color', 'rgba(255, 255, 255, 0.3)')\n \t\t}\n \t\t\t\n\t\t\tchangePage(0);\n }\n\n function putStar(IsShowDivideLine) {\n \t$('.star-box-title').show();\n 
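\t// render the first matched player's card, then the strip of related players\n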
\tputOneStar(0);\n \tputRelativeStar();\n \tif (IsShowDivideLine == false)\n \t\t$('.star-divide-line').hide();\n \telse\n \t\t$('.star-divide-line').show();\n }\n\n function putOneStar(star_index) {\n \t$('.star-box').empty();\n \tStarisDetailDataHide = true;\n\t\t$('.star-box').append(FormStar(star_index, false))\n\t\tconsole.log(star_info_data_array);\n\t\tplayer_data_dict = star_info_data_array[0][0]['_source'];\n\t\tplayer_data_arr = [player_data_dict['season'], player_data_dict['得分'], player_data_dict['首发'],\n\t\t\t\t\t\t\t player_data_dict['出场'], player_data_dict['时间'], player_data_dict['投篮'], \n\t\t\t\t\t\t\t player_data_dict['三分'], player_data_dict['罚球'], player_data_dict['篮板'],\n\t\t\t\t\t\t\t player_data_dict['助攻'], player_data_dict['失误'], player_data_dict['犯规']]\n\t\tvar append_str = '<tr>'\n\t\tfor (var i=0; i<12; i++) {\n\t\t\tappend_str = append_str + '<td class=\"small-font\">'+ player_data_arr[i]+ '</td>'\n\t\t}\n\t\tappend_str = append_str + '</tr>'\n\t\t$('#player-' + star_index +' > tbody').append(append_str);\n\t\t\n\t\t\n\n\t\t$('.people-more-data').click(function(){ \n\t\t\tconsole.log(star_info_data_array)\n\t\t\tvar button_id = parseInt($(this).attr('id').split('-')[2])\n\t\t\tif (StarisDetailDataHide == false) {\n\t\t\t\t$('#player-' + button_id + ' >tbody').find('.more-data').remove();\n\t\t\t\t$('#player-button-' + button_id).text('show more');\n\t\t\t} else {\n\t\t\t\tfor (var j=1; j<=star_info_data_array[0].length-1; j++) {\n\t\t\t\t\tconsole.log(star_info_data_array)\n\t\t\t\t\tvar append_str = '<tr class=\"more-data\">';\n\t\t\t\t\tplayer_data_dict = star_info_data_array[0][j]['_source'];\n\t\t\t\t\tplayer_data_arr = [ player_data_dict['season'], player_data_dict['得分'], player_data_dict['首发'],\n\t\t\t\t\t\t\t \t\t\tplayer_data_dict['出场'], player_data_dict['时间'], player_data_dict['投篮'], \n\t\t\t\t\t\t\t \t\t\tplayer_data_dict['三分'], player_data_dict['罚球'], player_data_dict['篮板'],\n\t\t\t\t\t\t\t \t\t\tplayer_data_dict['助攻'], player_data_dict['失误'], player_data_dict['犯规']]\n\t\t\t\t\tfor (var i=0; i<12; i++) {\n\t\t\t\t\t\tappend_str = append_str + '<td class=\"small-font\">' + player_data_arr[i]+ '</td>';\n\t\t\t\t\t}\n\t\t\t\t\tappend_str = append_str + '</tr>'\n\t\t\t\t\t$('#player-' + button_id + ' >tbody').append(append_str);\n\t\t\t\t\t$('#player-button-' + button_id).text('hide');\n\t\t\t\t}\n\t\t\t}\n\t\t\tStarisDetailDataHide = !StarisDetailDataHide;\n\t\t}) \n }\n\n function putRelativeStar() {\n \t$('.relative-star-box').empty();\n \tRelativeStr = ''\n \tfor (var i=0; i<star_info_array.length; i++) {\n \t\tRelativeStr = RelativeStr + '<div class=\"star-brief-box\" id=\"star-brief-' + i + '\">'\n \t\tRelativeStr = RelativeStr + '<img src=\"' + star_info_array[i]['_source']['image_link'] + '\" class=\"people-small-photo\">';\n \t\tRelativeStr = RelativeStr + '<text class=\"star-brief-name\">' + star_info_array[i]['_source']['中文名'] + '</text>';\n \t\tRelativeStr = RelativeStr + '</div>'\n \t}\n \t$('.relative-star-box').append(RelativeStr);\n \t$('#star-brief-0').css('border-color', 'rgba(255, 255, 255, 0.3)')\n\n \t$('.star-brief-box').mouseenter(function(){\n \t\tvar star_id = parseInt($(this).attr('id').split('-')[2])\n \t\tif (star_id != select_star)\n \t\t\t$(this).css('border-color', 'rgba(255, 255, 255, 0.2)')\n \t})\n\n \t$('.star-brief-box').mouseleave(function(){\n \t\tvar star_id = parseInt($(this).attr('id').split('-')[2])\n \t\tif (star_id != select_star)\n \t\t\t$(this).css('border-color', 'rgba(255, 255, 255, 0)')\n \t})\n\n 
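\t// clicking a related player POSTs back to /search/index (label 3) to fetch that player's season rows\n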
\t$('.star-brief-box').click(function(){\n \t\t$('#star-brief-' + select_star).css('border-color', 'rgba(255, 255, 255, 0)')\n \t\tvar star_id = parseInt($(this).attr('id').split('-')[2])\n\t\t\tvar csrf_token = Cookies.get('csrftoken');\n\n \t\t$.ajax({\n\t\t\t\t\ttype: \"POST\",\n\t\t\t\t\turl: \"http://127.0.0.1:8000/search/index\",\n\t\t\t\t\tdata:\n\t\t\t\t\t{\n\t\t\t\t\t\t'keyword':$(this).find('text').text(),\n\t\t\t\t\t\t'star':$(this).find('text').text(),\n\t\t\t\t\t\t'label': 3,\n\t\t\t\t\t\t'csrfmiddlewaretoken': csrf_token,\n\t\t\t\t\t },\n\t\t\t\t\tsuccess: function(data,status,request){\n\t\t\t\t\t\tconsole.log(data);\n\t\t\t\t\t\tstar_info_data_array = data.result.player_data_list;\n\t\t\t\t\t\tputOneStar(star_id);\n\t\t\t\t\t}\n\t\t\t})\n \t\tselect_star = star_id;\n \t\t$(this).css('border-color', 'rgba(255, 255, 255, 0.3)')\n \t})\n\n }\n\n function putTeam(IsShowDivideLine) {\n \t$('.team-box-title').show();\n \tputOneTeam(0);\n \tputRelativeTeam();\n \tif (IsShowDivideLine == false)\n \t\t$('.team-divide-line').hide();\n \telse\n \t\t$('.team-divide-line').show();\n }\n\n function putOneTeam(team_index) {\n \t$('.team-box').empty();\n \tTeamisDetailDataHide = true;\n\t\t$('.team-box').append(FormTeam(team_index, false))\n\t\t\n\t\tteam_data_dict = team_info_data_array[team_index][0]['_source'];\n\t\t\n\t\tteam_data = [ team_data_dict['name'], team_data_dict['得分'], team_data_dict['首发'],\n\t\t\t\t\t team_data_dict['出场'], team_data_dict['时间'], team_data_dict['投篮'], \n\t\t\t\t\t team_data_dict['三分'], team_data_dict['罚球'], team_data_dict['篮板'],\n\t\t\t\t\t team_data_dict['助攻'], team_data_dict['失误'], team_data_dict['犯规']]\n\t\tvar append_str = '<tr>'\n\t\tfor (var i=0; i<12; i++) {\n\t\t\tappend_str = append_str + '<td class=\"small-font\">'+ team_data[i]+ '</td>'\n\t\t}\n\t\tappend_str = append_str + '</tr>'\n\t\t$('#team-' + team_index +' > tbody').append(append_str);\n\n\t\t$('.team-more-data').click(function(){ \n\t\t\tvar button_id = parseInt($(this).attr('id').split('-')[2])\n\t\t\tconsole.log(team_info_data_array);\n\t\t\tif (TeamisDetailDataHide == false) {\n\t\t\t\t$('#team-' + button_id + ' >tbody').find('.more-data').remove();\n\t\t\t\t$('#team-button-' + button_id).text('show more');\n\t\t\t} else {\n\t\t\t\tfor (var j=1; j<=team_info_data_array[button_id].length-1; j++) {\n\t\t\t\t\tconsole.log(team_info_data_array);\n\t\t\t\t\tteam_data_dict = team_info_data_array[button_id][j]['_source'];\n\t\t\t\t\tteam_data = [ team_data_dict['name'], team_data_dict['得分'], team_data_dict['首发'],\n\t\t\t\t\t \t\t\tteam_data_dict['出场'], team_data_dict['时间'], team_data_dict['投篮'], \n\t\t\t\t\t \t\t\tteam_data_dict['三分'], team_data_dict['罚球'], team_data_dict['篮板'],\n\t\t\t\t\t \t\t\tteam_data_dict['助攻'], team_data_dict['失误'], team_data_dict['犯规']]\n\t\t\t\t\tvar append_str = '<tr class=\"more-data\">';\n\t\t\t\t\tfor (var i=0; i<12; i++) {\n\t\t\t\t\t\tappend_str = append_str + '<td class=\"small-font\">' + team_data[i]+ '</td>';\n\t\t\t\t\t}\n\t\t\t\t\tappend_str = append_str + '</tr>'\n\t\t\t\t\t$('#team-' + button_id + ' >tbody').append(append_str);\n\t\t\t\t\t$('#team-button-' + button_id).text('hide');\n\t\t\t\t}\n\t\t\t}\n\t\t\tTeamisDetailDataHide = !TeamisDetailDataHide;\n\t\t}) \n }\n\n function putRelativeTeam() {\n \t$('.relative-team-box').empty();\n \tRelativeStr = ''\n \tfor (var i=0; i<team_info_array.length; i++) {\n \t\tRelativeStr = RelativeStr + '<div class=\"team-brief-box\" id=\"team-brief-' + i + '\">'\n \t\tRelativeStr = RelativeStr + '<img src=\"' + 
team_info_array[i]['_source']['image_link'] + '\" class=\"team-small-photo\">';\n \t\tRelativeStr = RelativeStr + '<text class=\"team-brief-name\">' + team_info_array[i]['_source']['name'] + '</text>';\n \t\tRelativeStr = RelativeStr + '</div>'\n \t}\n \t$('.relative-team-box').append(RelativeStr);\n \t$('#team-brief-0').css('border-color', 'rgba(255, 255, 255, 0.3)')\n\n \t$('.team-brief-box').mouseenter(function(){\n \t\tvar team_id = parseInt($(this).attr('id').split('-')[2])\n \t\tif (team_id != select_team)\n \t\t\t$(this).css('border-color', 'rgba(255, 255, 255, 0.2)')\n \t})\n\n \t$('.team-brief-box').mouseleave(function(){\n \t\tvar team_id = parseInt($(this).attr('id').split('-')[2])\n \t\tif (team_id != select_team)\n \t\t\t$(this).css('border-color', 'rgba(255, 255, 255, 0)')\n \t})\n\n \t$('.team-brief-box').click(function(){\n \t\t$('#team-brief-' + select_team).css('border-color', 'rgba(255, 255, 255, 0)')\n \t\tvar team_id = parseInt($(this).attr('id').split('-')[2])\n \t\tputOneTeam(team_id);\n \t\tselect_team = team_id;\n \t\t$(this).css('border-color', 'rgba(255, 255, 255, 0.3)')\n \t})\n\n }\n\n\n\t$(\".input-page\").keydown(function(event) {\n\t\t\tevent.preventDefault();\n\t\t\tevent.stopPropagation();\n \t\tvar input_key_origin_value = $(this).val();\n \t\t\n \t\tif (event.which >= 48 && event.which <= 57) {\n \t\t\t\tif (parseInt(input_key_origin_value + event.key) <= last_page)\n \t\t\t\t//if (input_key_origin_value.length < Max_length)\n\t\t \t\t$('.input-page').val(input_key_origin_value + event.key);\n\t\t \t} else if (event.which == 8) {\n\t\t \t\tif (input_key_origin_value != \"\") \n\t\t \t\t\tinput_key_origin_value = input_key_origin_value.substr(0, input_key_origin_value.length-1)\n\t\t \t\t$('.input-page').val(input_key_origin_value)\n\t\t \t} else if (event.which == 13) {\n\t\t \t\tvar next_page = parseInt(input_key_origin_value);\n\t\t \t\tif (next_page <= last_page && next_page > 0) {\n\t\t \t\t\tchangePageButton(next_page);\n\t\t \t\t\t$(this).val('');\n\t\t \t\t}\t\n\t\t }\n\n })\n\n\n\n $('.main-href').mouseenter(function() {\n\t\t\t$(this).css('color', 'rgba(255, 255, 255, 0.7)');\n\t\t\t$(this).css('border-bottom', '0.15rem solid rgba(255, 255, 255, 0.7)');\n\t})\n\t\t\t\n\t$('.main-href').mouseleave(function() {\n\t\t\t$(this).css('color', 'rgba(255, 255, 255, 0.4)');\n\t\t\t$(this).css('border-bottom', '0.15rem solid rgba(255, 255, 255, 0.4)');\n\t})\n\n\t$('.main-href').click(function(){\n\t\t\t$('.shader-box').hide();\n\t\t\t$('.search-keyword').hide();\n\t\t\t$('#main').hide();\n\t\t\t$('#header > div.content').hide();\n\t\t\tsetTimeout(function(){\n\t\t\t\t$('.nba-logo').show();\t\t\t \n\t\t\t\t$('#header > div.content').animate({'margin-top':'0rem'});\n\t\t\t\t$('#header > div.content').show();\n\t\t\t},300);\t\n\t\t\t\n\t})\n\n\t$('.link-ref >a').mouseenter(function() {\n\t\t\t$(this).css('color', 'rgb(150, 150, 150)')\n\t})\n\n\t$('.link-ref >a').mouseleave(function() {\n\t\t\t$(this).css('color', 'rgb(255, 255, 255)')\n\t})\n\n\t$('.link-ref >a').click(function() {\n\t\t\t$(this).css('color', 'rgb(255, 255, 255)')\n\t})\n\n\tfunction changePageButton(next_page) {\n\t\t\tnext_page = parseInt(next_page)\n\t\t\tif (last_page <= 5) {\n\t\t \t$('#page-' + select_page).css('background-color', 'rgba(255, 255, 255, 0)');\n\t\t\t\tchangePage(next_page - 1);\n\t\t\t\tselect_page = next_page;\n\t\t\t\t$('#page-' + select_page).css('background-color', 'rgba(255, 255, 255, 0.3)');\n\t\t\t} else {\n\t\t select_page = next_page;\n\t\t\t if (next_page <= 
last_page)\n\t\t\t changePage(next_page-1);\n\n\t\t\t \t$('.change-page-box').empty();\n\t\t\t if (next_page == 1 || next_page == 2 || next_page == 3 || next_page == 4) {\n\t\t\t \tfor (var i=1; i<5; i++) \n\t\t\t\t\t \tgenerateChangeButton(i);\n\t\t\t\t\t\tgenerateRightButton();\n\t\t \t\t\t$('#page-' + next_page).css('background-color', 'rgba(255, 255, 255, 0.3)');\n\t\t\t } else if (next_page <= last_page && next_page >= last_page -2) {\n\t\t\t \tgenerateLeftButton();\n\t\t\t \tfor (var i=last_page-3; i<=last_page; i++) \n\t\t\t\t\t \tgenerateChangeButton(i);\n\t\t\t\t\t $('#page-' + next_page).css('background-color', 'rgba(255, 255, 255, 0.3)');\n\t\t\t } else {\n\t\t\t \tgenerateLeftButton();\n\t\t\t \tfor (var i=next_page-1; i<=next_page + 1; i++) \n\t\t\t\t\t \tgenerateChangeButton(i);\n\t\t\t\t\t $('#page-' + next_page).css('background-color', 'rgba(255, 255, 255, 0.3)');\n\t\t\t\t\t generateRightButton();\n\t\t\t\t}\n\t\t }\n\t}\n\n\tfunction changePage(page){\n\t\t\t$('.news-detail').empty();\n\t\t\tfor (var i=page * news_in_box; i < (page+1) * news_in_box && i < news_array.length; i++) {\n\t\t\t\t$('.news-detail').append(generateNew(news_array[i]['_source'], i));\n\n\t\t\t\t$('.new-label-' + i).mouseenter(function(){\n\t\t\t\t\t$(this).css('color', 'rgb(250, 250, 250);')\n\t\t\t\t\t$(this).css('border-bottom-color', 'rgb(250, 250, 250);')\n\t\t\t\t})\n\n\t\t\t\t$('.new-label-' + i).mouseleave(function(){\n\t\t\t\t\t$(this).css('color', 'rgb(200, 200, 200);')\n\t\t\t\t\t$(this).css('border-bottom-color', 'rgb(150, 150, 150);')\n\t\t\t\t})\n\n\t\t\t\t$('.new-label-' + i).click(function(){\n\t\t\t\t\tsearch_type = 2;\n\t\t\t\t\tlabel_search_word = $(this).text();\n\t\t\t\t\t$('.fa-search').click();\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t$('.news-title').mouseenter(function(){\n\t\t\t\t\t$(this).css('color', 'rgb(200, 200, 200)');\n\t\t\t})\n\n\t\t\t$('.news-title').mouseleave(function(){\n\t\t\t\t\t$(this).css('color', 'rgb(255, 255, 255)');\n\t\t\t})\n\n\t\t\t$('.news-href').mouseenter(function(){\n\t\t\t\t\t$(this).css('color', 'rgb(200, 200, 200)');\n\t\t\t})\n\n\t\t\t$('.news-href').mouseleave(function(){\n\t\t\t\t\t$(this).css('color', 'rgb(255, 255, 255)');\n\t\t\t})\n\n\n\t}\n\n\n\tfunction generateChangeButton(id){\n\t\t$('.change-page-box').append('<div class=\"change-page\" id=\"page-'+ id +'\">'+ id +'</div>');\n\t $('.change-page').mouseenter(function(){\n\t \t\t$(this).css('background-color', 'rgba(255, 255, 255, 0.2)')\n\t })\n\t $('.change-page').mouseleave(function(){\n\t \t\tvar button_id = parseInt(($(this).attr('id').split('-'))[1]);\n\t \t\tif (button_id != select_page)\n\t \t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0)')\n\t })\n\t $('.change-page').click(function(){\n\t \t\tvar button_id = parseInt(($(this).attr('id').split('-'))[1]);\n\t \t\tchangePageButton(button_id);\n\t })\n\t}\n\n\tfunction generateRightButton() {\n\t\t$('.change-page-box').append('<div class=\"change-page-more icon fa-angle-right\"></div>')\n\t $('.fa-angle-right').mouseenter(function(){\n\t\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0.2)')\n\t\t})\n\n\t $('.fa-angle-right').mouseleave(function(){\n\t\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0)')\n\t\t})\n\n\t $('.fa-angle-right').click(function(){\n\t\t\t\tchangePageButton(parseInt(select_page) + 1);\n\t\t})\n\t}\n\n\tfunction generateLeftButton() {\n\t\t$('.change-page-box').append('<div class=\"change-page-more icon 
fa-angle-left\"></div>')\n\t\t$('.fa-angle-left').click(function(){\n\t\t\t\tchangePageButton(parseInt(select_page) - 1);\n\t\t})\n\t \n\t\t$('.fa-angle-left').mouseenter(function(){\n\t\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0.2)')\n\t\t})\n\t\t \n\t\t$('.fa-angle-left').mouseleave(function(){\n\t\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0)')\n\t\t})\n\t}\n\n\tfunction generateNew(news_info, news_index){\n\t\tNewsStr = '<div class=\"one-news\">'\n\t\tNewsStr = NewsStr + '<a class=\"news-title\" href=\"'+ news_info['link']+ '\" target=\"_blank\">';\n\t\tif (news_info['place'] == 'title' && news_info['index'] >= 0) {\n\t\t\tNewsStr = NewsStr + news_info['name'].slice(0, news_info['index'])\n\t\t\tfor (var i=0; i< news_info['keyword'].length && i + news_info['index'] < news_info['name'].length; i++) {\n\t\t\t\tif (news_info['keyword'][i] == news_info['name'].slice(news_info['index'] + i, news_info['index'] + i + 1))\n\t\t\t\t\tNewsStr = NewsStr + '<text style=\"color:rgba(250, 40, 40, 0.8)\">' + news_info['name'][news_info['index'] + i] + '</text>'\n\t\t\t\telse\n\t\t\t\t\tNewsStr = NewsStr + '<text>' + news_info['name'][news_info['index'] + i] + '</text>'\n\t\t\t}\n\t\t\tNewsStr = NewsStr + news_info['name'].slice(news_info['index'] + news_info['keyword'].length, news_info['name'].length) + '</a>'\n\t\t} else {\n\t\t\tNewsStr = NewsStr + news_info['name'] + '</a>'\n\t\t}\n\t\tNewsStr = NewsStr + '<div class=\"news-source news-label-box\">' + news_info['source'] + ' ' \n\t\tNewsStr = NewsStr + ParseTime(news_info['my_time'])\n\t\tNewsStr = NewsStr + ' 标签:'\n\t\tlabel_split = news_info['label'].split(' ')\n\t\tfor (var i = 0; i < label_split.length; i++) {\n\t\t\tif (label_split[i] != '')\n\t\t\t\tNewsStr = NewsStr + '<div class=\"news-label new-label-'+ news_index +'\">' + label_split[i] + '</div>'\n\t\t}\n\t\tNewsStr = NewsStr + '</div>'\n\t\tif (news_info['place'] == 'content' && news_info['index']>=0 && news_info['index'] < 100) {\n\t\t\tNewsStr = NewsStr + '<div class=\"news-content\">'+ news_info['content'].slice(0, news_info['index'])\n\t\t\tfor (var i=0; i< news_info['keyword'].length; i++) {\n\t\t\t\tif (news_info['keyword'][i] == news_info['content'].slice(news_info['index'] + i, news_info['index'] + i + 1))\n\t\t\t\t\tNewsStr = NewsStr + '<strong>' + news_info['content'][news_info['index'] + i] + '</strong>'\n\t\t\t\telse\n\t\t\t\t\tNewsStr = NewsStr + news_info['content'][news_info['index'] + i]\n\t\t\t}\n\n\t\t\tif (news_info['index'] + news_info['keyword'].length < 100)\n\t\t\t\tNewsStr = NewsStr + news_info['content'].slice(news_info['index'] + news_info['keyword'].length, 100)\n\t\t\tNewsStr = NewsStr +'... <a class=\"news-href\" href=\"'+ news_info['link']+ '\">查看详情</a></div></div>'\n\t\t} else {\n\t\t\tNewsStr = NewsStr + '<div class=\"news-content\">'+ news_info['content'].slice(0, 100) +'... 
<a class=\"news-href\" href=\"'+ news_info['link']+ '\" target=\"_blank\">查看详情</a></div>'\n\t\t\tNewsStr = NewsStr + '</div>'\n\t\t}\n\t\treturn NewsStr\n\t}\n\n\tfunction generateCondition(name, label, condition_type, number, isNoNumber) {\n\t\tcond_str = '<div class=\"condition-box\"><div class=\"condition-name\"><text\">'+ name +'</text></div> <div class=\"condition-class\">'\n\t\tfor (var i=0; i<label.length; i++) {\n\t\t\tcond_str = cond_str + '<div class=\"condition-explicit\" type=\"' + condition_type +'\"> <span class=\"icon fa-caret-right\"> </span></span>'\n\t\t\tcond_str = cond_str + '<text class=\"condition\">'+ label[i]+'</text>'\n\t\t\tif (!isNoNumber)\n\t\t\t\tcond_str = cond_str + '<text class=\"result-number\">[ ' + number[i] + ' ]</text>'\n\t\t\tcond_str = cond_str + '</div>';\n\t\t\t\n\t\t}\n\t\tcond_str = cond_str + '</div> </div>'\n\t\t$('.navigator-bar').append(cond_str)\n\n\t\t$('.condition-explicit').mouseenter(function(){\n\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0.3)');\n\t\t})\n\n\t\t$('.condition-explicit').mouseleave(function(){\n\t\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0)');\n\t\t})\n\n\t\t$('.condition-explicit').click(function(){\n\t\t\t\t$(this).css('background-color', 'rgba(0, 0, 0, 0.3)');\n\t\t\t\tsearch_word = $(this).find('text[class=\"condition\"]').text();\n\t\t\t\tvar i=0;\n\t\t\t\tfor (i=0; i<search_word_array.length; i++) {\n\t\t\t\t\tif (search_word_array[i][1] == search_word)\n\t\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tif (i == search_word_array.length) {\n\t\t\t\t\tsearch_keyword_num = search_keyword_num + 1;\n\t\t\t\t\tAddKeyword(search_word, search_keyword_num);\n\t\t\t\t\tsearch_word_array.push([$(this).attr('type'), search_word]);\n\t\t\t\t}\n\t\t})\n\t}\n\n\tfunction generateTimeCondition(name, label, number) {\n\t\tcond_str = '<div class=\"condition-box\"> <div class=\"condition-name\"><text\">'+ name +'</text></div> <div class=\"condition-class\">'\n\t\tfor (var i=0; i<label.length; i++) {\n\t\t\tcond_str = cond_str + '<div class=\"condition-explicit-time\"> <span class=\"icon fa-caret-right\"> </span></span>'\n\t\t\tcond_str = cond_str + '<text class=\"condition\">'+ label[i]+'</text>'\n\t\t\tcond_str = cond_str + '<text class=\"result-number\">[ ' + number[i] + ' ]</text></div>'\n\t\t}\n\t\tcond_str = cond_str + '</div> </div>'\n\t\t$('.navigator-bar').append(cond_str)\n\n\t\t$('.condition-explicit-time').mouseenter(function(){\n\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0.3)');\n\t\t})\n\n\t\t$('.condition-explicit-time').mouseleave(function(){\n\t\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0)');\n\t\t})\n\n\t\t$('.condition-explicit-time').click(function(){\n\t\t\t\t$(this).css('background-color', 'rgba(0, 0, 0, 0.3)');\n\t\t\t\tsearch_word = $(this).find('text[class=\"condition\"]').text();\n\t\t\t\tif (time_condition == '') {\n\t\t\t\t\tAddKeyword(search_word, 0, false, false);\n\t\t\t\t\ttime_condition = search_word;\n\t\t\t\t}\n\t\t\t\telse if (search_word != time_condition ) {\n\t\t\t\t\tsearch_keyword_num = search_keyword_num + 1;\n\t\t\t\t\tAddKeyword(search_word, 0, false, true);\n\t\t\t\t\ttime_condition = search_word;\n\t\t\t\t} \n\t\t})\n\t}\n\n\tfunction FormStar(star_index, IsShowLine) {\n\t\tconsole.log(star_info_array);\n\t\tStarInfo = star_info_array[star_index]['_source']\n \tstar_str = '<div class=\"people-info\"> <div class=\"detailed-box\">'\n \tstar_str = star_str + '<img src=\"' + StarInfo['image_link'] + '\" class=\"people-photo\"></img>'\n 
\tstar_str = star_str + '<div class=\"people-detailed-info\"> '\n \tstar_str = star_str + '<div class=\"info-one-line\">中文姓名:<text>' + StarInfo['中文名']+ '</text></div>'\n \tstar_str = star_str + '<div class=\"info-one-line\">英文姓名:<text>' + StarInfo['name'] + '</text></div>'\n \tstar_str = star_str + '<div class=\"info-one-line\">球队:<text>' + star_info_data_array[0][0]['_source']['team'] + '</text></div>'\n \tstar_str = star_str + '<div class=\"info-one-line\">身高:<text>' + StarInfo['身高'] + '</text></div>'\n \tstar_str = star_str + '<div class=\"info-one-line\">体重:<text>' + StarInfo['体重'] + '</text></div>'\n \tstar_str = star_str + '<div class=\"info-one-line\">出生情况:<text>' + StarInfo['出生年月'] + '</text></div>'\n \tstar_str = star_str + '<div class=\"info-one-line\">位置:<text>' + StarInfo['位置'] + '</text></div>'\n \t//star_str = star_str + '<div class=\"info-one-line\">薪水:<text>' + StarInfo['salary'] + '</text></div>'\n \tstar_str = star_str + '</div></div><table class=\"people-data\" align=\"center\" id=\"player-'+ star_index +'\"><tr><td>赛季</td><td>场均得分</td><td>首发</td><td>出场</td><td>上场时间</td>'\n\t\tstar_str = star_str + '<td>投篮命中率</td><td>三分命中率</td><td>罚球命中率</td><td>篮板数</td><td>助攻</td><td>失误</td><td>犯规</td></tr><tr></tr></table>'\n\t\tstar_str = star_str + '<button class=\"people-more-data\" id=player-button-'+ star_index +'>show more</button>'\n\t\tif (IsShowLine) \n\t\t\tstar_str = star_str + '<div class=\"divide-line\"></div>'\n\t\tstar_str = star_str + '</div>'\n \treturn star_str;\n }\n\n function FormTeam(team_index, IsShowLine) {\n \tTeamInfo = team_info_array[team_index]['_source']\n\t\tconsole.log(team_info_array);\n \tteam_str = '<div class=\"team-info\"> <div class=\"detailed-box\">'\n \tteam_str = team_str + '<img src=\"' + TeamInfo['image_link'] + '\" class=\"team-photo\"></img>'\n \tteam_str = team_str + '<div class=\"people-detailed-info\"> '\n \tteam_str = team_str + '<div class=\"info-one-line\">中文队名:<text>' + TeamInfo['name']+ '</text></div>'\n \tteam_str = team_str + '<div class=\"info-one-line\">英文队名:<text>' + TeamInfo['英文名字'] + '</text></div>'\n \tteam_str = team_str + '<div class=\"info-one-line\">地区:<text>' + TeamInfo['所属地区'] + '</text></div>'\n \tteam_str = team_str + '<div class=\"info-one-line\">体育馆:<text>' + TeamInfo['主球馆'] + '</text></div>'\n \tteam_str = team_str + '<div class=\"info-one-line\">所属赛区:<text>' + TeamInfo['赛区'] + '</text></div>'\n \tteam_str = team_str + '<div class=\"info-one-line\">成立时间:<text>' + TeamInfo['成立时间'] + '</text></div>'\n \tteam_str = team_str + '<div class=\"info-one-line\">教练:<text>' + TeamInfo['拥有者'] + '</text></div>'\n\t\tteam_str = team_str + '<div class=\"info-one-line\">老板:<text>' + TeamInfo['主教练'] + '</text></div>'\n \tteam_str = team_str + '</div></div><table class=\"team-data\" align=\"center\" id=\"team-'+ team_index +'\"><tr><td>球员</td><td>场均得分</td><td>首发</td><td>出场</td><td>上场时间</td>'\n\t\tteam_str = team_str + '<td>投篮命中率</td><td>三分命中率</td><td>罚球命中率</td><td>篮板数</td><td>助攻</td><td>失误</td><td>犯规</td></tr><tr></tr></table>'\n\t\tteam_str = team_str + '<button class=\"team-more-data\" id=team-button-'+ team_index +'>show more</button>'\n\t\tteam_str = team_str + '</div>'\n \treturn team_str;\n }\n\t\t\t\t\n\n\tfunction ParseTime(datetime) {\n\t\tdateformat = datetime.getFullYear() + \"-\" + (datetime.getMonth()+1) + \"-\" + datetime.getDate() + \" \"\n\t\tif (datetime.getHours() < 10)\n\t\t\tdateformat = dateformat + '0'\n\t\tdateformat = dateformat + datetime.getHours() + \":\";\n\t\tif (datetime.getMinutes() < 
10)\n\t\t\tdateformat = dateformat + '0'\n\t\tdateformat = dateformat + datetime.getMinutes() + \":\";\n\t\tif (datetime.getSeconds() < 10)\n\t\t\tdateformat = dateformat + '0'\n\t\tdateformat = dateformat + datetime.getSeconds();\n\t\treturn dateformat\n\t}\n\n\tfunction AddKeyword(search_word , search_keyword_num, isLock, isModify){\n\t\t\tsearch_word = search_word.replace( /^\\s*/, '');\n\t\t\tif (isModify == true) {\n\t\t\t\t$('#keyword-box-'+search_keyword_num + '>strong').text(search_word)\n\t\t\t}\n\t\t\telse if (!isLock) {\n\t\t\t\t$('.search-container').append('<div class=\"keyword-box\" id=\"keyword-box-'+ search_keyword_num +'\">\\\n\t\t\t\t\t\t\t\t\t\t \t<strong style=\"color: rgb(150, 150, 150)\">'+ search_word + '</strong> \\\n\t\t\t\t\t\t\t\t\t\t \t<span class=\"icon fa-close\" id=\"fa-close-'+ search_keyword_num +'\"></span>\\\n\t\t\t\t\t\t\t\t\t\t \t</div>');\n\t\t\t} else {\n\n\t\t\t\tsearch_split_word = search_word.split(' ')\n\t\t\t\tfor (var i=0; i<search_split_word.length; i++)\n\t\t\t\t\tif (search_split_word[i] != '')\n\t\t\t\t\t\t$('.search-container').append('<div class=\"keyword-box locked\" id=\"keyword-box-'+ search_keyword_num +'\">\\\n\t\t\t\t\t\t\t\t\t\t \t<strong style=\"color: rgb(150, 150, 150)\">'+ search_split_word[i] +'</strong>\\\n\t\t\t\t\t\t\t\t\t\t \t</div>');\n\t\t\t}\n\t\t\t\n\t\t\t$('.keyword-box').mouseenter(function(){\n\t\t\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0.2)');\n\t\t\t\t\t$(this).find('strong').css('color','rgb(255, 255, 255, 0.8)')\n\t\t\t})\n\n\t\t\t$('.keyword-box').mouseleave(function(){\n\t\t\t\t\t$(this).css('background-color', 'rgba(255, 255, 255, 0)');\n\t\t\t\t\t$(this).find('strong').css('color','rgb(150, 150, 150)')\n\t\t\t})\n\t\t\t\n\t\t\t$('#fa-close-' + search_keyword_num).mouseenter(function(){\n\t\t\t\t \tvar close_id = parseInt($(this).attr('id').split('-')[2]);\n\t\t\t\t\t$(this).append(\"<style>#fa-close-\"+close_id+\"::before{color: rgba(255, 255, 255, 0.8)}</style>\")\n\t\t\t})\n\n\t\t\t$('#fa-close-' + search_keyword_num).mouseleave(function(){\n\t\t\t\t \tvar close_id = parseInt($(this).attr('id').split('-')[2]);\n\t\t\t\t\t$(this).append(\"<style>#fa-close-\"+close_id+\"::before{color: rgb(150, 150, 150)}</style>\")\n\t\t\t})\n\n\t\t\t$('#fa-close-' + search_keyword_num).click(function(){\n\t\t\t\t \tvar close_id = parseInt($(this).attr('id').split('-')[2]);\n\t\t\t\t \tif (close_id == 0) {\n\t\t\t\t \t\ttime_condition = '';\n\t\t\t\t \t}\n\t\t\t\t\t$('#keyword-box-' + close_id).remove();\n\t\t\t\t\tfor (var i=0; i<search_word_array.length; i++) {\n\t\t\t\t\t\tif (search_word_array[i][1] == search_word)\n\t\t\t\t\t\t\tsearch_word_array.splice(i, 1)\n\t\t\t\t\t}\n\t\t\t})\n\n\n\t}\n\n});"
},
{
"alpha_fraction": 0.4941006600856781,
"alphanum_fraction": 0.5090296268463135,
"avg_line_length": 28.27007293701172,
"blob_id": "01ef4ae823c5c5a2850a46fdffb6376a3f5387a9",
"content_id": "486386100e7771331263a1d34f5466fc3f8398c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8352,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 274,
"path": "/search/views.py",
"repo_name": "LucienXian/NBA_search_engine",
"src_encoding": "UTF-8",
"text": "from django.http import HttpResponse\r\nfrom django.http import JsonResponse\r\nfrom django.shortcuts import render\r\nfrom elasticsearch import Elasticsearch\r\nfrom search.elasticsearch import ElasticSearchClass\r\nimport datetime\r\n\r\n# Create your views here.\r\n\r\ntry:\r\n from django.utils import simplejson as json\r\nexcept ImportError:\r\n import json\r\n\r\nclass DateEncoder(json.JSONEncoder): \r\n def default(self, obj): \r\n if isinstance(obj, datetime.datetime): \r\n return obj.strftime('%Y-%m-%d %H:%M:%S') \r\n elif isinstance(obj, date): \r\n return obj.strftime(\"%Y-%m-%d\") \r\n else: \r\n return json.JSONEncoder.default(self, obj) \r\n\r\ndef toShortName(name):\r\n short_name = name[-5:-1]\r\n if short_name == '凯尔特人' :\r\n return short_name\r\n short_name = name[-4:-1]\r\n if short_name in ['76人','步行者','独行侠','森林狼'] :\r\n return short_name\r\n short_name = name[-3:-1]\r\n return short_name\r\n\r\ndef index(request):\r\n\r\n if request.method == 'GET':\r\n passed = {}\r\n return render(request, 'index.html', passed)\r\n \r\n print(\"xxxxxxxxxxxxx\")\r\n\r\n es = ElasticSearchClass()\r\n \r\n\r\n category = request.POST.get('label', None)\r\n if category == '0':\r\n # this is all\r\n es.make_index_table()\r\n\r\n keyword_list = request.POST.get('keyword', None)\r\n source = request.POST.getlist('source[]', None)\r\n team = request.POST.getlist('team[]', None)\r\n player = request.POST.getlist('star[]', None)\r\n search_str = keyword_list\r\n search_type = \"news\"\r\n search_order = \"by score\"\r\n search_source = source\r\n \r\n team_str = \"\"\r\n if team != []:\r\n for i in team:\r\n team_str = team_str + toShortName(i) + \" \"\r\n if team_str != \"\":\r\n search_str = team_str[0:-1]\r\n\r\n list2 = es.search_keywords(search_type, search_str)\r\n list2 = es.filter(list2, 'source', search_source)\r\n recent = request.POST.get('recent', None)\r\n \r\n if recent == '1':\r\n list2 = es.filter(list2, 'recent_1day', [])\r\n elif recent == '2':\r\n list2 = es.filter(list2, 'recent_7day', [])\r\n elif recent == '3':\r\n list2 = es.filter(list2, 'recent_30day', [])\r\n \r\n temp = {}\r\n temp['recent_list'] = es.count_recent(list2)\r\n temp['source_list'] = es.count_source(list2)\r\n temp['data_list'] = list2\r\n #print(temp['recent_list'])\r\n #print(temp['source_list'])\r\n\r\n #print(list2)\r\n #list2 = es.sort(list2, search_order)\r\n keyword_list = request.POST.get('keyword', None)\r\n \r\n search_str = keyword_list\r\n search_type = \"team\"\r\n search_order = \"by defen\"\r\n \r\n list2 = es.search_team(search_str)\r\n list2 = es.filter(list2, 'by team', team)\r\n \r\n temp['team_list'] = list2\r\n list0 = []\r\n \r\n i = 0\r\n\r\n for i in list2:\r\n name = i['_source']['name']\r\n short_name = toShortName(name)\r\n listx = es.search_data_season(short_name, \"2017\")\r\n listx = es.sort(listx, search_order)\r\n list0.append(listx)\r\n \r\n temp['team_data_list'] = list0\r\n \r\n keyword_list = request.POST.get('keyword', None)\r\n search_str = keyword_list\r\n search_type = \"player\"\r\n search_order = \"by season\"\r\n \r\n list2 = es.search_player(search_str)\r\n list2 = es.filter(list2, 'by player', player)\r\n \r\n temp['player_list'] = list2\r\n list0 = []\r\n\r\n for i in list2:\r\n name = i['_source']['中文名']\r\n listx = es.search_data(name)\r\n \r\n listx = es.sort(listx, search_order)\r\n list0.append(listx)\r\n\r\n break\r\n \r\n temp['player_data_list'] = list0\r\n\r\n\r\n if category == '1':\r\n # this is a news\r\n es.make_index_table()\r\n\r\n 
keyword_list = request.POST.get('keyword', None)\r\n source = request.POST.getlist('source[]', None)\r\n search_str = keyword_list\r\n search_type = \"news\"\r\n search_order = \"by score\"\r\n search_source = source\r\n \r\n list2 = es.search_keywords(search_type, search_str)\r\n list2 = es.filter(list2, 'source', search_source)\r\n recent = request.POST.get('recent', None)\r\n\r\n if recent == '1':\r\n list2 = es.filter(list2, 'recent_1day', [])\r\n elif recent == '2':\r\n list2 = es.filter(list2, 'recent_7day', [])\r\n elif recent == '3':\r\n list2 = es.filter(list2, 'recent_30day', [])\r\n \r\n temp = {}\r\n temp['recent_list'] = es.count_recent(list2)\r\n temp['source_list'] = es.count_source(list2)\r\n temp['data_list'] = list2\r\n \r\n #print(temp['recent_list'])\r\n #print(temp['source_list'])\r\n\r\n #print(list2)\r\n #list2 = es.sort(list2, search_order)\r\n \r\n elif category == '2':\r\n # this is a team\r\n temp = {}\r\n keyword_list = request.POST.get('keyword', None)\r\n team = request.POST.getlist('team[]', None)\r\n search_str = keyword_list\r\n search_type = \"team\"\r\n search_order = \"by defen\"\r\n\r\n list2 = es.search_team(search_str)\r\n list2 = es.filter(list2, 'by team', team)\r\n temp = {}\r\n \r\n temp['team_list'] = list2\r\n list0 = []\r\n \r\n i = 0\r\n\r\n for i in list2:\r\n name = i['_source']['name']\r\n short_name = toShortName(name)\r\n listx = es.search_data_season(short_name, \"2017\")\r\n listx = es.sort(listx, search_order)\r\n list0.append(listx)\r\n \r\n temp['team_data_list'] = list0\r\n\r\n #temp['data_list'] = \r\n\r\n elif category == '3':\r\n # this is a player\r\n temp = {}\r\n keyword_list = request.POST.get('keyword', None)\r\n player = request.POST.getlist('star[]', None)\r\n search_str = keyword_list\r\n search_type = \"player\"\r\n search_order = \"by season\"\r\n \r\n list2 = es.search_player(search_str)\r\n list2 = es.filter(list2, 'by player', player)\r\n \r\n temp = {}\r\n \r\n temp['player_list'] = list2\r\n list0 = []\r\n\r\n for i in list2:\r\n name = i['_source']['中文名']\r\n listx = es.search_data(name)\r\n \r\n listx = es.sort(listx, search_order)\r\n list0.append(listx)\r\n \r\n break\r\n \r\n temp['player_data_list'] = list0\r\n\r\n elif category == '4':\r\n # this is a news of label\r\n keyword_list = request.POST.get('keyword', None)\r\n source = request.POST.getlist('source[]', None)\r\n search_str = keyword_list\r\n search_type = \"news\"\r\n search_order = \"by score\"\r\n search_source = source\r\n list2 = es.get_by_index(keyword_list)\r\n \r\n list2 = es.filter(list2, 'source', search_source)\r\n recent = request.POST.get('recent', None)\r\n \r\n if recent == '1':\r\n list2 = es.filter(list2, 'recent_1day', [])\r\n elif recent == '2':\r\n list2 = es.filter(list2, 'recent_7day', [])\r\n elif recent == '3':\r\n list2 = es.filter(list2, 'recent_30day', [])\r\n \r\n temp = {}\r\n temp['recent_list'] = es.count_recent(list2)\r\n temp['source_list'] = es.count_source(list2)\r\n temp['data_list'] = list2\r\n \r\n #print(temp['recent_list'])\r\n #print(temp['source_list'])\r\n\r\n #print(list2)\r\n #list2 = es.sort(list2, search_order)\r\n '''\r\n \r\n #print(es.search_all(\"news\"))\r\n\r\n search_str = [\"詹姆斯\"]\r\n search_type = \"data\"\r\n search_order = \"by season\"\r\n search_source = [\"all\"]\r\n\r\n list = es.search_keywords(search_type, search_str)\r\n list = es.filter(list, 'source', search_source)\r\n list = es.sort(list, search_order)\r\n print(list)\r\n '''\r\n\r\n\r\n passed = {}\r\n passed['result'] = temp\r\n 
# passed['result'] carries the news hits plus team/player details and facet counts;\r\n    # DateEncoder turns the datetime fields back into strings for json.dumps\r\n    return HttpResponse(json.dumps(passed, cls=DateEncoder), content_type='application/json')\r\n"
},
{
"alpha_fraction": 0.39244741201400757,
"alphanum_fraction": 0.43499043583869934,
"avg_line_length": 39.631839752197266,
"blob_id": "d71d059372458266d5afd9d51885579cd204e8a2",
"content_id": "11d137e8ee6e803b035ba3a289e6dd1f34aba22e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9875,
"license_type": "no_license",
"max_line_length": 497,
"num_lines": 201,
"path": "/data/views.py",
"repo_name": "LucienXian/NBA_search_engine",
"src_encoding": "UTF-8",
"text": "from django.http import HttpResponse\r\nfrom elasticsearch import Elasticsearch\r\nimport es_client\r\n\r\nfrom elasticsearch import helpers\r\nfrom search.elasticsearch import ElasticSearchClass\r\nimport xlrd\r\n\r\n# Create your views here.\r\n\r\nTEAM_DIC = {'ATL':'老鹰','BKN':'篮网','BOS':'凯尔特人','CHI':'公牛','CHA':'黄蜂','CLE':'骑士','MIA':'热火',\r\n \r\n 'DET':'活塞','NYK':'尼克斯','ORL':'魔术','IND':'步行者','PHI':'76人','WAS':'奇才','MIL':'雄鹿',\r\n \r\n 'TOR':'猛龙','GSW':'勇士','DEN':'掘金','DAL':'独行侠','LAC':'快船','MIN':'森林狼','HOU':'火箭',\r\n \r\n 'LAL':'湖人','OKC':'雷霆','MEM':'灰熊','PHO':'太阳','POR':'开拓者','NOH':'鹈鹕','SAC':'国王',\r\n \r\n 'UTA':'爵士','SAS':'马刺'}\r\n\r\nNEWS = {'新浪体育' : ['sina1', 'sina5', 'sina6', 'sina12', 'sina14', 'sina15', 'sina25'],\r\n '搜狐体育' : ['sohu'],\r\n '网易体育' : ['netease'],\r\n '虎扑体育' : ['hupu']\r\n}\r\n\r\nDATA_SIZE = 22000\r\nTEAM_SIZE = 30\r\n\r\ntry:\r\n from django.utils import simplejson as json\r\nexcept ImportError:\r\n import json\r\n\r\ndef replaceAll(old, new, str):\r\n while str.find(old) > -1:\r\n str = str.replace(old, new)\r\n return str\r\n\r\n '''\r\n es.index(\"news\", {\"name\": \"14日夏季联赛综述:伯顿压哨抛投绝杀魔术\", \"label\" : \"魔术 魔术王\", \"my_time\" : \"2018-07-14\", \"source\": \"baidu\", \"link\" : \"http://nbachina.qq.com/a/20180714/013195.htm\", \"content\" : \"尼克斯102-83轻取鹈鹕,丹尼尔-奥切夫22分8篮板,泰瑞斯-沃克16分5篮板6助攻5抢断,丹伊恩-多特森15分6篮板5助攻,米切尔-罗宾逊14分12篮板5封盖;鹈鹕方面,加尔伦-格林23分,DJ-霍格16分,查森-兰德尔11分3篮板。\"})\r\n es.index(\"news\", {\"name\": \"曝若安东尼恢复自由身 火箭将最有希望得到他\", \"label\" : \"魔术王\", \"time\" : \"2018-07-13\", \"source\": \"yahoo\", \"link\" : \"http://nbachina.qq.com/a/20180714/003886.htm\", \"content\" : \"北京时间7月14日,据雅虎记者沙姆斯-查拉尼亚报道,消息人士透露,一旦尼克斯前锋卡梅隆-安东尼成为自由球员,那么休斯顿火箭将会是青睐甜瓜的所有球队中的领跑者。\"})\r\n es.index(\"player\", {\"name\": \"詹姆斯\", \"content\" : \"Lebron James\", \"中文名字\" : \"勒布朗·詹姆斯\", \"英文名字\" : \"Lebron James\", \"身高\" : \"2.03m\", \"体重\" : \"113kg\", \"出生日期\" : \"1984-12-30\", \"出生地点\" : \"俄亥俄州阿克伦城\", \"位置\" : \"前锋/后卫\"})\r\n es.index(\"team\", {\"name\": \"湖人\", \"content\" : \"Lakers\", \"中文名字\" : \"湖人队\", \"英文名字\" : \"Lakers\", \"所属地区\" : \"西部\", \"成立时间\" : \"1948\", \"主球馆\" : \"斯台普斯中心球馆\", \"拥有者\" : \"珍妮-巴斯(Jeanie Buss)\", \"赛区\" : \"太平洋区\", \"主教练\" : \"卢克-沃顿(Luke Walton)\"})\r\n es.index(\"data\", {\"name\": \"\", \"team\" : \"湖人\", \"content\" : \"湖人 詹姆斯 2018-2019\", \"season\" : \"2018-2019\", \"player\" : \"詹姆斯\", \"出场次数\" : \"82\", \"首发次数\" : \"82\", \"场均上场时间\" : \"46.5\", \"投篮命中率\" : \"60.0\", \"场均投篮出手次数\" : \"25.0\", \"场均投篮命中次数\" : \"15.0\", \"三分球命中率\" : \"36.5\", \"场均三分出手次数\" : \"9.0\", \"场均三分命中次数\" : \"3.3\", \"罚球命中率\" : \"90.0\", \"场均罚球出手次数\" : \"10.0\", \"场均罚球命中次数\" : \"9.0\", \"场均篮板\" : \"15.0\", \"前场篮板\" : \"7.0\", \"后场篮板\" : \"8.0\", \"场均助攻\" : \"10.2\", \"场均抢断\" : \"2.0\", \"场均盖帽\" : \"1.2\", \"场均失误\" : \"6.2\", \"场均犯规\" : \"4.3\", \"场均得分\" : \"36.4\"})\r\n es.index(\"data\", {\"name\": \"\", \"team\" : \"骑士\", \"content\" : \"骑士 詹姆斯 2017-2018\", \"season\" : \"2017-2018\", \"player\" : \"詹姆斯\", \"出场次数\" : \"82\", \"首发次数\" : \"82\", \"场均上场时间\" : \"46.5\", \"投篮命中率\" : \"60.0\", \"场均投篮出手次数\" : \"25.0\", \"场均投篮命中次数\" : \"15.0\", \"三分球命中率\" : \"36.5\", \"场均三分出手次数\" : \"9.0\", \"场均三分命中次数\" : \"3.3\", \"罚球命中率\" : \"90.0\", \"场均罚球出手次数\" : \"10.0\", \"场均罚球命中次数\" : \"9.0\", \"场均篮板\" : \"15.0\", \"前场篮板\" : \"7.0\", \"后场篮板\" : \"8.0\", \"场均助攻\" : \"10.2\", \"场均抢断\" : \"2.0\", \"场均盖帽\" : \"1.2\", \"场均失误\" : \"6.2\", \"场均犯规\" : \"4.3\", \"场均得分\" : \"36.4\"})\r\n '''\r\n\r\ndef read_news(f, source):\r\n es = ElasticSearchClass()\r\n\r\n data = json.load(f)\r\n \r\n try:\r\n for 
i in data:\r\n content = i['content']\r\n content = content.replace('\\n','<br>')\r\n content = content.replace('\\u3000', \"\")\r\n content = replaceAll('<br><br>', '<br>', content)\r\n \r\n try:\r\n if content.index('<br>') == 0:\r\n content = content[4:-1]\r\n except Exception:\r\n print(\"None extra <br>\")\r\n es.index(\"news\", {\"name\" : i['title'], \"label\" : i['label'], \r\n \"my_time\": i['time'], \"source\" : source, \"link\" : i['url'], \"content\" : content})\r\n \r\n except Exception:\r\n print('File add error')\r\n\r\ndef read_team(table):\r\n es = ElasticSearchClass()\r\n \r\n title = table.row_values(0)\r\n \r\n line = 1\r\n try:\r\n while len(table.row_values(line)) > 0 :\r\n body = {}\r\n p = table.row_values(line)\r\n es.index(\"team\", {\"name\": p[1], \"content\" : p[2], \"中文名字\" : p[1], \"英文名字\" : p[2], \"所属地区\" : p[3], \"成立时间\" : p[4], \"主球馆\" : p[5], \"拥有者\" : p[6], \"赛区\" : p[7], \"主教练\" : p[8], \"image_link\" : p[9]})\r\n line = line + 1\r\n except Exception:\r\n print(\"read team over\")\r\n\r\ndef read_player(table):\r\n es = ElasticSearchClass()\r\n \r\n title = table.row_values(0)\r\n line = 1\r\n try:\r\n while len(table.row_values(line)) > 0 :\r\n body = {}\r\n p = table.row_values(line)\r\n es.index(\"player\", {\"name\": p[1], \"content\" : p[7], \"位置\" : p[2], \"身高\" : p[3], \"体重\" : p[4], \"出生年月\" : p[5], \"出生城市\" : p[6], \"中文名\" : p[7], \"image_link\" : p[8]})\r\n line = line + 1\r\n except Exception:\r\n print(\"read player over\")\r\n\r\ndef read_data(table):\r\n es = ElasticSearchClass()\r\n \r\n title = table.row_values(0)\r\n print(title)\r\n line = 1\r\n try:\r\n while len(table.row_values(line)) > 0 :\r\n body = {}\r\n p = table.row_values(line)\r\n es.index(\"data\", {\"name\": p[1], \"content\" : p[7], \"位置\" : p[2], \"身高\" : p[3], \"体重\" : p[4], \"出生年月\" : p[5], \"出生城市\" : p[6], \"中文名\" : p[7], \"image_link\" : p[8]})\r\n line = line + 1\r\n except Exception:\r\n print(\"read data over\")\r\n\r\ndef index(request):\r\n es = ElasticSearchClass()\r\n '''\r\n print(len(es.search_all(\"news\")))\r\n es.delete_all()\r\n\r\n for key in NEWS:\r\n for file_name in NEWS[key]:\r\n try:\r\n file_name = file_name + \".json\"\r\n print(file_name)\r\n f = open( file_name, encoding='utf-8') \r\n read_news(f, key)\r\n except Exception:\r\n print('File not found')\r\n\r\n try:\r\n data = xlrd.open_workbook('team_info.xlsx')\r\n table = data.sheet_by_name('team_info')\r\n except Exception:\r\n print('File not found')\r\n \r\n read_team(table)\r\n\r\n try:\r\n data = xlrd.open_workbook('nba_player_info.xlsx')\r\n table = data.sheet_by_name('player_info')\r\n except Exception:\r\n print('File not found')\r\n\r\n read_player(table)\r\n\r\n #es.delete_all()\r\n\r\n for key,value in TEAM_DIC.items():\r\n data = xlrd.open_workbook('nba_team_reg_data(%s).xlsx' % key)\r\n for i in range(1946,2018):\r\n try:\r\n table = data.sheet_by_name('regularseason_data ' + str(i))\r\n except Exception:\r\n continue\r\n title = table.row_values(0)\r\n line = 1\r\n try:\r\n while len(table.row_values(line)) > 0 :\r\n #while line <= 1:\r\n body = {}\r\n p = table.row_values(line)\r\n if(p[1] != '总计'):\r\n es.index(\"data\", {\r\n \"name\": p[1], \r\n \"content\" : p[1], \r\n \"team\" : value,\r\n \"season\" : str(i),\r\n \"球员\" : p[1], \r\n \"出场\" : p[2], \r\n \"首发\" : p[3], \r\n \"时间\" : p[4], \r\n \"投篮\" : p[5], \r\n \"投篮命中\" : p[6], \r\n \"投篮出手\" : p[7],\r\n \"三分\" : p[8],\r\n \"三分命中\" : p[9],\r\n \"三分出手\" : p[10],\r\n \"罚球\" : p[11],\r\n \"罚球命中\" : p[12],\r\n \"罚球出手\" : 
p[13],\r\n \"篮板\" : p[14],\r\n \"前场篮板\" : p[15],\r\n \"后场篮板\" : p[16],\r\n \"助攻\" : p[17],\r\n \"抢断\" : p[18],\r\n \"盖帽\" : p[19],\r\n \"失误\" : p[20],\r\n \"犯规\" : p[21],\r\n \"得分\" : p[22]\r\n }\r\n )\r\n line = line + 1\r\n print(key, i, line - 1, p[1])\r\n except Exception:\r\n print(\"read data over\")\r\n '''\r\n #print(es.search_data(\"詹姆斯\"))\r\n #print(table.row_values(0))\r\n return HttpResponse(es.count_all())\r\n"
},
{
"alpha_fraction": 0.3847312927246094,
"alphanum_fraction": 0.3928232491016388,
"avg_line_length": 34.183837890625,
"blob_id": "0af8cd16898df02ab82c940c0e1ec3c1ed2dd564",
"content_id": "ff95be37cb0fcf3be30709cc1997db173c3cfa6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18003,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 495,
"path": "/search/elasticsearch.py",
"repo_name": "LucienXian/NBA_search_engine",
"src_encoding": "UTF-8",
"text": "from elasticsearch import Elasticsearch\r\nfrom elasticsearch import helpers\r\nimport datetime\r\nimport time\r\n\r\nabstract_length = 40\r\n\r\ndef toShortName(name):\r\n short_name = name[-5:-1]\r\n if short_name == '凯尔特人' :\r\n return short_name\r\n short_name = name[-4:-1]\r\n if short_name in ['76人','步行者','独行侠','森林狼'] :\r\n return short_name\r\n short_name = name[-3:-1]\r\n return short_name \r\n\r\nclass ElasticSearchClass(object):\r\n type_list = [\"all\", \"news\", \"team\", \"player\", \"data\"]\r\n max_window_size = 1000\r\n idx = 0\r\n index_table = {}\r\n\r\n def __init__(self):\r\n self.es = Elasticsearch(['localhost:9200'])\r\n \r\n def count_all(self):\r\n return self.es.count(index = None, doc_type = None)['count']\r\n \r\n def index(self, type_name, body):\r\n self.es.index(index = 'my_index', doc_type = type_name, id = None ,body = body)\r\n self.idx = self.idx + 1\r\n print(self.idx)\r\n\r\n def delete_all(self):\r\n\r\n allDoc = self.search_all(\"all\")\r\n\r\n for i in allDoc:\r\n j = i['_type']\r\n try:\r\n print(\"delete \"+ j + \" \" + i['_id'])\r\n self.es.delete(index = 'my_index', doc_type = j, id = i['_id'])\r\n except Exception:\r\n print(\"delete \"+ j + \" \" + i['_id'] + \" error\")\r\n continue\r\n\r\n def get_by_id(self, id):\r\n try:\r\n query = {'query': {\"term\": { \"_id\" : id}}}\r\n allDoc = self.es.search(None, None, query)\r\n return allDoc['hits']['hits'][0]\r\n except Exception as err:\r\n print(err)\r\n return {}\r\n \r\n def get_by_index(self, index):\r\n list = []\r\n try:\r\n for i in self.index_table[index]:\r\n list.append(self.get_by_id(i))\r\n except Exception as err:\r\n print(err)\r\n return list\r\n \r\n def make_index_table(self):\r\n list = self.search_all('news')\r\n if self.index_table != {}:\r\n self.index_table = {}\r\n for i in list:\r\n for j in i['_source']['label'].strip(',').split(' '):\r\n try:\r\n self.index_table[j].append(i['_id'])\r\n except Exception:\r\n self.index_table[j] = [i['_id']]\r\n\r\n def make_abstract(self, list_step1, search_str):\r\n list_step2 = []\r\n abstract = \"\"\r\n list_search_str = []\r\n \r\n if('Untitled' not in search_str):\r\n list_i = search_str.strip(',').split(' ')\r\n list_search_str.extend(list_i)\r\n\r\n for j in list_step1:\r\n content = j['_source']['content']\r\n title = j['_source']['name']\r\n j['_source']['keyword'] = search_str\r\n j['_score'] = 0\r\n index = -1\r\n for temp in list_search_str:\r\n try:\r\n if title.count(temp) != 0:\r\n index = title.index(temp)\r\n j['_source']['place'] = 'title'\r\n j['_source']['keyword'] = temp\r\n j['_score'] = title.count(temp) + content.count(temp) * 0.05\r\n '''\r\n abstract = title[index : index + abstract_length]\r\n if(len(abstract) < abstract_length):\r\n abstract = '...' + title[index + len(abstract) - abstract_length: index + abstract_length]\r\n else:\r\n abstract = abstract + '...'\r\n ''' \r\n break\r\n if content.count(temp) != 0:\r\n index = content.index(temp)\r\n j['_source']['place'] = 'content'\r\n j['_source']['keyword'] = temp\r\n j['_score'] = title.count(temp) + content.count(temp) * 0.05\r\n '''\r\n abstract = content[index : index + abstract_length]\r\n if(len(abstract) < abstract_length):\r\n abstract = '...' 
+ content[index + len(abstract) - abstract_length: index + abstract_length]\r\n else:\r\n abstract = abstract + '...'\r\n '''\r\n break\r\n except Exception:\r\n continue\r\n else:\r\n j['_source']['place'] = 'nowhere'\r\n j['_source']['index'] = index\r\n #j['_source']['abstract'] = abstract\r\n list_step2.append(j)\r\n \r\n return list_step2\r\n\r\n def search_all(self, type_name):\r\n\r\n if type_name == 'all':\r\n result = helpers.scan(\r\n self.es,\r\n query = {\r\n 'query': {\r\n 'match_all': {}\r\n }\r\n },\r\n index = 'my_index',\r\n doc_type = None\r\n )\r\n \r\n final_result = []\r\n for item in result:\r\n final_result.append(item)\r\n \r\n allDoc = final_result\r\n else:\r\n result = helpers.scan(\r\n self.es,\r\n query = {\r\n 'query': {\r\n 'match_all': {}\r\n }\r\n },\r\n index = 'my_index',\r\n doc_type = type_name\r\n )\r\n \r\n final_result = []\r\n for item in result:\r\n final_result.append(item)\r\n \r\n allDoc = final_result\r\n \r\n list_step1 = allDoc\r\n\r\n '''\r\n for j in allDoc:\r\n for k in list_step1:\r\n if j['_id'] == k['_id']:\r\n break\r\n else:\r\n list_step1.append(j)\r\n '''\r\n #list_step1 = self.make_abstract(list_step1, ['Untitled'])\r\n \r\n return list_step1\r\n\r\n def search(self, type_name, keywords):\r\n if type_name == 'all':\r\n result = helpers.scan(\r\n self.es,\r\n query = {\r\n 'query':{\r\n \"multi_match\":{\r\n \"query\" : keywords, \r\n \"fields\": [ \"name\", \"content\" ] \r\n }\r\n }\r\n },\r\n index = 'my_index',\r\n doc_type = None\r\n )\r\n \r\n final_result = []\r\n for item in result:\r\n final_result.append(item)\r\n \r\n allDoc = final_result\r\n else:\r\n result = helpers.scan(\r\n self.es,\r\n query = {\r\n 'query': {\r\n \"multi_match\": {\r\n \"query\" : keywords, \r\n \"fields\": [ \"name\", \"content\" ] \r\n }\r\n }\r\n },\r\n index = 'my_index',\r\n doc_type = type_name\r\n )\r\n \r\n final_result = []\r\n for item in result:\r\n final_result.append(item)\r\n \r\n allDoc = final_result\r\n return allDoc\r\n\r\n def search_keywords(self, search_type, search_str):\r\n list_step1 = []\r\n '''\r\n for i in search_str:\r\n temp = self.search(search_type, i)\r\n for j in temp:\r\n for k in list_step1:\r\n if j['_id'] == k['_id']:\r\n break\r\n else:\r\n list_step1.append(j)\r\n '''\r\n list_step1 = self.search(search_type, search_str)\r\n list_step1 = self.make_abstract(list_step1, search_str)\r\n return list_step1\r\n\r\n def search_team(self, search_str):\r\n temp = []\r\n search_list = search_str.strip(',').split(' ')\r\n \r\n if len(search_list) < 1:\r\n return []\r\n \r\n list_all_team = self.search_all(\"team\")\r\n\r\n for i in list_all_team:\r\n for j in search_list:\r\n #print(i['_source']['name'])\r\n if j in i['_source']['name'] or j.upper() in i['_source']['content'].upper():\r\n temp.append(i)\r\n break\r\n \r\n return temp\r\n\r\n def search_player(self, search_str):\r\n temp = []\r\n search_list = search_str.strip(',').split(' ')\r\n if len(search_list) < 1:\r\n return []\r\n \r\n list_all_team = self.search_all(\"player\")\r\n\r\n for i in list_all_team:\r\n for j in search_list:\r\n #print(i['_source']['name'])\r\n if j in i['_source']['name'] or j.upper() in i['_source']['content'].upper():\r\n temp.append(i)\r\n break\r\n\r\n return temp\r\n \r\n def search_data(self, search_str):\r\n temp = []\r\n search_list = search_str.strip(',').split(' ')\r\n if len(search_list) < 1:\r\n return []\r\n \r\n list_all_data = self.search_all(\"data\")\r\n\r\n for i in list_all_data:\r\n for j in search_list:\r\n 
#print(i['_source']['name'])\r\n if j in i['_source']['name'] or j in i['_source']['team']:\r\n temp.append(i)\r\n break\r\n\r\n return temp\r\n \r\n def search_data_season(self, search_str, season):\r\n temp = []\r\n search_list = search_str.strip(',').split(' ')\r\n if len(search_list) < 1:\r\n return []\r\n \r\n list_all_data = self.search_all(\"data\")\r\n\r\n for i in list_all_data:\r\n for j in search_list:\r\n #print(i['_source']['name'])\r\n if j in i['_source']['team'] and season in i['_source']['season']:\r\n temp.append(i)\r\n break\r\n\r\n return temp\r\n\r\n def sort(self, list, sign) :\r\n lenl = len(list)\r\n if sign == \"by time\": \r\n for i in range(0, lenl):\r\n for j in range(i, lenl):\r\n #print(list[i]['_source']['my_time'], list [j]['_source']['my_time'])\r\n if(list[i]['_source']['my_time'] < list [j]['_source']['my_time']):\r\n list[i], list[j] = list[j], list[i]\r\n if sign == \"by season\": \r\n for i in range(0, lenl):\r\n for j in range(i, lenl):\r\n #print(list[i]['_source']['season'], list [j]['_source']['season'])\r\n if(list[i]['_source']['season'] < list [j]['_source']['season']):\r\n list[i], list[j] = list[j], list[i]\r\n if sign == \"by defen\": \r\n for i in range(0, lenl):\r\n for j in range(i, lenl):\r\n #print(list[i]['_source']['season'], list [j]['_source']['season'])\r\n if(list [j]['_source']['name'] == '全队数据'):\r\n list[i], list[j] = list[j], list[i]\r\n elif(list [j]['_source']['name'] == '对手数据'):\r\n list[i], list[j] = list[j], list[i]\r\n elif(float(list[i]['_source']['得分']) < float(list [j]['_source']['得分'])):\r\n list[i], list[j] = list[j], list[i]\r\n return list\r\n\r\n def filter(self, list, condition, keywordlist):\r\n if(\"all\" in keywordlist):\r\n return list\r\n if(condition == 'source'):\r\n if keywordlist == []:\r\n return list\r\n j = 0\r\n while j < len(list):\r\n try:\r\n if condition == 'source':\r\n temp = list[j]['_source']['source']\r\n list_temp = temp.strip(',').split(' ')\r\n for i in keywordlist:\r\n if i in list_temp:\r\n j = j + 1\r\n break\r\n else:\r\n del list[j]\r\n except Exception as err:\r\n continue\r\n elif(condition == 'label'):\r\n j = 0\r\n while j < len(list):\r\n if condition == 'label':\r\n temp = list[j]['_source']['label']\r\n list_temp = temp.strip(',').split(' ')\r\n for i in keywordlist:\r\n if i in list_temp:\r\n j = j + 1\r\n break\r\n else:\r\n del list[j]\r\n elif(condition == 'by team'):\r\n if(keywordlist == []):\r\n return list\r\n j = 0\r\n while j < len(list):\r\n if condition == 'by team':\r\n temp1 = list[j]['_source']['name']\r\n temp2 = list[j]['_source']['content']\r\n list_temp1 = temp1.strip(',').split(' ')\r\n list_temp2 = temp2.strip(',').split(' ')\r\n for i in keywordlist:\r\n if i in list_temp1 or i in list_temp2:\r\n j = j + 1\r\n break\r\n else:\r\n del list[j]\r\n elif(condition == 'by player'):\r\n if(keywordlist == []):\r\n return list\r\n j = 0\r\n \r\n while j < len(list):\r\n if condition == 'by player':\r\n temp1 = list[j]['_source']['name']\r\n temp2 = list[j]['_source']['content']\r\n list_temp1 = temp1.strip(',').split(' ')\r\n list_temp2 = temp2.strip(',').split(' ')\r\n for i in keywordlist:\r\n if i in list_temp1 or i in list_temp2:\r\n j = j + 1\r\n break\r\n else:\r\n del list[j]\r\n elif(condition == 'recent_1day'):\r\n now = datetime.datetime.now()\r\n recent_1day = (now - datetime.timedelta(days=2)).strftime(\"%Y-%m-%d %H:%M:%S\")\r\n j = 0\r\n while j < len(list):\r\n try:\r\n temp = list[j]['_source']['my_time']\r\n if(temp < recent_1day):\r\n del 
list[j]\r\n else:\r\n j = j + 1\r\n except Exception as err:\r\n continue\r\n elif(condition == 'recent_7day'):\r\n now = datetime.datetime.now()\r\n recent_1day = (now - datetime.timedelta(days=8)).strftime(\"%Y-%m-%d %H:%M:%S\")\r\n j = 0\r\n while j < len(list):\r\n try:\r\n temp = list[j]['_source']['my_time']\r\n if(temp < recent_1day):\r\n del list[j]\r\n else:\r\n j = j + 1\r\n except Exception as err:\r\n continue\r\n elif(condition == 'recent_30day'):\r\n now = datetime.datetime.now()\r\n recent_1day = (now - datetime.timedelta(days=31)).strftime(\"%Y-%m-%d %H:%M:%S\")\r\n j = 0\r\n while j < len(list):\r\n try:\r\n temp = list[j]['_source']['my_time']\r\n if(temp < recent_1day):\r\n del list[j]\r\n else:\r\n j = j + 1\r\n except Exception as err:\r\n continue\r\n return list\r\n\r\n def count_recent(self, list):\r\n try:\r\n list1 = []\r\n list2 = []\r\n list3 = []\r\n \r\n list1.extend(list)\r\n list2.extend(list)\r\n list3.extend(list)\r\n\r\n list_recent_1day = self.filter(list1, 'recent_1day', [])\r\n list_recent_7day = self.filter(list2, 'recent_7day', [])\r\n list_recent_30day = self.filter(list3, 'recent_30day', [])\r\n list_recent_all = self.filter(list, 'recent_all', [])\r\n \r\n count_recent_1day = len(list_recent_1day)\r\n count_recent_7day = len(list_recent_7day)\r\n count_recent_30day = len(list_recent_30day)\r\n count_recent_all = len(list)\r\n\r\n return {'count_recent_all' : count_recent_all, \r\n 'count_recent_1day' : count_recent_1day, \r\n 'count_recent_7day' : count_recent_7day, \r\n 'count_recent_30day' : count_recent_30day}\r\n except Exception:\r\n return {}\r\n \r\n def count_source(self, list):\r\n try:\r\n list1 = []\r\n list2 = []\r\n list3 = []\r\n list4 = []\r\n \r\n list1.extend(list)\r\n list2.extend(list)\r\n list3.extend(list)\r\n list4.extend(list)\r\n\r\n list_source_souhu = self.filter(list1, 'source', ['搜狐体育'])\r\n list_source_sina = self.filter(list2, 'source', ['新浪体育'])\r\n list_source_wangyi = self.filter(list3, 'source', ['网易体育'])\r\n list_source_hupu = self.filter(list4, 'source', ['虎扑体育'])\r\n \r\n count_source_souhu = len(list_source_souhu)\r\n count_source_sina = len(list_source_sina)\r\n count_source_wangyi = len(list_source_wangyi)\r\n count_source_hupu = len(list_source_hupu)\r\n\r\n return {'count_source_souhu' : count_source_souhu, \r\n 'count_source_sina' : count_source_sina, \r\n 'count_source_wangyi' : count_source_wangyi, \r\n 'count_source_hupu' : count_source_hupu}\r\n except Exception:\r\n return {}\r\n "
}
] | 4 |
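A note on the Elasticsearch client above: its `filter(..., 'recent_*day')` branches recompute a cutoff timestamp string and then prune the hit list by hand in Python. The same effect can be obtained server-side with a `bool` query plus a `range` filter. A minimal sketch, assuming the same `my_index` index, the same `my_time` field format, and the pre-7.x elasticsearch-py client the class already uses (this is not part of the repository):

from datetime import datetime, timedelta
from elasticsearch import Elasticsearch

es = Elasticsearch(['localhost:9200'])

def search_recent(keywords, days):
    # Same zero-padded "%Y-%m-%d %H:%M:%S" format the class writes into
    # 'my_time', so a string/keyword range comparison is chronological.
    cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d %H:%M:%S')
    body = {
        'query': {
            'bool': {
                'must': {'multi_match': {'query': keywords, 'fields': ['name', 'content']}},
                'filter': {'range': {'my_time': {'gte': cutoff}}},
            }
        }
    }
    return es.search(index='my_index', body=body)['hits']['hits']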
gabin8/AndroidOpenAccessoryBridge | https://github.com/gabin8/AndroidOpenAccessoryBridge | 194f2cdba286e5acb88c1671bea170cd942eee30 | 8579377658216600c754de5aae74594cb3aceb77 | d31fe4025b0ae11dd8bcbb8f92329e30d49d6c7b | refs/heads/master | 2021-08-23T21:49:35.265401 | 2017-12-06T18:12:32 | 2017-12-06T18:12:32 | 113,335,327 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5550157427787781,
"alphanum_fraction": 0.5578736662864685,
"avg_line_length": 34.52284240722656,
"blob_id": "84e71339c982de90aaf92e14816385aef5769dea",
"content_id": "dab25d56797d20dec1d39eb8874ce36fb0d9d4b5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 6998,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 197,
"path": "/AndroidOpenAccessoryBridge/library/src/main/java/com/covertbagel/androidopenaccessorybridge/AndroidOpenAccessoryBridge.java",
"repo_name": "gabin8/AndroidOpenAccessoryBridge",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright 2015 Christopher Blay <[email protected]>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\npackage com.covertbagel.androidopenaccessorybridge;\n\nimport android.content.Context;\nimport android.hardware.usb.UsbAccessory;\nimport android.hardware.usb.UsbManager;\nimport android.os.Handler;\nimport android.os.Looper;\nimport android.os.Message;\nimport android.os.ParcelFileDescriptor;\nimport android.util.Log;\n\nimport java.io.Closeable;\nimport java.io.FileDescriptor;\nimport java.io.FileInputStream;\nimport java.io.FileOutputStream;\nimport java.io.IOException;\n\npublic class AndroidOpenAccessoryBridge {\n\n private static final String TAG = AndroidOpenAccessoryBridge.class.getSimpleName();\n private static final long CONNECT_COOLDOWN_MS = 100;\n private static final long READ_COOLDOWN_MS = 100;\n private Listener mListener;\n private UsbManager mUsbManager;\n private BufferHolder mReadBuffer;\n private InternalThread mInternalThread;\n private boolean mIsShutdown;\n private boolean mIsAttached;\n private FileOutputStream mOutputStream;\n private FileInputStream mInputStream;\n private ParcelFileDescriptor mParcelFileDescriptor;\n\n public AndroidOpenAccessoryBridge(final Context context, final Listener listener) {\n if (BuildConfig.DEBUG && (context == null || listener == null)) {\n throw new AssertionError(\"Arguments context and listener must not be null\");\n }\n mListener = listener;\n mUsbManager = (UsbManager) context.getSystemService(Context.USB_SERVICE);\n mReadBuffer = new BufferHolder();\n mInternalThread = new InternalThread();\n mInternalThread.start();\n }\n\n public synchronized boolean write(final BufferHolder bufferHolder) {\n if (BuildConfig.DEBUG && (mIsShutdown || mOutputStream == null)) {\n throw new AssertionError(\"Can't write if shutdown or output stream is null\");\n }\n try {\n return bufferHolder.write(mOutputStream);\n } catch (IOException exception) {\n mInternalThread.terminate();\n return false;\n }\n }\n\n private class InternalThread extends Thread {\n\n private static final int STOP_THREAD = 1;\n private static final int MAYBE_READ = 2;\n\n private Handler mHandler;\n\n @Override\n public void run() {\n Looper.prepare();\n mHandler = new Handler() {\n @Override\n public void handleMessage(Message msg) {\n switch (msg.what) {\n case STOP_THREAD:\n Looper.myLooper().quit();\n break;\n case MAYBE_READ:\n final boolean readResult;\n try {\n readResult = mReadBuffer.read(mInputStream);\n } catch (IOException exception) {\n terminate();\n break;\n }\n if (readResult) {\n if (mReadBuffer.size == 0) {\n mHandler.sendEmptyMessage(STOP_THREAD);\n } else {\n mListener.onAoabRead(mReadBuffer);\n mReadBuffer.reset();\n mHandler.sendEmptyMessage(MAYBE_READ);\n }\n } else {\n mHandler.sendEmptyMessageDelayed(MAYBE_READ, READ_COOLDOWN_MS);\n }\n break;\n }\n }\n };\n detectAccessory();\n Looper.loop();\n detachAccessory();\n mIsShutdown = true;\n mListener.onAoabShutdown();\n\n // Clean stuff up\n mHandler = null;\n mListener 
= null;\n mUsbManager = null;\n mReadBuffer = null;\n mInternalThread = null;\n }\n\n void terminate() {\n mHandler.sendEmptyMessage(STOP_THREAD);\n }\n\n private void detectAccessory() {\n while (!mIsAttached) {\n if (mIsShutdown) {\n mHandler.sendEmptyMessage(STOP_THREAD);\n return;\n }\n try {\n Thread.sleep(CONNECT_COOLDOWN_MS);\n } catch (InterruptedException exception) {\n // pass\n }\n final UsbAccessory[] accessoryList = mUsbManager.getAccessoryList();\n if (accessoryList == null || accessoryList.length == 0) {\n continue;\n }\n if (accessoryList.length > 1) {\n Log.w(TAG, \"Multiple accessories attached!? Using first one...\");\n }\n maybeAttachAccessory(accessoryList[0]);\n }\n }\n\n private void maybeAttachAccessory(final UsbAccessory accessory) {\n final ParcelFileDescriptor parcelFileDescriptor = mUsbManager.openAccessory(accessory);\n if (parcelFileDescriptor != null) {\n final FileDescriptor fileDescriptor = parcelFileDescriptor.getFileDescriptor();\n mIsAttached = true;\n mOutputStream = new FileOutputStream(fileDescriptor);\n mInputStream = new FileInputStream(fileDescriptor);\n mParcelFileDescriptor = parcelFileDescriptor;\n mHandler.sendEmptyMessage(MAYBE_READ);\n }\n }\n\n private void detachAccessory() {\n if (mIsAttached) {\n mIsAttached = false;\n }\n if (mInputStream != null) {\n closeQuietly(mInputStream);\n mInputStream = null;\n }\n if (mOutputStream != null) {\n closeQuietly(mOutputStream);\n mOutputStream = null;\n }\n if (mParcelFileDescriptor != null) {\n closeQuietly(mParcelFileDescriptor);\n mParcelFileDescriptor = null;\n }\n }\n\n private void closeQuietly(Closeable closable) {\n try {\n closable.close();\n } catch (IOException exception) {\n // pass\n }\n }\n\n }\n\n public interface Listener {\n void onAoabRead(BufferHolder bufferHolder);\n void onAoabShutdown();\n }\n\n}\n"
},
{
"alpha_fraction": 0.6135770082473755,
"alphanum_fraction": 0.6579634547233582,
"avg_line_length": 18.200000762939453,
"blob_id": "18ef6287ed27c8d871c5be84f60ef49957030d96",
"content_id": "fd7c38231c3cae63367d17878faa570e7fa0b9f2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Gradle",
"length_bytes": 383,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 20,
"path": "/AndroidOpenAccessoryBridge/library/build.gradle",
"repo_name": "gabin8/AndroidOpenAccessoryBridge",
"src_encoding": "UTF-8",
"text": "apply plugin: 'com.android.library'\n\nversion = '1.1'\n\nandroid {\n compileSdkVersion 27\n buildToolsVersion '27.0.2'\n\n defaultConfig {\n minSdkVersion 17\n targetSdkVersion 27\n versionCode 1\n versionName version\n }\n\n compileOptions {\n sourceCompatibility JavaVersion.VERSION_1_8\n targetCompatibility JavaVersion.VERSION_1_8\n }\n}"
},
{
"alpha_fraction": 0.5580102801322937,
"alphanum_fraction": 0.5705530047416687,
"avg_line_length": 36.1216926574707,
"blob_id": "8bd4fdc723384625b86e4eaf6409f5a808ee0586",
"content_id": "b4e970e0fca4e77e4deb66ae0cbc6b40ef7d09c5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7018,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 189,
"path": "/android_open_accessory_bridge.py",
"repo_name": "gabin8/AndroidOpenAccessoryBridge",
"src_encoding": "UTF-8",
"text": "#!/usr/local/bin/python3\n# coding=utf-8\n\n# Copyright 2015 Christopher Blay <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function, unicode_literals\n\nimport array\nimport sys\nimport time\nimport usb\n\n\nNEXUS4_USB_IDS = (0x18d1, 0x4ee2, 0x2d01)\n_B = 'B' if sys.version_info.major == 3 else b'B'\n\n\nclass AndroidOpenAccessoryBridge:\n\n def __init__(self,\n vendor_id, unconfigured_product_id, configured_product_id,\n manufacturer, model, description, version, uri, serial):\n self._vendor_id = int(vendor_id)\n self._unconfigured_product_id = int(unconfigured_product_id)\n self._configured_product_id = int(configured_product_id)\n self._device = self._configureAndOpenDevice(\n str(manufacturer),\n str(model),\n str(description),\n str(version),\n str(uri),\n str(serial))\n self._endpoint_out, self._endpoint_in = self._detectEndpoints()\n\n def __enter__(self):\n return self # All 'enter' work is done in __init__().\n\n def __exit__(self, type, value, traceback):\n self.close()\n\n def _detectDevice(self, attempts_left=5):\n unconfigured_device = usb.core.find(\n idVendor=self._vendor_id, idProduct=self._unconfigured_product_id)\n configured_device = usb.core.find(\n idVendor=self._vendor_id, idProduct=self._configured_product_id)\n if configured_device:\n return configured_device, True\n elif unconfigured_device:\n return unconfigured_device, False\n elif attempts_left:\n time.sleep(1)\n return self._detectDevice(attempts_left - 1)\n else:\n raise usb.core.USBError('Device not connected')\n\n def _configureAndOpenDevice(\n self, manufacturer, model, description, version, uri, serial):\n device, is_configured = self._detectDevice()\n if not is_configured:\n # Validate version code.\n buf = device.ctrl_transfer(0xc0, 51, data_or_wLength=2)\n assert(len(buf) == 2 and (buf[0] | buf[1] << 8) == 2)\n # Send accessory information.\n for i, data in enumerate(\n (manufacturer, model, description, version, uri, serial)):\n assert(device.ctrl_transfer(\n 0x40, 52, wIndex=i, data_or_wLength=data) == len(data))\n # Put device into accessory mode.\n assert(device.ctrl_transfer(0x40, 53) == 0)\n usb.util.dispose_resources(device)\n else:\n # This brings your companion app back to foreground.\n device.reset()\n time.sleep(1)\n\n # Wait for configured device to show up\n attempts_left = 5\n while attempts_left:\n device, is_configured = self._detectDevice()\n if is_configured:\n return device\n time.sleep(1)\n attempts_left -= 1\n raise usb.core.USBError('Device not configured')\n\n def _detectEndpoints(self):\n assert(self._device)\n configuration = self._device.get_active_configuration()\n interface = configuration[(0, 0)]\n\n def first_out_endpoint(endpoint):\n return (usb.util.endpoint_direction(endpoint.bEndpointAddress)\n == usb.util.ENDPOINT_OUT)\n\n def first_in_endpoint(endpoint):\n return (usb.util.endpoint_direction(endpoint.bEndpointAddress)\n == usb.util.ENDPOINT_IN)\n\n endpoint_out = usb.util.find_descriptor(\n interface, 
custom_match=first_out_endpoint)\n endpoint_in = usb.util.find_descriptor(\n interface, custom_match=first_in_endpoint)\n assert(endpoint_out and endpoint_in)\n return endpoint_out, endpoint_in\n\n def write(self, data, timeout=None):\n assert(self._device and self._endpoint_out and data)\n size = len(data)\n size_bytes = array.array(_B, [\n (size & 0x0000ff00) >> 8,\n (size & 0x000000ff)])\n data_bytes = array.array(_B, data)\n while True:\n try:\n bytes_wrote = self._endpoint_out.write(size_bytes,\n timeout=timeout)\n except usb.core.USBError as e:\n if e.errno == 110: # Operation timed out\n continue\n else:\n raise e\n else:\n assert(bytes_wrote == 2)\n break\n assert(self._endpoint_out.write(data_bytes, timeout=timeout) == size)\n\n def read(self, timeout=None):\n assert(self._device and self._endpoint_in)\n try:\n size_bytes = self._endpoint_in.read(2, timeout=timeout)\n size = (size_bytes[0] << 8) | size_bytes[1]\n return self._endpoint_in.read(size, timeout=timeout).tostring()\n except usb.core.USBError as e:\n if e.errno == 110: # Operation timed out.\n return None\n else:\n raise e\n\n def close(self):\n assert(self._device and self._endpoint_out)\n self._endpoint_out.write(array.array(_B, [0, 0]))\n usb.util.dispose_resources(self._device)\n self._device = None\n self._endpoint_out = None\n self._endpoint_in = None\n\n\nif __name__ == '__main__':\n from signal import signal, SIGTERM, SIGINT\n shutdown = False\n\n def signal_handler(signal, frame):\n global shutdown\n shutdown = True\n for signum in (SIGTERM, SIGINT):\n signal(signum, signal_handler)\n\n while not shutdown:\n try:\n with AndroidOpenAccessoryBridge(\n *NEXUS4_USB_IDS,\n manufacturer='AoabManufacturer',\n model='AoabModel',\n description='AoabDescription',\n version=1,\n uri=('https://github.com/chris-blay/'\n 'android-open-accessory-bridge'),\n serial='AoabSerial') as aoab:\n aoab.write('0'.encode('utf-8'))\n while not shutdown:\n bytes = aoab.read()\n if bytes:\n value = int(bytes.decode('utf-8'))\n print('Read in value: {}'.format(value))\n aoab.write(str(value + 1).encode('utf-8'))\n except usb.core.USBError:\n print('USBError occurred. Restarting…')\n"
},
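Both endpoints in this repository speak the same ad-hoc framing, visible in `write()`/`read()` above and in the Java thread's `mReadBuffer.size == 0` shutdown check: a 2-byte big-endian length header followed by the payload, with a zero-length header as the shutdown signal (which is exactly what `close()` sends). The manual bit arithmetic has an equivalent `struct` spelling; a small sketch with hypothetical helper names, not part of the repository:

import struct

def encode_frame(payload):
    # '>H' = big-endian unsigned 16-bit length, as built by hand in write() above.
    return struct.pack('>H', len(payload)) + payload

def decode_size(header):
    # Equivalent to (header[0] << 8) | header[1] in read() above.
    return struct.unpack('>H', bytes(header))[0]

assert encode_frame(b'ping') == b'\x00\x04ping'
assert decode_size(encode_frame(b'ping')[:2]) == 4
assert encode_frame(b'') == b'\x00\x00'  # the shutdown frame sent by close()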
{
"alpha_fraction": 0.7683823704719543,
"alphanum_fraction": 0.7720588445663452,
"avg_line_length": 80.5999984741211,
"blob_id": "2d0409ab6ad5e1da8477a47b92f8ce5a418d718b",
"content_id": "39a7c154aa62900c45f235ea2c033df3c0b422f0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 816,
"license_type": "permissive",
"max_line_length": 307,
"num_lines": 10,
"path": "/README.md",
"repo_name": "gabin8/AndroidOpenAccessoryBridge",
"src_encoding": "UTF-8",
"text": "android-open-accessory-bridge\n=============================\n\nA USB communication bridge using Android Open Accessory Protocol. Allows sending of messages between a Python script running on a PC and Android activity running on an Android device.\n\nIncludes a simple \"ping-pong\" test for Nexus 4. Just install the Android application and then run the Python script. When the script starts the application will launch and they'll send tiny incremental messages back and forth. When you stop the Python script the Android application closes - it's that easy!\n\nPython script requires pyusb at http://sourceforge.net/apps/trac/pyusb/\n\nMy goal in writing this was to make my Nexus 4 into a sensor for a Raspberry Pi. Raspberry Pi doesn't have ADB and Nexus 4 can't easily use USB-OTG so AOAP is the only way to communicate.\n"
}
] | 4 |
eltondornelas/django-semana-dev-python-treinaweb | https://github.com/eltondornelas/django-semana-dev-python-treinaweb | d03fb07a958851bf57a6ea542248d406da5b574f | 7713900815872f8bc20d8045f0b2ec95b6e06c51 | 5b2175c85d2ceaba0a06e1fc678e307ae0c97054 | refs/heads/master | 2022-10-26T08:30:16.054427 | 2020-06-17T02:48:57 | 2020-06-17T02:48:57 | 272,861,741 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.69077068567276,
"alphanum_fraction": 0.6983824968338013,
"avg_line_length": 31.84375,
"blob_id": "01f9ae6a9867662251a9fec687cbc8cd86a962aa",
"content_id": "96cbfe8d312b7e2a27256f5bf12a4b177b891f2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1052,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 32,
"path": "/gerenciador_tarefas/urls.py",
"repo_name": "eltondornelas/django-semana-dev-python-treinaweb",
"src_encoding": "UTF-8",
"text": "\"\"\"gerenciador_tarefas URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('app.urls')),\n # path, como não tem view, ele vai no app a ser chamado\n]\n\n# mysql -u root\n# create database gerenciador_tarefas;\n# use gerenciador_tarefas;\n# show tables;\n# desc app_task;\n# select * from app_task\n# select * from auth_user\n# desc app_task;\n"
},
{
"alpha_fraction": 0.6584346294403076,
"alphanum_fraction": 0.6584346294403076,
"avg_line_length": 31.899999618530273,
"blob_id": "de6d9c5d6a24c0686e7de1de9dc7bccf9c7bd089",
"content_id": "ad4801c193d439ac82bb32bac3c75bdae14daa4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2643,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 80,
"path": "/app/views/task_views.py",
"repo_name": "eltondornelas/django-semana-dev-python-treinaweb",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom ..forms import TaskForm\nfrom app.entities.task import Task\nfrom ..services import task_service\n\n\n# com o @login_required é um decorator que verifica se usuário esta logado\n# se não estiver ele é redirecionado\n# perceba que a página padrão é o account/login; para ajustar isso deve ir em\n# settings.py e criar LOGIN_URL\n\n@login_required()\ndef task_list(request):\n tasks = task_service.task_list(request.user)\n # request.user é o usuário logado\n\n return render(request, 'tasks/task_list.html',\n {\"tasks\": tasks})\n\n\n@login_required()\ndef register_task(request):\n if request.method == 'POST':\n form_task = TaskForm(request.POST)\n # o TaskForm ja valida os campos automaticamente\n\n if form_task.is_valid():\n title = form_task.cleaned_data['title']\n description = form_task.cleaned_data['description']\n expiration_date = form_task.cleaned_data['expiration_date']\n priority = form_task.cleaned_data['priority']\n new_task = Task(title, description,\n expiration_date, priority, request.user)\n\n task_service.register_task(new_task)\n return redirect('task_list_route')\n else:\n form_task = TaskForm()\n\n return render(request, 'tasks/form_task.html', {\"form_task\": form_task})\n\n\n@login_required()\ndef edit_task(request, id):\n task_db = task_service.task_list_id(id)\n\n if task_db.user != request.user:\n return HttpResponse('Não Permitido!')\n\n form_task = TaskForm(request.POST or None, instance=task_db)\n\n if form_task.is_valid():\n title = form_task.cleaned_data['title']\n description = form_task.cleaned_data['description']\n expiration_date = form_task.cleaned_data['expiration_date']\n priority = form_task.cleaned_data['priority']\n\n new_task = Task(title, description,\n expiration_date, priority, request.user)\n\n task_service.edit_task(task_db, new_task)\n return redirect('task_list_route')\n\n return render(request, 'tasks/form_task.html', {\"form_task\": form_task})\n\n\n@login_required()\ndef remove_task(request, id):\n task_db = task_service.task_list_id(id)\n\n if task_db.user != request.user:\n return HttpResponse('Não Permitido!')\n\n if request.method == 'POST':\n task_service.remove_task(task_db)\n return redirect('task_list_route')\n\n return render(request, 'tasks/confirmation.html', {'task': task_db})\n"
},
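The `TaskForm` imported above (`from ..forms import TaskForm`) is not included in this dump. Given `TaskForm(request.POST or None, instance=task_db)` and the `cleaned_data` keys the views read, it is presumably a `ModelForm` over `Task`, roughly along these lines (a reconstruction, not the repository's actual file):

from django import forms
from .models import Task

class TaskForm(forms.ModelForm):
    class Meta:
        model = Task
        # Field names match the cleaned_data lookups in the views above.
        fields = ['title', 'description', 'expiration_date', 'priority']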
{
"alpha_fraction": 0.6606943011283875,
"alphanum_fraction": 0.6685330271720886,
"avg_line_length": 34.7599983215332,
"blob_id": "3a56d34adad31083ddd0fa04f43931bd74d2b083",
"content_id": "1c1e79979f035cec618a06533ee1ea40476fc03b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 904,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 25,
"path": "/app/models.py",
"repo_name": "eltondornelas/django-semana-dev-python-treinaweb",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Task(models.Model):\n PRIORITY_CHOICES = [\n ('H', 'High'),\n ('N', 'Normal'),\n ('L', 'Low')\n ]\n\n title = models.CharField(max_length=30, null=False, blank=False)\n description = models.CharField(max_length=100, null=False, blank=False)\n expiration_date = models.DateField(null=False, blank=False)\n priority = models.CharField(max_length=1, choices=PRIORITY_CHOICES,\n null=False, blank=False)\n\n # relacionando uma tarefa a um usuário\n # porém, um usuário a varias tarefas\n # relação de 1xN = ForeignKey\n\n user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)\n # como possuímos tarefas cadastradas sem relação com usuário\n # ele poderá ser null\n # cascade para apagar todas as tarefas relacionadas a este usuáiro"
},
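The comments above describe the one-user-to-many-tasks side of the `ForeignKey`; Django also generates the reverse accessor automatically. A quick illustration (the username is hypothetical; `task_set` is Django's default reverse name for a `Task` foreign key):

from django.contrib.auth.models import User

user = User.objects.get(username='demo')   # hypothetical user
print(user.task_set.all())                 # every Task whose user FK points here
print(user.task_set.filter(priority='H'))  # only the high-priority ones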
{
"alpha_fraction": 0.7295373678207397,
"alphanum_fraction": 0.7295373678207397,
"avg_line_length": 22.41666603088379,
"blob_id": "40d6074263d9aacf479c949f927f4fb4dd0c3ee9",
"content_id": "ffc02505ad4e628a2dae61ccc5c8bb5a4fdd3bef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 12,
"path": "/app/templatetags/my_filters.py",
"repo_name": "eltondornelas/django-semana-dev-python-treinaweb",
"src_encoding": "UTF-8",
"text": "from django import template\n\nregister = template.Library()\n\n\[email protected](name='add_class')\ndef add_class(value, arg):\n return value.as_widget(attrs={'class': arg})\n\n# recebe um input e a classe;\n# depois adiciona nesse input a classe\n# form_task.title é o input nesse caso\n"
},
{
"alpha_fraction": 0.671410083770752,
"alphanum_fraction": 0.671410083770752,
"avg_line_length": 24.766666412353516,
"blob_id": "bed3332c8300b91cad1a5cfe7bc787e89f60de77",
"content_id": "9d153879b1c6783f2a7b76e9f35d01b06f42b022",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 30,
"path": "/app/services/task_service.py",
"repo_name": "eltondornelas/django-semana-dev-python-treinaweb",
"src_encoding": "UTF-8",
"text": "from app.models import Task\n\n\ndef register_task(task):\n Task.objects.create(title=task.title, description=task.description,\n expiration_date=task.expiration_date,\n priority=task.priority, user=task.user)\n\n\ndef task_list(user):\n return Task.objects.filter(user=user).all()\n # com esse filter, só traz os referentes ao usuario\n # é um SELECT * FROM app_task\n\n\ndef task_list_id(id):\n return Task.objects.get(id=id)\n\n\ndef edit_task(task_db, new_task):\n task_db.title = new_task.title\n task_db.description = new_task.description\n task_db.expiration_date = new_task.expiration_date\n task_db.priority = new_task.priority\n\n task_db.save(force_update=True)\n\n\ndef remove_task(task_db):\n task_db.delete()\n"
},
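Note that `task_list_id` above uses `Task.objects.get(id=id)`, which raises `Task.DoesNotExist` when no row matches; the views assume a hit and would surface a miss as a server error. A defensive variant, shown only as a sketch of the trade-off (not how this repository handles it):

from .models import Task

def task_list_id(id):
    try:
        return Task.objects.get(id=id)
    except Task.DoesNotExist:
        return None  # callers must then handle the missing task explicitly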
{
"alpha_fraction": 0.5563751459121704,
"alphanum_fraction": 0.5563751459121704,
"avg_line_length": 27.75757598876953,
"blob_id": "0b9822abd6fc5a71e1d74165afa1eacdc81818bd",
"content_id": "fb5cd7d860fc5ed7daf1c29c250f0a51381c9e32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 953,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 33,
"path": "/app/templates/tasks/form_task.html",
"repo_name": "eltondornelas/django-semana-dev-python-treinaweb",
"src_encoding": "UTF-8",
"text": "{% extends 'base.html' %}\n{% load my_filters %}\n\n{% block content %}\n\n<form method=\"post\">\n {% csrf_token %}\n <div class=\"form-group\">\n {{ form_task.title.errors }}\n <label>Titulo: </label>\n {{ form_task.title | add_class:'form-control' }}\n </div>\n <div class=\"form-group\">\n {{ form_task.description.errors }}\n <label>Descrição: </label>\n {{ form_task.description | add_class:'form-control' }}\n </div>\n <div class=\"form-group\">\n {{ form_task.expiration_date.errors }}\n <label>Data de Expiração: </label>\n {{ form_task.expiration_date | add_class:'form-control' }}\n </div>\n <div class=\"form-group\">\n {{ form_task.priority.errors }}\n <label>Prioridade: </label>\n {{ form_task.priority | add_class:'form-control' }}\n </div>\n <div class=\"form-group\">\n <input type=\"submit\" value=\"Salvar\">\n </div>\n</form>\n\n{% endblock content %}\n"
},
{
"alpha_fraction": 0.7991071343421936,
"alphanum_fraction": 0.7991071343421936,
"avg_line_length": 43.79999923706055,
"blob_id": "a2b05e26a5d7ae544eb9acf7b370e71762e405f9",
"content_id": "b289a927eb7d0e6b6f2f8d4f993a0b5732b429f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 5,
"path": "/README.md",
"repo_name": "eltondornelas/django-semana-dev-python-treinaweb",
"src_encoding": "UTF-8",
"text": "# django-semana-dev-python-treinaweb\n\nAplicação feita seguindo as aulas da Semana Dev Python e Django da TreinaWeb.\n<br>\nPara diferenciar um pouco, todo o código foi feito em inglês e com algumas modificaçes feitas por mim.\n"
},
{
"alpha_fraction": 0.6565656661987305,
"alphanum_fraction": 0.6565656661987305,
"avg_line_length": 45.20000076293945,
"blob_id": "3691f0baaa0c41b287b8aa81a6b5025ec737f042",
"content_id": "c88d0534875156e411c93807f193b0d76bbafb90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 694,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 15,
"path": "/app/urls.py",
"repo_name": "eltondornelas/django-semana-dev-python-treinaweb",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom .views.task_views import *\nfrom .views.user_views import *\n\nurlpatterns = [\n path('task_list/', task_list, name='task_list_route'),\n path('register_task/', register_task, name='register_task_route'),\n path('edit_task/<int:id>', edit_task, name='edit_task_route'),\n path('remove_task/<int:id>', remove_task, name='remove_task_route'),\n path('register_user/', register_user, name='register_user_route'),\n path('login_user/', login_user, name='login_user_route'),\n path('logout_user/', logout_user, name='logout_user_route'),\n # url , method/class, route\n # <tipo:nome_parametro> -> o nome tem que bater com o do método\n]\n"
}
] | 8 |
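The route names registered in the app's `urls.py` above (`task_list_route`, `edit_task_route`, ...) are what the views' `redirect(...)` calls resolve, and the same lookup works programmatically. For example, with this URLconf included at the project root as shown earlier:

from django.urls import reverse

print(reverse('edit_task_route', args=[42]))    # -> '/edit_task/42'
print(reverse('remove_task_route', args=[42]))  # -> '/remove_task/42'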
teramita45/python-master | https://github.com/teramita45/python-master | 017bfe20e2db312e54f0878fcd945a210dd31ab1 | 1e69d9706327bee242fa60c9a628252160876043 | f5556033dd3e5c653000330ad26255be8d879cda | refs/heads/master | 2023-03-15T17:06:24.646838 | 2021-03-08T01:32:15 | 2021-03-08T01:32:15 | 345,038,794 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6311787366867065,
"alphanum_fraction": 0.6311787366867065,
"avg_line_length": 14.411765098571777,
"blob_id": "be075b268739ec5452dd857af3aaf0f5de00031f",
"content_id": "f58ff954c816350fe2c45258c8bd5ed5f023ee8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 263,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 17,
"path": "/operadores/operadores-aritmetico.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "a = int(input(\"dame un mumero\"))\nb = int(input(\"dame otro numero\"))\nsuma = a+b\nprint(\"la suma es : \",suma)\n\nmulti= a * b\n\nprint(multi)\n\ndivision = a/b\nprint(division) \n\nmodulo = a % b\nprint(\"modulo: \" , modulo)\n\nexponente = a **b\nprint(\"exponente: \", exponente)\n\n"
},
{
"alpha_fraction": 0.5745920538902283,
"alphanum_fraction": 0.6048951148986816,
"avg_line_length": 19.452381134033203,
"blob_id": "1bc15c294691e9b5c50d6f091d70c43cf208229c",
"content_id": "0c3ed32649b8a41b6ba4feeffcd8f8323e0f2585",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 860,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 42,
"path": "/siclos/ejercicio-if.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "mes = int(input(\"proporciona un mes en numero: \"))\n\nestacion = None\nif mes == 1 or mes == 2 or mes == 3 :\n estacion = \"inverno\"\nelif mes == 5 or mes == 6 or mes == 4:\n estacion = \"otoño\"\nelif mes == 7 or mes == 8 or mes ==9:\n estacion = \"verano\"\nelif mes == 10 or mes == 11 or mes == 12:\n estacion = \"primavera\"\nelse:\n estacion= \"no corresponde a ningun mes del año\"\n\n\n\n\n\nprint(\"estacion \" ,estacion, \" para el mes \" , mes)\n\n\nnota = int(input(\"proporciona un valor entre 0 y 10: \"))\n\ncalificacion = None\n\nif nota == 9 or nota == 10 :\n calificacion = \"A\"\nelif nota == 8 :\n calificacion = \"B\"\nelif nota == 7:\n calificacion = \"C\"\nelif nota == 6:\n calificacion = \"C\"\nelif nota <=5 and nota >=0 :\n calificacion = \"F\"\nelse:\n calificacion = True\n\nif calificacion == True :\n print(\"valor incorrecto\")\nelse:\n print(calificacion)"
},
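The chained `or` equality tests in `ejercicio-if.py` above are more commonly written as membership tests, which express each month grouping in a single condition:

mes = 2
if mes in (1, 2, 3):
    estacion = 'inverno'
elif mes in (4, 5, 6):
    estacion = 'otoño'
elif mes in (7, 8, 9):
    estacion = 'verano'
elif mes in (10, 11, 12):
    estacion = 'primavera'
else:
    estacion = 'no corresponde a ningun mes del año'
print(estacion)  # inverno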
{
"alpha_fraction": 0.7447335720062256,
"alphanum_fraction": 0.7447335720062256,
"avg_line_length": 16.565217971801758,
"blob_id": "30c517e93a3b487db005cf5dafa75471add31b44",
"content_id": "d166cc6d50c0e8293c4cf0cb61782d8ddcf9252e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 807,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 46,
"path": "/colecciones/diccionario.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "#un diccionario esta compuesto de los elementos clave valor\ndiccionario = {\n \"ide\": \"integrated Development Environment\",\n \"OOP\": \"onject Oriented Programing\",\n \"DBMS\": \"data base management system\"\n}\nprint(diccionario)\n\n#largo de un diccionario\nprint(len(diccionario))\n\n#acceder\nprint(diccionario[\"ide\"])\n\n#acceder con get\nprint(diccionario.get(\"ide\"))\n\n#modificando valores\n\ndiccionario[\"ide\"] = \"IDEEEEEEEEE\"\nprint(diccionario)\n\n#iterar\nfor i in diccionario:\n print(diccionario[i])\n\nfor i in diccionario.values():\n print(i)\n\n#comprobando si existe un elemtno\n\nprint(\"ide\" in diccionario)\n\n#agregar elementos\ndiccionario[\"PK\"] = \"Primary Key\"\nprint(diccionario)\n\n#remover elemtnos\ndiccionario.pop(\"DBMS\")\nprint(diccionario)\n\n#limpiar con clear\ndiccionario.clear()\n\n#elminiar\ndel diccionario"
},
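`diccionario.py` above iterates keys and values in separate loops; the usual way to get both at once is `.items()`:

diccionario = {'ide': 'Integrated Development Environment', 'OOP': 'Object Oriented Programming'}
for clave, valor in diccionario.items():
    print(clave, '->', valor)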
{
"alpha_fraction": 0.7575757503509521,
"alphanum_fraction": 0.7575757503509521,
"avg_line_length": 15,
"blob_id": "ea220ee470d3e9976250298b7d051e7ed6cf1829",
"content_id": "2700273cca6eb31b39df2d986fb0eb176e52585b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/README.md",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "# python-master\ncurso de python \n"
},
{
"alpha_fraction": 0.6597353219985962,
"alphanum_fraction": 0.6597353219985962,
"avg_line_length": 28.33333396911621,
"blob_id": "c0323a004230bf3f261d7784f6ec6efdea560b14",
"content_id": "8866c884d597ef6bd9b2cb61589af760fb5bb2ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 529,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 18,
"path": "/operadores/tienda-de-libros.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "print(\"proporcione los siguientes datos del libro\")\n\nnombre = input(\"proporciona el nombre: \")\nid = int(input(\"proporciona el id: \"))\nprecio = float(input(\"proporciona el precio : \"))\nenvio = input(\"indica si el envio es gratuito(Treu / False): \")\n\nif envio ==\"True\" or envio == \"true\":\n envio = True\nelif envio == \"False\" or envio == \"false\":\n envio = False\nelse:\n envio = \"valor incorrecto debe ser True/False\"\n\nprint(\"nombre : \" +nombre)\nprint(\"id: \", id)\nprint(\"precio: \",precio )\nprint(\"envio gratuito: \", envio )\n\n"
},
{
"alpha_fraction": 0.6783625483512878,
"alphanum_fraction": 0.6881091594696045,
"avg_line_length": 22.272727966308594,
"blob_id": "2aba55bae486ded349f84ad07fdf3588b5731849",
"content_id": "4db7e0552378f1fbe70abc5dc30466f87b7f0dac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 513,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 22,
"path": "/siclos/if-else.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "condicion = True\nif condicion:\n print(\"la condicion es verdadera\")\nelse:\n print(\"la condicion es falsa\")\n\n#operador ternario \nprint(\"condicion es verdadera\") if condicion else print(\"condicion es falsa\")\n\nnumero = int(input(\"proporciona un numero entre 1 y 3: \"))\n\n#if elseif else\nif numero == 1:\n numeroTexto = \"numero uno\"\nelif numero == 2:\n numeroTexto = \"Numero dos\"\nelif numero == 3:\n numeroTexto = \"numero tres\"\nelse:\n numeroTexto = \"no se encuentra en el rango\"\n\nprint(numeroTexto)\n\n"
},
{
"alpha_fraction": 0.6792452931404114,
"alphanum_fraction": 0.6792452931404114,
"avg_line_length": 14.142857551574707,
"blob_id": "802afec2963cb8d70d55690bdaa00678857b269d",
"content_id": "8371939e2898f683ceb6de2c2c5c5aa37215d959",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 7,
"path": "/funciones/funciones.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "def mi_funcion():\n print(\"Ejecutnado mi funcion\")\n\nmi_funcion()\nmi_funcion()\nmi_funcion()\nmi_funcion()\n"
},
{
"alpha_fraction": 0.6265060305595398,
"alphanum_fraction": 0.6746987700462341,
"avg_line_length": 24,
"blob_id": "0d2893182ce795ff0b7fda3a84d6c710b705161a",
"content_id": "142fe86fa7b988bba4bc68949cb347e8798c8f86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 10,
"path": "/python-inicios/tipo-string.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "cadena = \"aerosmith\"\nprint(\"mi grupo favorito es : \" + cadena)\n\nnumero1 = \"1\"\nnumero2 = \"2\"\nprint(\"concatenacion : \", int(numero1) + int(numero2))\n\nnum1 = 1\nnum2 = 2\nprint(\"operacion de suma:\", num2 + num1) #hay q usar la coma para concatenar texto"
},
{
"alpha_fraction": 0.6357142925262451,
"alphanum_fraction": 0.6499999761581421,
"avg_line_length": 11.727272987365723,
"blob_id": "56827a80309d6d2977172ded96fe0fe6ce1a1f6b",
"content_id": "ca4b200c7d032f3932bfbe18333a86456ca22fa1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 140,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 11,
"path": "/python-inicios/variables.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "x = 3\ny = 5\nprint (x)\nprint (y)\n\nz = x + y\n\nprint (z)\n#solo se visualiza en el compilador de python\n\nid(z) # muestra la posicion en memeria\n"
},
{
"alpha_fraction": 0.5400000214576721,
"alphanum_fraction": 0.5799999833106995,
"avg_line_length": 8.800000190734863,
"blob_id": "700fbb5a5f1b287936c59964e994827df809b26f",
"content_id": "ed814dc769b78a9c88b517a73d18eb20211f8943",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 5,
"path": "/operadores/comparacion.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "a = 3\nb = 2\n\nresultado = a == b\nprint(resultado)\n\n"
},
{
"alpha_fraction": 0.6501650214195251,
"alphanum_fraction": 0.6650164723396301,
"avg_line_length": 22.30769157409668,
"blob_id": "5dd856215ce3232700e73782e92840e617617651",
"content_id": "9e953d399601752da11d7414aec356c9c5bf45ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 606,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 26,
"path": "/operadores/logicos.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "a = int(input(\"proporciona un valor : \"))\nvalorMin = 0\nvalorMax = 5\n\ndentroRango = a >= valorMin and a <= valorMax\n\nprint (dentroRango)\n\nif (dentroRango):\n print(\"dentro de rango\")\nelse:\n print(\"fuerda de rango\")\n \nalto = int(input(\"proporciona un alto: \"))\nancho= int(input(\"proporciona un ancho: \"))\n\nprint(\"area: \", alto*ancho)\nprint(\"perimetro: \", (alto+ancho)*2)\n\nnumero1 = int(input(\"Proporciona el primer numero: \"))\nnumero2 = int(input(\"Proporciona el segundo numero: \"))\n\nif(numero1 > numero2):\n print(\"el mayor numero es: \", numero1)\nelse:\n print(\"el mayor numero es: \", numero2)\n"
},
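The range test in `logicos.py` above (`a >= valorMin and a <= valorMax`) has an idiomatic chained-comparison spelling with the same truth table:

valorMin, valorMax = 0, 5
a = 3
dentroRango = valorMin <= a <= valorMax  # same result as the and-expression above
print(dentroRango)  # True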
{
"alpha_fraction": 0.7678275108337402,
"alphanum_fraction": 0.7678275108337402,
"avg_line_length": 18.45161247253418,
"blob_id": "48d3768fd58fbcba0bca462bcb4b8ddcfd2d4615",
"content_id": "034936b27ca7e15910cba6377e789583637360d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 604,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 31,
"path": "/colecciones/set.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "#no podemos cambiar elementos pero si podemos añadir o elimnar elementos nuevos en el set\n# set es una coleccion sin orden, no titne indices y elementos repetidos\n\nplanetas = {\"tierra\", \"marte\", \"mercurio\", \"jupiter\"}\nprint(planetas)\n\n#largo\nprint(len(planetas))\n#revisar si un elemetno esta presente\nprint(\"marte\" in planetas)\n\n#agregar\nplanetas.add(\"venus\")\nprint(planetas)\n\n#eliminar con remove\nplanetas.remove(\"venus\")\nprint(planetas)\n\n#eliminar con discard\n\nplanetas.discard(\"jupiters\")\nprint(planetas)\n\n#limpiar el set\nplanetas.clear()\nprint(planetas)\n\n#elinar el set\ndel planetas\nprint(planetas)\n"
},
{
"alpha_fraction": 0.5600000023841858,
"alphanum_fraction": 0.6000000238418579,
"avg_line_length": 7.333333492279053,
"blob_id": "aa7d1a277e5c309a9ed687bc6456e990e12fd643",
"content_id": "5d67f7edb05a9fe190cc7043aff7d1105e260cbd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 14,
"num_lines": 6,
"path": "/python-inicios/datos-numericos.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "x = 10\nprint(x)\n\nz = None\nprint(z)\nprint(type(z))\n"
},
{
"alpha_fraction": 0.7403314709663391,
"alphanum_fraction": 0.7403314709663391,
"avg_line_length": 25,
"blob_id": "fe8cd0d513331495f1966e7c0769f9f413fe9eae",
"content_id": "6d5d3e7052d709bbb0c077a3be7ef62f98b1c4d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 7,
"path": "/funciones/argumentos.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "#paramentro es una variables defenida\n# argumento (arg) es un calor enviado a la funcion\n\ndef funcion_arg(nombre):\n print(\"el nombre resivido es \", nombre)\n\nfuncion_arg(\"carlos\")"
},
{
"alpha_fraction": 0.7033831477165222,
"alphanum_fraction": 0.7230527400970459,
"avg_line_length": 16.788732528686523,
"blob_id": "1d20a03401144769a21daf0be8e709adbeff3aeb",
"content_id": "36f4b25e38522201e07fe18b70476c6f5db85b9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1271,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 71,
"path": "/colecciones/lista.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "nombres = [\"juan\", \"carla\", \"ricardo\", \"maria\"]\n\nprint(nombres)\nprint(len(nombres))\n\nprint(nombres[0])\nprint(nombres[1])\n\n#navegacion inversa\nprint(nombres[-4])\n\n#imprimir rango\n\nprint(nombres[0:2]) #sin incluir el indice 2\n\n#imprimir los elementos hasta el indice \n\nprint(nombres[:3]) \n\n#imprimir los elementos hasta el final desde el indice proporcionado\nprint(nombres[2])\n\n#cambiar los elementops de la la lista\nnombres[3] =\"ivone\"\n\nprint(nombres)\n#iterar lista\nfor i in nombres:\n print(i)\n#revisar sii existe un elemetno en una lista\n\nif \"Karla\" in nombres:\n print(\"carla si existe en la lista\")\nelse :\n print(\"el elemento buscado no existe en la lsita\")\n \n#agregar elementos\nnombres.append(\"lorenzo\")\n\nprint(nombres)\n\n#insertar un nuevo elemtno en el indice proporcionado\nnombres.insert(1,\"octavio\")\nprint(nombres)\n#remover un elemtno de la lista\n\nnombres.remove(\"octavio\")\nprint(nombres)\n\n#remover el ultimo elemento de la lista\nnombres.pop()\nprint(nombres)\n\n#remover el indice indicado\ndel nombres[0]\nprint(nombres)\n\n#limpiar elemento de nuestra lista\nnombres.clear()\nprint(nombres)\n\n#eliminar por completo la lista elimina la variable\ndel nombres\n\n\n#tarea\nnumeros = [0,1,2,3,4,5,6,7,8,9,10]\n\nfor i in numeros:\n if i%3 ==0:\n print(i) \n "
},
{
"alpha_fraction": 0.7027027010917664,
"alphanum_fraction": 0.7027027010917664,
"avg_line_length": 37,
"blob_id": "156cd3ffe81fdce303e1b37fd2a88de73c35b2b5",
"content_id": "336e8af1dd3b04364eb43890cdca61f185e002b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 1,
"path": "/python-inicios/hola-mundo.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "print(\"hola mundo..desde python..:v\")"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5357142686843872,
"avg_line_length": 10.399999618530273,
"blob_id": "3b9dd32b8bce0546157c37d52f1c69e5b5e054f9",
"content_id": "58c3a8235dfe7619d6247077ffbe1b9a8bd247c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 56,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 5,
"path": "/funciones/return.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "def suma(a=0,b=0):\n return a + b\n\nc = suma()\nprint(c)"
},
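Because `suma` in `return.py` above declares defaults for both parameters, every call arity below is legal:

def suma(a=0, b=0):
    return a + b

print(suma())      # 0 - both defaults apply
print(suma(2))     # 2 - a=2, b stays 0
print(suma(2, 3))  # 5
print(suma(b=7))   # 7 - keyword argument skips a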
{
"alpha_fraction": 0.5751072764396667,
"alphanum_fraction": 0.6266094446182251,
"avg_line_length": 13.25,
"blob_id": "e4bb5824fd58030bc4bf6d2594485f155c2f6519",
"content_id": "69c81fe34faaba82e18d3f14831a975762d1c963",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 16,
"path": "/python-inicios/tipo-bool.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "x = True \nprint(x)\nprint(type(x))\ny = False \nprint(y)\n\nnum1 = 3\nnum2 = 2\n\nresultado = num1 < num2\nprint(resultado)\n\nif(num1 <num2) :\n print(\"el valor num1 es menor q num2\")\nelse:\n print(\"el valor num1 no es menor q num2\")\n\n "
},
{
"alpha_fraction": 0.5714285969734192,
"alphanum_fraction": 0.5870130062103271,
"avg_line_length": 15.782608985900879,
"blob_id": "f0d163219e7780d36493522df1b52aa339f0f3c6",
"content_id": "82602b7b83a6347a7e34b8594aa8ed0bf8f15922",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 385,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 23,
"path": "/siclos/break-continue.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "#imprimir solo las letras a contenidas en una cadena\n\nfor letra in \"holanda\":\n if letra == \"a\":\n print (letra)\n break\nelse:\n print(\"fin siclo for\")\n\nprint(\"continua el programa\")\n\n#imprimir numeros pares\n\n# for i in range(6):\n# if i%2 == 0:\n# print(i)\n\n#si el numero es diferente de par\n\nfor i in range(6):\n if i%2 != 0:\n continue\n print(i)"
},
{
"alpha_fraction": 0.5938864350318909,
"alphanum_fraction": 0.6113536953926086,
"avg_line_length": 13.375,
"blob_id": "31d3f8d6c6d95df4a91e18764170e3362ad00464",
"content_id": "ae89b240067f945d696bb6b9553b234727ccc078",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 16,
"path": "/python-inicios/tipos-datos.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "x = 5 #entero\ny = 10.1 #florantae\nz = True #vuleano\na = \"q cosas no???..xD\"\nprint(x)\nprint(type(x))\nprint(id(x))\nprint(y)\nprint(type(y))\nprint(id(y))\nprint(z)\nprint(type(z))\nprint(id(z))\nprint(a)\nprint(type(a))\nprint(id(a))"
},
{
"alpha_fraction": 0.46666666865348816,
"alphanum_fraction": 0.5333333611488342,
"avg_line_length": 5.636363506317139,
"blob_id": "86ea2cc46e683e74b97c650dea461574471fabf9",
"content_id": "97843894fb2e73f1fb5911c7c9ff5f6ff51db038",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 8,
"num_lines": 11,
"path": "/operadores/operadores-asignacion.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "x = 3\nprint(x)\n\nx+=2\nprint(x)\nx**=2\nprint(x)\nx*=2\nprint(x)\nx%=2\nprint(x)\n\n\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.625,
"avg_line_length": 18.75,
"blob_id": "8e4afb3b884cf35f2ab3f329448b3cd63d39d650",
"content_id": "28f05537745959c8cf65b7d0dbac69af863cc40b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 80,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 4,
"path": "/siclos/for.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "for letra in \"hola\":\n print (letra)\nelse:\n print(\"termino el siclo for\")\n\n"
},
{
"alpha_fraction": 0.6661764979362488,
"alphanum_fraction": 0.6882352828979492,
"avg_line_length": 16,
"blob_id": "8f134fd4e33336a41f07ff4202f181c0419077fe",
"content_id": "f8248f89e864a91ff8d04e0ef027b9e87e4e24a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 680,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 40,
"path": "/colecciones/tuplas.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "#las tuplan mantienen el orden pero no es posible modificarla\nfrutas = (\"naranja\", \"platano\", \"guayaba\" )\n\nprint(frutas)\n\n#largo de la tupla\nprint(len(frutas))\n\n#accder a un elemento\nprint(frutas[2])\n\n#navegacion inversa\nprint(frutas[-2])\n\n#rango\nprint(frutas[0:2])\n\n#modificar un valo\n#frutas[0] = \"naranjita\"\n\nfrutasLista = list(frutas)\nfrutasLista[0] = \"naranjita\"\n\nfrutas = tuple(frutasLista)\n\nprint(frutas)\n\nfor fruta in frutas:\n print(fruta, end=\", \")\n#no podemos agregar ni eleiminar elementos de una tupla...\n# pero si podemos eliminar latupla\n\ntupla = (13, 1, 8, 3, 2, 5, 8)\nnumeros = []\n\nfor i in tupla:\n if i<5:\n numeros.append(i)\n\ntupla = tuple(numeros)\n"
},
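The list-build-then-convert step at the end of `tuplas.py` above can be collapsed into one expression; since tuples are immutable, a new tuple must be built either way:

tupla = (13, 1, 8, 3, 2, 5, 8)
tupla = tuple(i for i in tupla if i < 5)
print(tupla)  # (1, 3, 2)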
{
"alpha_fraction": 0.7042801380157471,
"alphanum_fraction": 0.7120622396469116,
"avg_line_length": 24.799999237060547,
"blob_id": "729e853fd8f2d928ae067186e3c9416505c26938",
"content_id": "6805c95ae12e3ebc228c937bd90bfc6db13ff663",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 10,
"path": "/python-inicios/entrada-datos.py",
"repo_name": "teramita45/python-master",
"src_encoding": "UTF-8",
"text": "resultado = int(input(\"proporciona ujn valor: \"))\nresultado1 = int(input())\n\nprint(resultado + resultado1)\nprint(type(resultado))\n\ntitulo = input(\"Proporciona un titulo: \")\nautor = input(\"PRoporciona un autor: \")\n\nprint(titulo + \" fue escrito por \" + autor)"
}
] | 24 |
greg308/info3220 | https://github.com/greg308/info3220 | 1e75fbee7ed679c25e00ccfad14aa3f4471ca052 | 7380839abf87861acca1614fe25436e6afbc227b | b30a698edd34121718dd7b2fde31c640bc2759d9 | refs/heads/master | 2020-03-18T05:28:26.869484 | 2018-10-28T05:30:23 | 2018-10-28T05:30:23 | 134,344,283 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6689864993095398,
"alphanum_fraction": 0.6894301772117615,
"avg_line_length": 22.459182739257812,
"blob_id": "a3f147b2548914b2350c115edeeff5cfc5fa264d",
"content_id": "a69bc072e0c3d2287c6e683b966239d94cebecd0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2299,
"license_type": "no_license",
"max_line_length": 228,
"num_lines": 98,
"path": "/svm.py",
"repo_name": "greg308/info3220",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfrom sklearn import svm\nimport timeit\n\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\n\n# used to unwrap data\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\n# fine labels\n# train data CF100\nif False:\n data_train = unpickle('./train')\n x_train = data_train[b'data']\n y_train = np.array(data_train[b'fine_labels'])\n\n# coarse labels\n# train data CF100\nif True:\n data_train = unpickle('./train')\n x_train = data_train[b'data']\n y_train = np.array(data_train[b'coarse_labels'])\n\nprint(x_train.shape)\nprint(y_train.shape)\n\n# fine labels\nif False:\n # test data CF100\n data_test= unpickle('./test')\n x_test= data_test[b'data']\n y_test= np.array(data_test[b'fine_labels'])\n\n# coarse labels\nif True:\n # test data CF100\n data_test= unpickle('./test')\n x_test= data_test[b'data']\n y_test= np.array(data_test[b'coarse_labels'])\n\nprint(x_test.shape)\nprint(y_test.shape)\n\n# Preprocess names to take away file extensions\nnames_train_raw = data_train[b'filenames']\n\nnames_train = []\n\nfor x in names_train_raw:\n name_str = x.decode(\"utf-8\")\n ic = name_str.find('_s_')\n name = name_str[:ic]\n names_train.append(name)\n\n# make tuple of names to label\nl_n_train = [(y_train[i], x) for i, x in enumerate(names_train)]\n\nprint(l_n_train)\n\n# Preprocess names to take away file extensions\nnames_test_raw = data_test[b'filenames']\n\nnames_test = []\n\nfor x in names_test_raw:\n name_str = x.decode(\"utf-8\")\n ic = name_str.find('_s_')\n name = name_str[:ic]\n names_test.append(name)\n\n# make tuple of names to label\nl_n_test = [(y_test[i], x) for i, x in enumerate(names_test)]\n\nprint(l_n_test)\n\n\n\nt1 = timeit.default_timer()\n\nclf = svm.SVC(C=1.0, kernel='rbf', degree=3, gamma='auto', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape='ovr', random_state=None)\nclf.fit(x_train[:10000], y_train[:10000])\nt2 = timeit.default_timer()\nprint(t2-t1)\nresult = clf.score(x_test, y_test)\nprint('Result:', result)\n\nt2 = timeit.default_timer()\nprint(t2-t1)\n"
}
] | 1 |
PawelPeczek/ModelAsAServiceV2 | https://github.com/PawelPeczek/ModelAsAServiceV2 | f60f84c77bf110f0fa51a5ac8a1b9fb4ae63d513 | 813323a22aec14bdc0f4f4c73c07f711ddd3ce2b | a332c83cfa3d9b24d818f3c567df7816149f2e94 | refs/heads/master | 2023-03-21T20:10:03.175709 | 2021-03-07T18:44:24 | 2021-03-07T18:44:24 | 345,054,690 | 2 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6052631735801697,
"alphanum_fraction": 0.6115922927856445,
"avg_line_length": 32.71910095214844,
"blob_id": "89ad30937ecc515b775886e4a4c03de8f7c70922",
"content_id": "ae75b2366a62058fdc8f9fb3c251c861cfb4cc90",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3002,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 89,
"path": "/object_detection_service/object_detection_service/handlers/object_detection.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import logging\nfrom typing import Tuple\n\nimport torch\nfrom flask import Response, request, make_response\nfrom flask_restful import Resource\nimport numpy as np\nimport cv2 as cv\nfrom torchvision.models.detection.retinanet import RetinaNet\n\nfrom ..config import \\\n CLASS_MAPPING\nfrom ..entities import \\\n DetectedObjects, BoundingBox, DetectedObject\nfrom ..utils import \\\n image_from_str, to_chw_tensor\n\nSTANDARDIZATION_CONST = 255.0\n\nRawPrediction = dict\n\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\nclass ObjectDetection(Resource):\n\n def __init__(\n self,\n model: RetinaNet,\n confidence_threshold: float,\n max_image_dim: int\n ):\n self.__model = model\n self.__confidence_threshold = confidence_threshold\n self.__max_image_dim = max_image_dim\n\n def post(self) -> Response:\n if 'image' not in request.files:\n return make_response({'msg': 'Field named \"image\" required.'}, 400)\n image = image_from_str(raw_image=request.files['image'].read())\n results = self.__infer_from_image(image=image)\n return make_response(results.to_dict(), 200)\n\n def __infer_from_image(\n self,\n image: np.ndarray\n ) -> DetectedObjects:\n image, scale = self.__standardize_image(image=image)\n logging.info(f\"Standardized image shape: {image.shape}. scale: {scale}\")\n prediction = self.__model(image)[0]\n return self.__post_process_inference(prediction=prediction, scale=scale)\n\n def __standardize_image(\n self,\n image: np.ndarray\n ) -> Tuple[torch.Tensor, float]:\n max_shape = max(image.shape[:2])\n if max_shape <= self.__max_image_dim:\n return to_chw_tensor(image / STANDARDIZATION_CONST), 1.0\n scale = self.__max_image_dim / max_shape\n resized_image = cv.resize(image, dsize=None, fx=scale, fy=scale)\n return to_chw_tensor(resized_image / STANDARDIZATION_CONST), scale\n\n def __post_process_inference(\n self,\n prediction: RawPrediction,\n scale: float\n ) -> DetectedObjects:\n boxes, scores, labels = \\\n prediction[\"boxes\"].detach().numpy() / scale, \\\n prediction[\"scores\"].detach().numpy(), \\\n prediction[\"labels\"].detach().numpy()\n detected_objects = []\n for bbox, score, label in zip(boxes, scores, labels):\n if score < self.__confidence_threshold:\n continue\n bbox = BoundingBox(\n left_top=(int(round(bbox[0])), int(round(bbox[1]))),\n right_bottom=(int(round(bbox[2])), int(round(bbox[3])))\n )\n detected_object = DetectedObject(\n bbox=bbox,\n confidence=score.astype(float).item(),\n label=label.item(),\n class_name=CLASS_MAPPING.get(label, \"N/A\")\n )\n detected_objects.append(detected_object)\n return DetectedObjects(detected_objects=detected_objects)\n\n"
},
{
"alpha_fraction": 0.5851755738258362,
"alphanum_fraction": 0.5912440419197083,
"avg_line_length": 33.95454406738281,
"blob_id": "53b36bee323f48b9c851fd9bf00004ad1db03c49",
"content_id": "5c74cb5a7008bf2ecca6e2abf03fd18e58e4e4d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2307,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 66,
"path": "/resources_manager_service/resources_manager_service/handlers/input_image_register.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import os\nfrom uuid import uuid4\n\nfrom flask import request, Response, make_response, send_from_directory\nfrom flask_restful import Resource\n\nfrom .utils import initialize_request_parser, build_base_resource_path\nfrom .config import RESOURCE_IDENTIFIER_FIELD_NAME, LOGIN_FIELD_NAME\nfrom ..config import INPUT_IMAGE_NAME\n\n\nclass InputImageRegister(Resource):\n\n def __init__(self):\n self.__get_request_parser = initialize_request_parser()\n self.__post_request_parser = initialize_request_parser(\n include_resource_identifier=False\n )\n\n def post(self) -> Response:\n if 'image' not in request.files:\n return make_response(\n {'msg': 'Field called \"image\" must be specified'}, 400\n )\n data = self.__post_request_parser.parse_args()\n requester_login = data[LOGIN_FIELD_NAME]\n resource_identifier = f'{uuid4()}'\n target_path = os.path.join(\n build_base_resource_path(\n requester_login=requester_login,\n resource_identifier=resource_identifier\n ),\n INPUT_IMAGE_NAME\n )\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\n request.files['image'].save(target_path)\n return make_response(\n {\n LOGIN_FIELD_NAME: requester_login,\n RESOURCE_IDENTIFIER_FIELD_NAME: resource_identifier\n },\n 200\n )\n\n def get(self) -> Response:\n data = self.__get_request_parser.parse_args()\n requester_login = data[LOGIN_FIELD_NAME]\n resource_identifier = data[RESOURCE_IDENTIFIER_FIELD_NAME]\n resources_dir = build_base_resource_path(\n requester_login=requester_login,\n resource_identifier=resource_identifier\n )\n if not os.path.isdir(resources_dir):\n return make_response(\n {'msg': 'Incorrect resource identifiers.'}, 500\n )\n try:\n return send_from_directory(\n directory=resources_dir,\n filename=INPUT_IMAGE_NAME,\n as_attachment=True\n )\n except FileNotFoundError:\n return make_response(\n {'msg': 'There is no input file detected.'}, 500\n )\n"
},
{
"alpha_fraction": 0.6444283127784729,
"alphanum_fraction": 0.6466038227081299,
"avg_line_length": 31.57480239868164,
"blob_id": "2eacbc5e1dacadecc6f1ee102f84a79fcee4354c",
"content_id": "09eb2c73e7310483ac6c1f295b7bc278d9e02977",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4137,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 127,
"path": "/face_detection_service/face_detection_service/app.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import json\nimport logging\nfrom functools import partial\nfrom threading import Thread\nfrom typing import List, Any\n\nimport pika\nfrom pika import spec\nfrom pika.channel import Channel\nfrom retina_face_net import RetinaFaceNet, RetinaFaceNetPrediction\n\nfrom .communication import fetch_processing_input, register_results, \\\n LOGIN_FIELD, RESOURCE_IDENTIFIER_FIELD\nfrom .config import RABBIT_HOST, RABBIT_PORT, RABBIT_USER, RABBIT_PASSWORD, \\\n FACE_DETECTION_CHANNEL, WEIGHTS_PATH, TOP_K, CONFIDENCE_THRESHOLD\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef start_app() -> None:\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(\n host=RABBIT_HOST,\n port=RABBIT_PORT,\n credentials=pika.PlainCredentials(\n username=RABBIT_USER,\n password=RABBIT_PASSWORD\n )\n )\n )\n channel = connection.channel()\n channel.queue_declare(queue=FACE_DETECTION_CHANNEL)\n channel.basic_qos(prefetch_count=1)\n model = RetinaFaceNet.initialize(\n weights_path=WEIGHTS_PATH,\n top_k=TOP_K,\n confidence_threshold=CONFIDENCE_THRESHOLD\n )\n channel.basic_consume(\n queue=FACE_DETECTION_CHANNEL,\n on_message_callback=partial(on_face_detection, model=model)\n )\n channel.start_consuming()\n\n\ndef on_face_detection(\n channel: Channel,\n method: spec.Basic.Deliver,\n properties: spec.BasicProperties,\n body: str,\n model: RetinaFaceNet\n) -> None:\n logging.info(\"Starting worker thread.\")\n worker_thread = Thread(\n target=start_face_detection,\n args=(channel, method, properties, body, model,)\n )\n worker_thread.daemon = True\n worker_thread.start()\n logging.info(\"Working thread started.\")\n\n\ndef start_face_detection(\n channel: Channel,\n method: spec.Basic.Deliver,\n properties: spec.BasicProperties,\n body: str,\n model: RetinaFaceNet\n) -> None:\n # https://stackoverflow.com/questions/51752890/how-to-disable-heartbeats-with-pika-and-rabbitmq\n try:\n message_content = json.loads(body)\n logging.info(f\"Processing request: {message_content}\")\n image = fetch_processing_input(\n requester_login=message_content[LOGIN_FIELD],\n request_identifier=message_content[RESOURCE_IDENTIFIER_FIELD]\n )\n inference_results = model.infer(image=image)\n logging.info(f\"Inference done: {message_content}\")\n serialized_results = _inference_results_to_dict(\n inference_results=inference_results\n )\n register_results(\n requester_login=message_content[LOGIN_FIELD],\n request_identifier=message_content[RESOURCE_IDENTIFIER_FIELD],\n results=serialized_results\n )\n send_ack = partial(ack_message, channel=channel, delivery_tag=method.delivery_tag)\n channel.connection.add_callback_threadsafe(send_ack)\n logging.info(f\"Results registered: {message_content}\")\n except Exception as e:\n logging.error(f\"Could not process image: {e}\")\n\n\ndef ack_message(channel: Channel, delivery_tag: Any):\n \"\"\"Note that `channel` must be the same pika channel instance via which\n the message being ACKed was retrieved (AMQP protocol constraint).\n \"\"\"\n if channel.is_open:\n channel.basic_ack(delivery_tag)\n else:\n # Channel is already closed, so we can't ACK this message;\n # log and/or do something that makes sense for your app in this case.\n pass\n\n\ndef _inference_results_to_dict(\n inference_results: List[RetinaFaceNetPrediction]\n) -> dict:\n return {\n \"inference_results\": [\n {\n \"bounding_box\": {\n \"left_top\": list(r.bbox.left_top.compact_form),\n \"right_bottom\": list(r.bbox.right_bottom.compact_form)\n },\n \"confidence\": r.confidence.astype(float).item(),\n 
\"landmarks\": [\n list(l.compact_form) for l in r.landmarks\n ]\n } for r in inference_results\n ]\n }\n\n\nif __name__ == '__main__':\n start_app()\n"
},
{
"alpha_fraction": 0.5847682356834412,
"alphanum_fraction": 0.6152318120002747,
"avg_line_length": 16.776470184326172,
"blob_id": "47fb67048a6ea66db525cdcff947ca12cfd6c35e",
"content_id": "f752798d3cbbe582d7aaad073ae16d6a7681b117",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1510,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 85,
"path": "/gateway_service/README.md",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "# Resource manager service\n\n## Service build\nTo build face detection service simply run:\n```bash\nrepo_root/gateway_service$ docker build -t maas/v2/gateway_service .\n```\n\n## Service run\nTo run the service one should simply\n```bash\nrepo_root/gateway_service$ docker run --network host -v $PWD:/project maas/v2/gateway_service:latest\n```\n\n## Communication schema\nCommunication should be implemented via REST.\n\n### Object detection\nRequest:\n```\nMethod: POST\nPort: 50000\nPath: /maas_workshop/v2/gateway/detect_objects\nBody: (form-data)\n \"image\": bytes of jpeg image\n```\n\nResponse:\n```json\n{\n \"detected_objects\": [\n {\n \"bbox\": {\n \"left_top\": [0, 0],\n \"right_bottom\": [100, 100]\n },\n \"confidence\": 0.87,\n \"label\": 1,\n \"class_name\": \"person\"\n }\n ]\n}\n```\n\n### Face detection - initialize job\nRequest:\n```\nMethod: POST\nPort: 50000\nPath: /maas_workshop/v2/gateway/detect_faces\nBody: (form-data)\n \"image\": bytes of jpeg image\n \"login\" user login (str)\n```\nResponse:\n```json\n{\n \"login\": \"requester_login\",\n \"resource_identifier\": \"uuid\"\n}\n```\n\n\n### Face detection - fetch results\nRequest:\n```\nMethod: GET\nPath: /maas_workshop/v2/gateway/detect_faces\nBody: (form-data)\n \"login\" user login (str)\n \"resource_identifier\" uuid (str)\n```\nResponse:\n```json\n{\n \"inference_results\": [{\n \"bounding_box\": {\n \"left_top\": [0, 0],\n \"right_bottom\": [100, 100]\n },\n \"confidence\": 0.78,\n \"landmarks\": [[0, 0], [100, 100]]\n }]\n}\n```"
},
{
"alpha_fraction": 0.6313912272453308,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 19.0238094329834,
"blob_id": "51b6ff9620643b67c7bb8c3e3504ab986950cb5e",
"content_id": "61ba9dfed5e31574660d9d984eaf438b489c4c74",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 841,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 42,
"path": "/object_detection_service/README.md",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "# Model as a Service - object detection service\n\n## Environment build\nTo build object detection service simply run:\n```bash\nrepo_root/object_detection_service$ docker build -t maas/v2/object_detection_service . \n```\n\n## Service run\nTo run the service one should simply\n```bash\nrepo_root/object_detection_service$ docker run --network host -v $PWD:/project maas/v2/object_detection_service:latest\n```\n\n## Communication schema\nCommunication should be implemented via REST.\n\nRequest:\n```\nMethod: POST\nPort: 50001\nPath: /maas_workshop/v2/object_detection/detect\nBody: (form-data)\n \"image\": bytes of jpeg image\n```\n\nResponse:\n```json\n{\n \"detected_objects\": [\n {\n \"bbox\": {\n \"left_top\": [0, 0],\n \"right_bottom\": [100, 100]\n },\n \"confidence\": 0.87,\n \"label\": 1,\n \"class_name\": \"person\"\n }\n ]\n}\n```\n"
},
{
"alpha_fraction": 0.6502617597579956,
"alphanum_fraction": 0.6544502377510071,
"avg_line_length": 26.285715103149414,
"blob_id": "f32349690cbb5c08bc8e82cde4e79aa4c320b704",
"content_id": "67d0fc43c90e483a882666bbc44e09f55dd25a4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1910,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 70,
"path": "/gateway_service/gateway_service/app.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import logging\nimport time\nfrom threading import Thread\nfrom typing import Tuple\n\nimport pika\nfrom flask import Flask\nfrom flask_restful import Api\n\nfrom .handlers.face_detection import FaceDetection\nfrom .handlers.object_detection import ObjectDetection\nfrom .config import RABBIT_PASSWORD, RABBIT_USER, RABBIT_PORT, RABBIT_HOST, \\\n PORT, FACE_DETECTION_CHANNEL, GATEWAY_API_BASE_PATH\n\nlogging.getLogger().setLevel(logging.INFO)\n\napp = Flask(__name__)\napp.config['PROPAGATE_EXCEPTIONS'] = True\nSTOP_HEARTBEAT = False\n\n\ndef keep_rabbit_connection_online(connection: pika.BlockingConnection) -> None:\n while not STOP_HEARTBEAT:\n logging.info(\"RabbitMQ heartbeat.\")\n connection.process_data_events()\n time.sleep(30)\n\n\ndef create_api() -> Tuple[Api, Thread]:\n api = Api(app)\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(\n host=RABBIT_HOST,\n port=RABBIT_PORT,\n credentials=pika.PlainCredentials(\n username=RABBIT_USER,\n password=RABBIT_PASSWORD\n ),\n heartbeat=60\n )\n )\n channel = connection.channel()\n channel.queue_declare(queue=FACE_DETECTION_CHANNEL)\n api.add_resource(\n FaceDetection,\n construct_api_url('detect_faces'),\n resource_class_kwargs={\n 'rabbit_channel': channel\n }\n )\n api.add_resource(\n ObjectDetection,\n construct_api_url('detect_objects')\n )\n heartbeat = Thread(target=keep_rabbit_connection_online, args=(connection, ))\n heartbeat.start()\n return api, heartbeat\n\n\ndef construct_api_url(url_postfix: str) -> str:\n return f\"{GATEWAY_API_BASE_PATH}/{url_postfix}\"\n\n\nif __name__ == '__main__':\n api, heartbeat = create_api()\n try:\n app.run(host='0.0.0.0', port=PORT)\n except KeyboardInterrupt:\n STOP_HEARTBEAT = True\n heartbeat.join()\n"
},
{
"alpha_fraction": 0.7436762452125549,
"alphanum_fraction": 0.7571669220924377,
"avg_line_length": 28.700000762939453,
"blob_id": "a45a319788518e1e0ad66eb79e269ad9fab6f583",
"content_id": "4db3a56a9a28d918dc58b1e0ffb738097b4346c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 593,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 20,
"path": "/face_detection_service/Dockerfile",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "FROM python:3.9\n\nRUN apt-get update && apt-get install -y build-essential && \\\n apt-get install -y python3-dev python3-opencv\n\nRUN mkdir /build\nRUN mkdir /project\n\nCOPY ./requirements.txt build/requirements.txt\nRUN python -m pip install -r build/requirements.txt\nRUN rm -r build\n\nRUN mkdir /weights\nRUN wget --no-check-certificate 'https://github.com/PawelPeczek/RetinaFaceNet/releases/download/v1.0/FaceNet_resnet_50.pth' \\\n -O /weights/weights.pth\n\nCOPY ./face_detection_service /project/face_detection_service\nWORKDIR project\n\nENTRYPOINT [\"python\", \"-m\", \"face_detection_service.app\"]"
},
{
"alpha_fraction": 0.6611570119857788,
"alphanum_fraction": 0.7471074461936951,
"avg_line_length": 49.41666793823242,
"blob_id": "fedc03a06c846adbe514f430ca4a5b0b31ed3a29",
"content_id": "2a72a0566e11b5c0e71d8fafee709dbce02a65ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 605,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 12,
"path": "/gateway_service/gateway_service/config.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import os\n\nPORT = 50000\nFACE_DETECTION_CHANNEL = \"face_detection_channel\"\nRABBIT_HOST = os.getenv(\"RABBIT_HOST\", \"127.0.0.1\")\nRABBIT_PORT = 5672\nRABBIT_USER = os.getenv(\"RABBIT_USER\", \"guest\")\nRABBIT_PASSWORD = os.getenv(\"RABBIT_PASSWORD\", \"guest\")\nOBJECT_DETECTION_URL = \"http://127.0.0.1:50001/maas_workshop/v2/object_detection/detect\"\nINPUT_IMAGE_INGEST_URL = \"http://127.0.0.1:50002/maas_workshop/v2/resources_manager/input_image_register\"\nFETCH_RESULTS_URL = \"http://127.0.0.1:50002/maas_workshop/v2/resources_manager/face_detection_register\"\nGATEWAY_API_BASE_PATH = \"/maas_workshop/v2/gateway\"\n"
},
{
"alpha_fraction": 0.7710084319114685,
"alphanum_fraction": 0.7710084319114685,
"avg_line_length": 19.69565200805664,
"blob_id": "6c7ddc2a27b74ec38f1cf50b91e00ed0b9486ba4",
"content_id": "1d7a6dd921f431fb6d75b9373485743bcc5e0024",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 476,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 23,
"path": "/object_detection_service/object_detection_service/entities.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "from dataclasses import dataclass\nfrom typing import Tuple, List\n\nfrom dataclasses_json import DataClassJsonMixin\n\n\n@dataclass(frozen=True)\nclass BoundingBox(DataClassJsonMixin):\n left_top: Tuple[int, int]\n right_bottom: Tuple[int, int]\n\n\n@dataclass\nclass DetectedObject(DataClassJsonMixin):\n bbox: BoundingBox\n confidence: float\n label: int\n class_name: str\n\n\n@dataclass\nclass DetectedObjects(DataClassJsonMixin):\n detected_objects: List[DetectedObject]\n"
},
{
"alpha_fraction": 0.625,
"alphanum_fraction": 0.6313559412956238,
"avg_line_length": 37.27027130126953,
"blob_id": "f907ea354506e4a1e4bf3133b95d1976def55324",
"content_id": "32228bf68ae3523e55bd03817a4809b69a470b79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2832,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 74,
"path": "/resources_manager_service/resources_manager_service/handlers/face_detection_register.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import json\nimport os\nfrom json.decoder import JSONDecodeError\n\nfrom flask import Response, make_response\nfrom flask_restful import Resource\nfrom flask_restful.reqparse import RequestParser\n\nfrom .utils import safe_load_json, initialize_request_parser, \\\n build_base_resource_path, persist_json_result\nfrom .config import LOGIN_FIELD_NAME, RESOURCE_IDENTIFIER_FIELD_NAME\nfrom ..config import PERSISTENCE_DIR\n\n\nFACE_DETECTION_FILE_NAME = \"face_detection.json\"\nFACE_DETECTION_RESULTS_FIELD = \"face_detection_results\"\n\n\nclass FaceDetectionRegister(Resource):\n\n def __init__(self):\n self.__get_request_parser = initialize_request_parser()\n self.__post_request_parser = self.__initialize_post_request_parser()\n\n def get(self) -> Response:\n data = self.__get_request_parser.parse_args()\n requester_login = data[LOGIN_FIELD_NAME]\n resource_identifier = data[RESOURCE_IDENTIFIER_FIELD_NAME]\n resource_path = os.path.join(\n PERSISTENCE_DIR, requester_login, resource_identifier, FACE_DETECTION_FILE_NAME\n )\n if not os.path.isdir(os.path.dirname(resource_path)):\n return make_response(\n {'msg': 'Incorrect resource identifiers.'}, 500\n )\n if not os.path.isfile(resource_path):\n return make_response({\"status\": \"in_progress\"}, 200)\n resource = safe_load_json(resource_path)\n return make_response(resource, 200)\n\n def post(self) -> Response:\n data = self.__post_request_parser.parse_args()\n requester_login = data[LOGIN_FIELD_NAME]\n resource_identifier = data[RESOURCE_IDENTIFIER_FIELD_NAME]\n target_path = os.path.join(\n build_base_resource_path(\n requester_login=requester_login,\n resource_identifier=resource_identifier\n ),\n FACE_DETECTION_FILE_NAME\n )\n if not os.path.isdir(os.path.dirname(target_path)):\n return make_response(\n {'msg': 'Wrong resource identifier or requester login'}, 500\n )\n try:\n content = json.loads(data[FACE_DETECTION_RESULTS_FIELD])\n persist_json_result(target_path=target_path, content=content)\n return make_response({\"msg\": \"OK\"}, 200)\n except (JSONDecodeError, KeyError):\n return make_response(\n {'msg': f'Input in {FACE_DETECTION_RESULTS_FIELD} is not JSON'},\n 400\n )\n\n def __initialize_post_request_parser(self) -> RequestParser:\n parser = initialize_request_parser()\n parser.add_argument(\n FACE_DETECTION_RESULTS_FIELD,\n help=f'Field \"{FACE_DETECTION_RESULTS_FIELD}\" must '\n 'be specified in this request.',\n required=True\n )\n return parser\n"
},
{
"alpha_fraction": 0.5391539335250854,
"alphanum_fraction": 0.541854202747345,
"avg_line_length": 30.742856979370117,
"blob_id": "cc15525a0f0c34b353d1c48ee4f3c83ef002c559",
"content_id": "01419214ba6c31ba3dae901393dae49bd5348efe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1111,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 35,
"path": "/gateway_service/gateway_service/handlers/proxy.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import requests\nfrom flask import request, Response, make_response\nfrom flask_restful import Resource\n\n\nclass Proxy(Resource):\n\n def __init__(self):\n super().__init__()\n self.__excluded_headers = [\n 'content-encoding', 'content-length',\n 'transfer-encoding', 'connection'\n ]\n\n def _forward_message(self, target_url: str) -> Response:\n headers = {\n key: value for (key, value) in request.headers if key != 'Host'\n }\n try:\n resp = requests.request(\n method=request.method,\n url=target_url,\n headers=headers,\n data=request.get_data(),\n allow_redirects=False,\n verify=False\n )\n except Exception:\n return make_response({'msg': 'Internal error'}, 500)\n headers = [\n (name, value) for (name, value) in resp.raw.headers.items()\n if name.lower() not in self.__excluded_headers\n ]\n response = Response(resp.content, resp.status_code, headers)\n return response\n"
},
{
"alpha_fraction": 0.6311713457107544,
"alphanum_fraction": 0.6369796991348267,
"avg_line_length": 31.25,
"blob_id": "e4b9268dd76d8a89fa8f4a68f2b3a79a59f563f3",
"content_id": "bbd71551723c096323d1c8c1e1ba70e6487a2878",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1033,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 32,
"path": "/gateway_service/gateway_service/handlers/face_detection.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import json\n\nfrom flask import Response, make_response\nfrom pika.channel import Channel\n\nfrom ..config import INPUT_IMAGE_INGEST_URL, FACE_DETECTION_CHANNEL, \\\n FETCH_RESULTS_URL\nfrom .proxy import Proxy\n\n\nclass FaceDetection(Proxy):\n\n def __init__(self, rabbit_channel: Channel):\n super().__init__()\n self.__rabbit_channel = rabbit_channel\n\n def post(self) -> Response:\n registration_response = self._forward_message(target_url=INPUT_IMAGE_INGEST_URL)\n if registration_response.status_code != 200:\n return registration_response\n try:\n self.__rabbit_channel.basic_publish(\n exchange=\"\",\n routing_key=FACE_DETECTION_CHANNEL,\n body=json.dumps(registration_response.json)\n )\n except Exception as e:\n return make_response({\"msg\": f\"Internal error, {e}\"}, 500)\n return registration_response\n\n def get(self) -> Response:\n return self._forward_message(target_url=FETCH_RESULTS_URL)\n\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6940639019012451,
"avg_line_length": 26.375,
"blob_id": "a9462c3f6890cf5990663e323269a7c14ced8337",
"content_id": "55af2af612baa5a4b10a57ab3f22a917adad098c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 219,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 8,
"path": "/resources_manager_service/resources_manager_service/config.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import os\n\nBASE_RESOURCE_PATH = '/maas_workshop/v2/resources_manager'\nPORT = 50002\nPERSISTENCE_DIR = os.path.abspath(os.path.join(\n os.path.dirname(__file__), \"..\", \"storage\"\n))\nINPUT_IMAGE_NAME = \"input_image.jpeg\"\n"
},
{
"alpha_fraction": 0.6714975833892822,
"alphanum_fraction": 0.772946834564209,
"avg_line_length": 17.81818199157715,
"blob_id": "fba0f29fb263e4e676cb253e4e017fcff56c28c1",
"content_id": "99f3aeebfde2a56c3f97c9f0aaaa69c329c31528",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 11,
"path": "/playground/requirements.txt",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "torch==1.8.0\ntorchvision==0.9.0\ndataclasses-json==0.5.2\nnumpy==1.20.1\nopencv-python==4.5.1.48\nrequests\ntqdm\nmatplotlib==3.3.4\nipykernel\nipywidgets\ngit+https://github.com/PawelPeczek/RetinaFaceNet.git@master\n"
},
{
"alpha_fraction": 0.6961805820465088,
"alphanum_fraction": 0.7413194179534912,
"avg_line_length": 43.30769348144531,
"blob_id": "d9272a4e7f597654aaaa430ec18ad218e746de28",
"content_id": "58fdbbca8e7c116bb9a975251266146500b68a5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 576,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 13,
"path": "/face_detection_service/face_detection_service/config.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import os\n\nFACE_DETECTION_CHANNEL = \"face_detection_channel\"\nRABBIT_HOST = os.getenv(\"RABBIT_HOST\", \"127.0.0.1\")\nRABBIT_PORT = 5672\nRABBIT_USER = os.getenv(\"RABBIT_USER\", \"guest\")\nRABBIT_PASSWORD = os.getenv(\"RABBIT_PASSWORD\", \"guest\")\nRESOURCE_MANAGER_BASE_URI = \"http://127.0.0.1:50002/maas_workshop/v2/resources_manager/\"\nINPUT_IMAGE_FETCHING_URI = f\"{RESOURCE_MANAGER_BASE_URI}input_image_register\"\nRESULT_POSTING_URI = f\"{RESOURCE_MANAGER_BASE_URI}face_detection_register\"\nWEIGHTS_PATH = os.path.join(\"/weights\", \"weights.pth\")\nTOP_K = 50\nCONFIDENCE_THRESHOLD = 0.2\n"
},
{
"alpha_fraction": 0.6578947305679321,
"alphanum_fraction": 0.6698564887046814,
"avg_line_length": 25.125,
"blob_id": "d032cdc89a8e42e97e88b02df858bd6c19938f0b",
"content_id": "38ddc66758c9c2ddb2e4f54300c531606a1fa0fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 418,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 16,
"path": "/object_detection_service/object_detection_service/utils.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "from typing import Union, List\n\nimport numpy as np\nimport cv2 as cv\nimport torch\n\n\ndef image_from_str(raw_image: str) -> np.ndarray:\n data = np.fromstring(raw_image, dtype=np.uint8)\n return cv.imdecode(data, cv.IMREAD_COLOR)\n\n\ndef to_chw_tensor(x: Union[np.ndarray, List[np.ndarray]]) -> torch.Tensor:\n if type(x) is not list:\n x = [x]\n return torch.Tensor([e.transpose(2, 0, 1).copy() for e in x])\n"
},
{
"alpha_fraction": 0.6298269033432007,
"alphanum_fraction": 0.6378162503242493,
"avg_line_length": 22.46875,
"blob_id": "df1b34095718252d9bbca3ff1cb51e0e6ac510f0",
"content_id": "063695f257c6b824e09850d8ac6e9cdcb9f7c4a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 751,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 32,
"path": "/object_detection_service/object_detection_service/app.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import torchvision\nfrom flask import Flask\nfrom flask_restful import Api\n\nfrom .config import \\\n OBJECT_DETECTION_PATH, CONFIDENCE_THRESHOLD, MAX_IMAGE_DIM, PORT\nfrom .handlers.object_detection import \\\n ObjectDetection\n\napp = Flask(__name__)\n\n\ndef create_api() -> Api:\n api = Api(app)\n model = torchvision.models.detection.retinanet_resnet50_fpn(pretrained=True)\n model.eval()\n api.add_resource(\n ObjectDetection,\n OBJECT_DETECTION_PATH,\n resource_class_kwargs={\n 'model': model,\n 'confidence_threshold': CONFIDENCE_THRESHOLD,\n 'max_image_dim': MAX_IMAGE_DIM\n }\n )\n return api\n\n\napi = create_api()\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=PORT)\n"
},
{
"alpha_fraction": 0.6782945990562439,
"alphanum_fraction": 0.6834625601768494,
"avg_line_length": 23.967741012573242,
"blob_id": "f5f7f66e44658e03ab790884e7c21a87d402913d",
"content_id": "a7e3f2baafe8dcdb499789487ea893daa86030cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 774,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 31,
"path": "/resources_manager_service/resources_manager_service/app.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "from flask import Flask\nfrom flask_restful import Api\n\nfrom .handlers.input_image_register import InputImageRegister\nfrom .handlers.face_detection_register import FaceDetectionRegister\nfrom .config import BASE_RESOURCE_PATH, PORT\n\napp = Flask(__name__)\napp.config['PROPAGATE_EXCEPTIONS'] = True\n\n\ndef create_api() -> Api:\n api = Api(app)\n api.add_resource(\n InputImageRegister,\n construct_api_url('input_image_register')\n )\n api.add_resource(\n FaceDetectionRegister,\n construct_api_url('face_detection_register')\n )\n return api\n\n\ndef construct_api_url(resource_postfix: str) -> str:\n return f'{BASE_RESOURCE_PATH}/{resource_postfix}'\n\n\nif __name__ == '__main__':\n api = create_api()\n app.run(host='0.0.0.0', port=PORT)\n"
},
{
"alpha_fraction": 0.7804877758026123,
"alphanum_fraction": 0.7804877758026123,
"avg_line_length": 40,
"blob_id": "c7263a359642411772d09760919cea06201d3896",
"content_id": "ae96d7b7ab7cf7c0264f36f25b024a399275442e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 2,
"path": "/resources_manager_service/resources_manager_service/handlers/config.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "LOGIN_FIELD_NAME = \"login\"\nRESOURCE_IDENTIFIER_FIELD_NAME = \"resource_identifier\"\n"
},
{
"alpha_fraction": 0.7417942881584167,
"alphanum_fraction": 0.7505470514297485,
"avg_line_length": 24.38888931274414,
"blob_id": "0d5db3e4871d23a6db916457e121df9337ede666",
"content_id": "178ef7c58eb0e3e92c9d25d1f537b72ff5084bfe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 457,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 18,
"path": "/object_detection_service/Dockerfile",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "FROM python:3.9\n\nRUN apt-get update && apt-get install -y build-essential && \\\n apt-get install -y python3-dev python3-opencv\n\nRUN mkdir /build\n\nCOPY ./requirements.txt build/requirements.txt\nCOPY ./cache_weights.py build/cache_weights.py\nRUN python -m pip install -r build/requirements.txt\nRUN python build/cache_weights.py\nRUN rm -r build\n\nRUN mkdir /project\nCOPY . /project\nWORKDIR project\n\nENTRYPOINT [\"python\", \"-m\", \"object_detection_service.app\"]\n"
},
{
"alpha_fraction": 0.7001140117645264,
"alphanum_fraction": 0.7103762626647949,
"avg_line_length": 31.518518447875977,
"blob_id": "a61999d2796ea1f384ae92cd2a655450488108df",
"content_id": "f7f29471d6ad114b1d4b8f5ce811896c8866a31c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 877,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 27,
"path": "/playground/Dockerfile",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "FROM continuumio/miniconda\n\nRUN apt-get update && apt-get install -y build-essential && \\\n apt install -y python3 python3-dev python3-opencv\nRUN python -m pip install jupyter\nRUN mkdir /build\nCOPY ./requirements.txt build/requirements.txt\nRUN conda create -n Playground python=3.9 -y && \\\n . ~/.bashrc && \\\n conda activate Playground && \\\n python -m pip install -r build/requirements.txt && \\\n python -m ipykernel install --name \"Playground\" --user &&\\\n conda deactivate\nRUN rm -r build\nRUN mkdir /project\nCOPY . /project\nWORKDIR project\n\nRUN . ~/.bashrc && \\\n conda activate Playground && \\\n python -m playground.cache_weights\n\nRUN mkdir /weights\nRUN wget --no-check-certificate 'https://github.com/PawelPeczek/RetinaFaceNet/releases/download/v1.0/FaceNet_resnet_50.pth' \\\n -O /weights/weights.pth\n\nENTRYPOINT [\"jupyter\", \"notebook\", \"--allow-root\"]"
},
{
"alpha_fraction": 0.649235725402832,
"alphanum_fraction": 0.649235725402832,
"avg_line_length": 24.367347717285156,
"blob_id": "de7487798af028060ba838d6471d164e653138ae",
"content_id": "db97e4cc64259c31a9f53a0906d11898a2022662",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1243,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 49,
"path": "/resources_manager_service/resources_manager_service/handlers/utils.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import json\nimport os\nfrom typing import Optional\n\nfrom flask_restful import reqparse\n\nfrom .config import LOGIN_FIELD_NAME, RESOURCE_IDENTIFIER_FIELD_NAME\nfrom ..config import PERSISTENCE_DIR\n\n\ndef persist_json_result(target_path: str, content: dict) -> None:\n with open(target_path, \"w\") as f:\n json.dump(content, f)\n\n\ndef safe_load_json(path: str) -> Optional[dict]:\n try:\n with open(path, \"r\") as f:\n return json.load(f)\n except Exception:\n return None\n\n\ndef initialize_request_parser(\n include_resource_identifier: bool = True\n) -> reqparse.RequestParser:\n parser = reqparse.RequestParser()\n parser.add_argument(\n LOGIN_FIELD_NAME,\n help='Field \"login\" must be specified in this request.',\n required=True\n )\n if include_resource_identifier:\n parser.add_argument(\n RESOURCE_IDENTIFIER_FIELD_NAME,\n help='Field \"resource_identifier\" must '\n 'be specified in this request.',\n required=True\n )\n return parser\n\n\ndef build_base_resource_path(\n requester_login: str,\n resource_identifier: str\n) -> str:\n return os.path.join(\n PERSISTENCE_DIR, requester_login, resource_identifier\n )\n"
},
{
"alpha_fraction": 0.6538461446762085,
"alphanum_fraction": 0.6648351550102234,
"avg_line_length": 29.33333396911621,
"blob_id": "45c597185c7c644d9d34aeeef2f1442f4b0a981a",
"content_id": "de13a5f7548e2d2867bff21b558673f58bff1ddd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 182,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 6,
"path": "/object_detection_service/cache_weights.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import torchvision\n\nif __name__ == '__main__':\n print(\"Model weights fetching...\")\n _ = torchvision.models.detection.retinanet_resnet50_fpn(pretrained=True)\n print(\"Done.\")\n"
},
{
"alpha_fraction": 0.6205263137817383,
"alphanum_fraction": 0.6431578993797302,
"avg_line_length": 19.010526657104492,
"blob_id": "d2cf1351abe4d4e2a9b82c156119e762476501e6",
"content_id": "b143cff0fd97aa8b8b6483faae7ce6039f319bac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1900,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 95,
"path": "/resources_manager_service/README.md",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "# Resource manager service\n\n## Service build\nTo build face detection service simply run:\n```bash\nrepo_root/resources_manager_service$ docker build -t maas/v2/resource_manager_service .\n```\n\n## Service run\nTo run the service one should simply\n```bash\nrepo_root/resources_manager_service$ docker run --network host -v $PWD:/project maas/v2/resource_manager_service:latest\n```\n\n## Communication schema\nCommunication should be implemented via REST.\n\n### Input image registration\nRequest:\n```\nMethod: POST\nPort: 50002\nPath: /maas_workshop/v2/resources_manager/input_image_register\nBody: (form-data)\n \"image\": bytes of jpeg image\n \"login\" user login (str)\n```\nResponse:\n```json\n{\n \"login\": \"requester_login\",\n \"resource_identifier\": \"uuid\"\n}\n```\n\n### Input image fetching\nRequest:\n```\nMethod: GET\nPort: 50002\nPath: /maas_workshop/v2/resources_manager/input_image_register\nBody: (form-data)\n \"resource_identifier\": uuid (str)\n \"login\" user login (str)\n```\nResponse:\n```\nBytes - jpeg image\n```\n\n### Face detection result registration\nRequest:\n```\nMethod: POST\nPort: 50002\nPath: /maas_workshop/v2/resources_manager/face_detection_register\nBody: (form-data)\n \"resource_identifier\": uuid (str)\n \"login\" user login (str)\n \"face_detection_results\" json of format returned by face detection service as text\n```\nResponse:\n```json\n{\n \"msg\": \"OK\"\n}\n```\n\n### Face detection result fetching\nRequest:\n```\nMethod: GET\nPort: 50002\nPath: /maas_workshop/v2/resources_manager/face_detection_register\nBody: (form-data)\n \"resource_identifier\": uuid (str)\n \"login\" user login (str)\n```\nResponse:\n```json\n{\n \"inference_results\": [\n {\n \"bounding_box\": {\n \"left_top\": [0, 0],\n \"right_bottom\": [100, 100]\n },\n \"confidence\": 0.78,\n \"landmarks\": [\n [0, 0], [10, 10]\n ]\n }\n ]\n}\n```"
},
{
"alpha_fraction": 0.7473065853118896,
"alphanum_fraction": 0.7639569044113159,
"avg_line_length": 29.878787994384766,
"blob_id": "7ee26047243db9e36c02ff68f43e078d20408623",
"content_id": "c27c7236f1328b367891c549e90aabaafe07c492",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1021,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 33,
"path": "/README.md",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "# Model as a Service - DL model deployment workshop\n\n## Prerequisites\n* Operating system: Linux - preferable Ubuntu. Windows support - experimental\n* Docker (recommended version: ``19.03``)\n* docker-compose (recommended version: ``1.28.5``)\n* Postman app\n\n## Environment setup\nOne should execute two commands before workshop to make sure everything works (in context of ``master`` branch).\n\nFirst one to build services:\n```bash\nrespository_root$ docker-compose build\n```\n\nSecond one to build playground: [here](./playground/README.md)\n\nIn case of any issues please contact me via official conference channel.\n\n## Running services\n```bash\nrespository_root$ docker-compose up\n```\n\n## Postman docs\nOne may make use of [Postman docs](./postman_docs/ModelAsAServiceV2.postman_collection.json).\n\n## Workshop instruction\nOne may find instruction [here](https://docs.google.com/presentation/d/1evTpBg3eFMDigZlPdehKCJJ-cE78ehIHLciK3u7-IY4/edit?usp=sharing)\n\n## Services overview\n\n\n\n"
},
{
"alpha_fraction": 0.694656491279602,
"alphanum_fraction": 0.7014418840408325,
"avg_line_length": 27.7560977935791,
"blob_id": "8a4385341b33e97e219bc67b71ecc9b52053fbff",
"content_id": "a355abdddcf24dce19d69eba23b57ecdf73b5ab5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1179,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 41,
"path": "/face_detection_service/face_detection_service/communication.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "import json\n\nimport numpy as np\nimport cv2 as cv\nimport requests\n\nfrom .config import INPUT_IMAGE_FETCHING_URI, RESULT_POSTING_URI\n\nLOGIN_FIELD = \"login\"\nRESOURCE_IDENTIFIER_FIELD = \"resource_identifier\"\nFACE_DETECTION_RESULTS_FIELD = \"face_detection_results\"\n\n\ndef fetch_processing_input(\n requester_login: str,\n request_identifier: str\n) -> np.ndarray:\n payload = {\n LOGIN_FIELD: requester_login,\n RESOURCE_IDENTIFIER_FIELD: request_identifier\n }\n response = requests.get(INPUT_IMAGE_FETCHING_URI, data=payload)\n if response.status_code != 200:\n raise RuntimeError(\"Could not process request\")\n data = np.fromstring(response.content, dtype=np.uint8)\n return cv.imdecode(data, cv.IMREAD_COLOR)\n\n\ndef register_results(\n requester_login: str,\n request_identifier: str,\n results: dict\n) -> None:\n payload = {\n LOGIN_FIELD: requester_login,\n RESOURCE_IDENTIFIER_FIELD: request_identifier,\n FACE_DETECTION_RESULTS_FIELD: json.dumps(results)\n }\n response = requests.post(RESULT_POSTING_URI, data=payload)\n if response.status_code != 200:\n raise RuntimeError(\"Could not send back results.\")\n"
},
{
"alpha_fraction": 0.460317462682724,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 14.75,
"blob_id": "20cd71cde178ef22bed2802778830abb61e72c3a",
"content_id": "94c757fed71e31dbc6cf0513a0b18631816545a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 63,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 4,
"path": "/gateway_service/requirements.txt",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "Flask==1.1.1\nFlask-RESTful==0.3.7\nrequests==2.22.0\npika==1.2.0\n"
},
{
"alpha_fraction": 0.5118110179901123,
"alphanum_fraction": 0.7007874250411987,
"avg_line_length": 17.285715103149414,
"blob_id": "56245bbbc4ff4aa091a13fe2c85168a76fea51ef",
"content_id": "83329f61520eb72a44455b647fb1145e6df31762",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 127,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 7,
"path": "/object_detection_service/requirements.txt",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "torch==1.8.0\ntorchvision==0.9.0\ndataclasses-json==0.5.2\nnumpy==1.20.1\nopencv-python==4.5.1.48\nFlask==1.1.1\nFlask-RESTful==0.3.7"
},
{
"alpha_fraction": 0.747826099395752,
"alphanum_fraction": 0.747826099395752,
"avg_line_length": 22,
"blob_id": "41f0ff5e9288176b952f67eca38a05eea67ec471",
"content_id": "ee7413bb2d56f5496691e427cf303f2361e0809e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 230,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 10,
"path": "/gateway_service/gateway_service/handlers/object_detection.py",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "from flask import Response\n\nfrom ..config import OBJECT_DETECTION_URL\nfrom .proxy import Proxy\n\n\nclass ObjectDetection(Proxy):\n\n def post(self) -> Response:\n return self._forward_message(target_url=OBJECT_DETECTION_URL)\n"
},
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.7554945349693298,
"avg_line_length": 23.33333396911621,
"blob_id": "e9341111328f9d1212c2bbaed95bb125ebc66ece",
"content_id": "743756f8ca898b56cb6186d68a970bb4bfd6a6d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 15,
"path": "/playground/README.md",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "# Model as a Service - playground\n\nHere you may find comfortable playground to test staff while workshop.\n\n## Environment build\nTo build playground simply run:\n```bash\nrepo_root/playground$ docker build -t maas/v2/playground . \n```\n\n## Environment run\nTo run playground use the following command:\n```bash\ndocker run --network host -it maas/v2/playground:latest\n```"
},
{
"alpha_fraction": 0.5917431116104126,
"alphanum_fraction": 0.6091743111610413,
"avg_line_length": 23.772727966308594,
"blob_id": "8cb7ca929cb33e85092cf42c6a6acaa2031132f0",
"content_id": "ab3837d911770ca64e045d8c3921b976b2110cde",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1090,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 44,
"path": "/face_detection_service/README.md",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "# Model as a Service - face detection service\n\n## Service build\nTo build face detection service simply run:\n```bash\nrepo_root/face_detection_service$ docker build -t maas/v2/face_detection_service .\n```\n\n## Service run\nTo run the service one should simply\n```bash\nrepo_root/face_detection_service$ docker run --network host -v $PWD:/project maas/v2/face_detection_service:latest\n```\n\n## Communication schema\n```\nRabbitMQ message:\n Queue name: face_detection_channel\n Messages format: serialized json\n {\n \"login\": \"user_login\",\n \"resource_identifier\": \"uuid\"\n }\n```\nData provided in message can be used to fetch image from resource manager service.\nDesired outcome:\n```json\n{\n \"inference_results\": [\n {\n \"bounding_box\": {\n \"left_top\": [0, 0],\n \"right_bottom\": [100, 100]\n },\n \"confidence\": 0.78,\n \"landmarks\": [\n [0, 0], [10, 10]\n ]\n }\n ]\n}\n```\nwhich describe face detection results. The json file should be posted to \nresource manager service.\n"
},
{
"alpha_fraction": 0.7430830001831055,
"alphanum_fraction": 0.7509881258010864,
"avg_line_length": 18.538461685180664,
"blob_id": "1003165c5a1c3db6cede3896e6124ee9edeb2e87",
"content_id": "755f95d6352172d113d0b5e040c569fef9e031e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 253,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 13,
"path": "/gateway_service/Dockerfile",
"repo_name": "PawelPeczek/ModelAsAServiceV2",
"src_encoding": "UTF-8",
"text": "FROM python:3.9\n\nRUN mkdir /build\nRUN mkdir /project\n\nCOPY ./requirements.txt build/requirements.txt\nRUN python -m pip install -r build/requirements.txt\nRUN rm -r build\n\nCOPY . /project\nWORKDIR project\n\nENTRYPOINT [\"python\", \"-m\", \"gateway_service.app\"]"
}
] | 32 |
vickychun/myproj | https://github.com/vickychun/myproj | 997cf3ca9751811723a63435ed7d51c63092b7c7 | a3408537a392e46e4f8c4ae986c928cc9905ff36 | f5c6b16b3e9e4358f122e0269d4a8a203b66c9fe | refs/heads/master | 2020-03-11T23:48:10.140973 | 2018-05-11T09:22:09 | 2018-05-11T09:22:09 | 130,332,108 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4931102395057678,
"alphanum_fraction": 0.5767716765403748,
"avg_line_length": 14.630768775939941,
"blob_id": "7495dedb10da1ab92e359b567855a727552c0647",
"content_id": "b295acdb0cd5d7ec35bb0cd5ee7b301f05e1b264",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1080,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 65,
"path": "/Untitled.py",
"repo_name": "vickychun/myproj",
"src_encoding": "UTF-8",
"text": "\n# coding: utf-8\n\nstr1 = \"I am learning programming\"\nprint(str1)\nprint(len(str1))\n\nprint(str1[5:])\nprint(str1[::-1])\n#只有python才有\n\nprint(\"{0} {1}\".format(100,200))\nprint(\"num1: {0} num2: {1}\".format(1,2))\nprint(\"{0:10}{1:10}\".format(200,300))\nprint(\"{0:<7}{1:<7}{2:<7}\".format(123,456,789))\n\n#5/11\nx = [2,3,4]\nfor i in x:\n print(i, end=\" \")\nprint()\nfor i in range(len(x)):\n print(x[i], end=\" \")\nprint()\n\nfor idx, i in enumerate(x):\n print(idx+1,i,sep=\":\")\nprint()\n\nx.append(5)\nprint(x)\n\ny=[1,2]\nz=x+y\nprint(x,y,z)\n#加完是一個全新的list\n\nz=x\nz[0]=99\nprint(x,z,sep=\"\\n\")\n#\"z=x\"代表兩者的起始位子一樣,x也會跟著一起被改\n\nx.append(y)\nprint(x)\n# y is a list\n\nprint(x[ len(x)-1 ])\nprint(x[-1])\nx[2:3]=[90,91,92]\nprint(x)\n\ndef prList(arr):\n for idx,el in enumerate(arr):\n if (idx!=len(arr)-1):\n print(el,end=\",\")\n else:\n print(el)\na = [10,20,30,40,50]\nprList(a)\n\ndef enumList(arr):\n for idx,el in enumerate(arr):\n print(idx+1,el,sep=\". \")\n # print(\"{}. {}\".format(idx+1,el))\nb = ['apple','orange','banana'] \nenumList(b)"
},
{
"alpha_fraction": 0.6416382193565369,
"alphanum_fraction": 0.6894198060035706,
"avg_line_length": 12.904762268066406,
"blob_id": "d255300b41c2c20a098ca100a83482b2412e2fb4",
"content_id": "ddfe6e82bb03e48020849d931af25b52e9d5081f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 361,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 21,
"path": "/list_range.py",
"repo_name": "vickychun/myproj",
"src_encoding": "UTF-8",
"text": "nums = list(range(5,0,-1))\nprint(nums)\nnums[2]=100\n#range本身裡面其實沒有定義中括號\nprint(nums)\n\na=[1,2,3]\ntotal=0\nfor el in a:\n total+=el\n#sum 可以做list的運算\ntotal2=sum(a)\nprint(total,total2)\n\nstrA = \"Stone\"\nstrA = list(strA)\n#轉成list\nstrA[0]=\"Y\"\nprint(strA)\nprint(','.join(strA))\n#將list裡的東西用特定符號連接成string\n\n"
}
] | 2 |
jonathaneunice/simplere | https://github.com/jonathaneunice/simplere | ba7c782f9dc06b3d4ec370dabc3219fd590e5d2c | 4732fcee0da80d748fa50e3427c9b603d116b9fd | 064aa70824b20a24e2c18f30d5c0b45bab5750d1 | refs/heads/master | 2021-01-01T06:45:41.469219 | 2017-05-31T18:12:48 | 2017-05-31T18:12:48 | 12,815,000 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7164685726165771,
"alphanum_fraction": 0.7266553640365601,
"avg_line_length": 33.64706039428711,
"blob_id": "2439c55b23aa169ad9e95585e6ba35304d2a07da",
"content_id": "887e220ca823acf7d383b374d85a48102032d20f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 589,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 17,
"path": "/docs/installation.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Installation\n============\n\nTo install or upgrade to the latest version::\n\n pip install -U simplere\n\nTo install under a specific Python version (3.3 in this example)::\n\n python3.3 -m pip -U simplere\n\nYou may need to prefix these with ``sudo`` to authorize installation. In\nenvironments without super-user privileges, you may want to use ``pip``'s\n``--user`` option, to install only for a single user, rather than\nsystem-wide. If you use the standalone ``pip`` programs, you may also\nneed to use ``pip2`` or ``pip3`` version-dependent variants, depending\non your system configuration.\n"
},
{
"alpha_fraction": 0.593406617641449,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 12.923076629638672,
"blob_id": "83ed94e7fcc3459ce4c9a075c4c29f5f7cb976a7",
"content_id": "9f2118e18d5de710a0eb2e0705f8b1a0713b4205",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 182,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 13,
"path": "/tox.ini",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "\n[tox]\nenvlist = py2{6,7}, pypy, py3{3,4,5,6}, pypy3\n\n[testenv]\n# changedir=test\nusedevelop=True\ndeps=\n pytest\n coverage\n pytest-cov\n\tsix\ncommands=\n py.test -l {posargs}\n"
},
{
"alpha_fraction": 0.680701732635498,
"alphanum_fraction": 0.6842105388641357,
"avg_line_length": 24.909090042114258,
"blob_id": "72d78160446dd4cc91c908cac44e753d6e19a442",
"content_id": "b499ce0a1993019103ed76ccd396ee1d5071bd2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 570,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 22,
"path": "/docs/usage.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Usage\n=====\n\nPython regular expressions are powerful, but the language's lack\nof an *en passant* (in passing) assignment requires a preparatory\nmotion and then a test::\n\n import re\n\n match = re.search(pattern, some_string)\n if match:\n print match.group(1)\n\nWith ``simplere``, you can do it in fewer steps::\n\n from simplere import *\n\n if match / re.search(pattern, some_string):\n print match[1]\n\nIn addition to its own classes, ``from simplere import *`` imports both\nthe standard ``re`` module and the ``match`` object so you don't have to.\n"
},
{
"alpha_fraction": 0.5783132314682007,
"alphanum_fraction": 0.6024096608161926,
"avg_line_length": 30.125,
"blob_id": "f79078b89e7be9554d275d1e380a72bcf457c44d",
"content_id": "a410dc4e00b800e79a7dfc74277eda79b5a621dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 8,
"path": "/docs/testing.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Testing\n=======\n\nTo run the module tests, use one of these commands::\n\n tox # normal run - speed optimized\n tox -e py27 # run for a specific version only (e.g. py27, py34)\n tox -c toxcov.ini # run full coverage tests\n"
},
{
"alpha_fraction": 0.5997947454452515,
"alphanum_fraction": 0.6110826134681702,
"avg_line_length": 23.06172752380371,
"blob_id": "57048d86a625b3e53bb3ebb2ff37463af117ab81",
"content_id": "97e29b512cbfb87a0afceff072bf5c6af0467fd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1949,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 81,
"path": "/test/test_glob.py",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom simplere import *\nimport re\nimport six\nimport pytest\nimport sys\n\n_STRHASCASEFOLD = hasattr(str, 'casefold')\n\n\nclass YesItIs(ValueError):\n pass\n\n\ndef test_memoization():\n\n assert Glob('a*') is Glob('a*')\n assert Glob(\"a*\", \"b*\") is Glob(\"a*\", \"b*\")\n\n\ndef test_Glob():\n assert \"alpha\" in Glob(\"a*\")\n assert \"beta\" not in Glob(\"a*\")\n\n assert Glob(\"a*\").__contains__(\"alpha\") == \"a*\"\n\n assert 'globtastic' in Glob('glob*')\n\n\ndef test_Glob_starargs():\n g = Glob(\"a*\", \"b*\")\n assert \"alpha\" in g\n assert \"beta\" in g\n assert \"gamma\" not in g\n assert 4 not in g\n\n\ndef test_Glob_contains():\n assert Glob(\"a*\").__contains__(\"alpha\") == \"a*\"\n assert Glob(\"a*\").contains(\"alpha\") == \"a*\"\n\n g = Glob(\"a*\", \"b*\")\n assert g.contains(\"andy\") == \"a*\"\n assert g.contains(\"bubba\") == \"b*\"\n\n\ndef test_InsensitiveGlob():\n g = InsensitiveGlob(\"b*\")\n gg = InsensitiveGlob(\"B*\")\n\n # some easy ASCII stuff\n assert \"bubba\" in g\n assert \"bubba\" in gg\n assert \"Bubba\" in g\n assert \"Bubba\" in gg\n assert \"wubba\" not in g\n assert \"wubba\" not in gg\n\n # now some (basic) Unicode foldign\n assert six.u(\"BUSSE\") in g\n assert six.u(\"BU\\u1E9EE\") in g # LATIN CAPITAL LETTER SHARP S\n assert six.u(\"BU\\u00DFE\") in g # LATIN SMALL LETTER SHARP S\n\[email protected](not _STRHASCASEFOLD, reason=\"Unicode is hard\")\ndef test_InsensitiveGlob_harder():\n ug = InsensitiveGlob(six.u(\"BUSSE\"))\n ugg = InsensitiveGlob(six.u(\"BU\\u1E9EE\")) # LATIN CAPITAL LETTER SHARP S\n uggg = InsensitiveGlob(six.u(\"BU\\u00DFE\")) # LATIN SMALL LETTER SHARP S\n\n assert six.u(\"BUSSE\") in ug\n assert six.u(\"BUSSE\") in ugg\n assert six.u(\"BUSSE\") in uggg\n\n assert six.u(\"BU\\u1E9EE\") in ug\n assert six.u(\"BU\\u1E9EE\") in ugg\n assert six.u(\"BU\\u1E9EE\") in uggg\n\n assert six.u(\"BU\\u00DFE\") in ug\n assert six.u(\"BU\\u00DFE\") in ugg\n assert six.u(\"BU\\u00DFE\") in uggg\n"
},
{
"alpha_fraction": 0.6598639488220215,
"alphanum_fraction": 0.6598639488220215,
"avg_line_length": 40.85714340209961,
"blob_id": "e4447d5b645cbe8da01268edf4e1608d927c0bbc",
"content_id": "c07d647b5e2a61798815babd3bfeaf7733b2b0f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 588,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 14,
"path": "/docs/alternatives.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Options and Alternatives\n========================\n\nIf you prefer the look of the less-than (``<``) or less-than-or-equal\n(``<=``), as indicators that ``match`` takes the value of the\nfollowing function call, they are experimentally supported as aliases\nof the division operation (``/``). You may define your own match\nobjects, and can use them on memoized ``Re`` objects too. Putting\na few of these optional things together::\n\n answer = Match() # need to do this just once\n\n if answer < Re(r'(?P<word>th..)').search('and that goes there'):\n assert answer.word == 'that'\n\n\n"
},
{
"alpha_fraction": 0.72499018907547,
"alphanum_fraction": 0.729305624961853,
"avg_line_length": 42.20338821411133,
"blob_id": "9daf7395f2b72bce4ea2f99d8bb033c34a97007a",
"content_id": "0c21093b150c76f354f587d2b44af057a3b753fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2549,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 59,
"path": "/docs/reobjects.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Re Objects\n==========\n\n``Re`` objects are `memoized <http://en.wikipedia.org/wiki/Memoization>`_\nfor efficiency, so they compile their pattern just once, regardless of how\nmany times they're mentioned in a program.\n\nNote that the ``in`` test turns the sense of the matching around (compared\nto the standard ``re`` module). It asks \"is the given string *in* the set of\nitems this pattern describes?\" To be fancy, the ``Re`` pattern is an\nintensionally defined set (namely \"all strings matching the pattern\"). This\norder often makes excellent sense whey you have a clear intent for the test.\nFor example, \"is the given string within the set of *all legitimate\ncommands*?\"\n\nSecond, the ``in`` test had the side effect of setting the underscore name\n``_`` to the result. Python doesn't support *en passant*\nassignment--apparently, no matter how hard you try, or how much\nintrospection you use. This makes it harder to both test and collect results\nin the same motion, even though that's often exactly appropriate. Collecting\nthem in a class variable is a fallback strategy (see the *En Passant*\nsection below for a slicker one).\n\nIf you prefer the more traditional ``re`` calls::\n\n if Re(pattern).search(some_string):\n print Re._[1]\n\n``Re`` works even better with named pattern components, which are exposed\nas attributes of the returned object::\n\n person = 'John Smith 48'\n if person in Re(r'(?P<name>[\\w\\s]*)\\s+(?P<age>\\d+)'):\n print Re._.name, \"is\", Re._.age, \"years old\"\n else:\n print \"don't understand '{}'\".format(person)\n\nOne trick being used here is that the returned object is not a pure\n``_sre.SRE_Match`` that Python's ``re`` module returns. Nor is it a subclass.\n(That class `appears to be unsubclassable\n<http://stackoverflow.com/questions/4835352/subclassing-matchobject-in-python>`_.)\nThus, regular expression matches return a proxy object that\nexposes the match object's numeric (positional) and\nnamed groups through indices and attributes. If a named group has the same\nname as a match object method or property, it takes precedence. Either\nchange the name of the match group or access the underlying property thus:\n``x._match.property``\n\nIt's possible also to loop over the results::\n\n for found in Re('pattern (\\w+)').finditer('pattern is as pattern does'):\n print found[1]\n\nOr collect them all in one fell swoop::\n\n found = Re('pattern (\\w+)').findall('pattern is as pattern does')\n\nPretty much all of the methods and properties one can access from the standard\n``re`` module are available.\n"
},
{
"alpha_fraction": 0.5589850544929504,
"alphanum_fraction": 0.5671274662017822,
"avg_line_length": 23.109588623046875,
"blob_id": "e97398af2e1fad446f07d487289cea7edfdaea13",
"content_id": "8e78a1b50bbcd3bb75233f20f7b861d7d3fd5019",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5281,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 219,
"path": "/test/test_re.py",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "\nfrom simplere import *\nfrom simplere.core import regrouped\nimport pytest\n\n\nclass YesItIs(ValueError):\n pass\n\n\ndef test_basic():\n tests = 'some string with things in it ok?'\n\n sword = Re(r'\\b(s\\w*)\\b')\n\n if tests in sword:\n assert Re._[1] == 'some'\n assert Re._.end(1) == 4\n assert Re._._match.group(1) == Re._[1]\n else:\n raise YesItIs()\n\n assert 'ddd' not in sword\n assert 'it goes other ways' not in sword\n assert 'it goes other ways sometimes' in sword\n\n\ndef test_auto_stringify():\n num = Re(r'^\\d+')\n assert 0 in num\n assert 101 in num\n assert -3 not in num\n assert \"sword\" not in num\n\n\ndef test_match():\n num = Re(r'^\\d+')\n assert num.match('0')\n assert num.match('101')\n assert not num.match('x101')\n assert not num.match('-3')\n assert not num.match(\"sword\")\n\n\ndef test_sub():\n s = 'and this is this but also THIS'\n s1 = Re('this', re.I).sub('***', s)\n assert s1 == 'and *** is *** but also ***'\n\n\ndef test_subn():\n s = 'and this is this but also THIS'\n ans = Re('this', re.I).subn('***', s)\n assert ans == ('and *** is *** but also ***', 3)\n\n\ndef test_split():\n s = 'and this is this but also THIS'\n ans = Re('\\s*this\\s*', re.I).split(s)\n assert ans == ['and', 'is', 'but also', '']\n\n\ndef test_escape():\n assert Re.escape(r'\\[()[') == '\\\\\\\\\\\\[\\\\(\\\\)\\\\['\n\n\ndef test_findall():\n tests = 'some string with things in it ok?'\n\n sword = Re(r'\\b(s\\w*)\\b')\n\n assert sword.findall(tests) == ['some', 'string']\n\n iterlist = [m[1] for m in sword.finditer(tests)]\n assert iterlist == ['some', 'string']\n\n\ndef test_attributes():\n\n tests = 'some string with things in it ok?'\n sword = Re(r'\\b(?P<word>s\\w*)\\b')\n\n if tests in sword:\n assert Re._.word == 'some'\n else:\n raise YesItIs()\n\n iterlist = [m.word for m in sword.finditer(tests)]\n assert iterlist == ['some', 'string']\n\n person = 'John Smith 48'\n if person in Re(r'(?P<name>[\\w\\s]*)\\s+(?P<age>\\d+)'):\n assert Re._.name == 'John Smith'\n assert int(Re._.age) == 48\n assert Re._.name == Re._._match.group('name')\n assert Re._.age == Re._._match.group('age')\n\n with pytest.raises(AttributeError):\n Re._.taste\n else:\n raise YesItIs()\n\n for found in Re(r'pattern (\\w+)').finditer('pattern is as pattern does'):\n assert isinstance(found, ReMatch)\n assert found[1] in ['is', 'does']\n\n found = Re(r'pattern (\\w+)').findall('pattern is as pattern does')\n assert found == 'is does'.split()\n\n\ndef test_regrouping():\n sentence = \"you've been a bad boy\"\n pattern = r'(?P<word>bad)'\n re_pat = Re(pattern)\n\n repl = lambda m: m.word.upper() # note use of attributes\n\n newsent = re_pat.sub(repl, sentence)\n assert newsent == \"you've been a BAD boy\"\n\n sentcap = Re(r'^(?P<first>.)')\n sentcap_repl = lambda m: m.first.upper()\n\n assert sentcap.sub(sentcap_repl, newsent) == \"You've been a BAD boy\"\n\n\ndef test_broken_regrouping():\n # make sure there's coverage of one odd immediate-return case\n assert regrouped(44) == 44\n\n\ndef test_memoization():\n testpat = Re(r'\\b(s\\w*)\\b')\n testpat1 = Re(r'\\b(s\\w*)\\b')\n assert testpat is testpat1 # test memoization\n\n\ndef test_from_sre():\n pat = re.compile(r'\\b(s\\w*)\\b')\n repat = Re(pat)\n\n tests = 'some string with things in it ok?'\n assert tests in repat\n assert repat.findall(tests) == ['some', 'string']\n\n assert 'ddd' not in repat\n\n\ndef test_direct_ReMatch():\n\n m = re.match(r'this', 'that')\n assert not m\n assert not ReMatch(m)\n\n m = re.match(r'this', 'this')\n 
assert m\n assert ReMatch(m)\n\n\ndef test_direct_ReMatch_easy_access():\n m = re.match(r'this', 'this')\n rm = ReMatch(m)\n assert m.group(0) == 'this'\n assert rm.group(0) == 'this'\n\n match = re.match(r'(?P<word>this)', 'this is a string')\n match = ReMatch(match)\n assert match[1] == match.word\n assert match.group(1) == match.word\n\n\ndef test_en_passant_Match():\n s = 'this is the test of that thing you like'\n\n match = Match()\n\n if match / re.search(r'th\\w*', s):\n assert match[0] == 'this'\n else:\n assert YesItIs()\n\n if match < re.search(r'th\\w*', s):\n assert match[0] == 'this'\n else:\n assert YesItIs()\n\n if match <= re.search(r'th\\w*', s):\n assert match[0] == 'this'\n else:\n assert YesItIs()\n\n if match / re.search(r'(?P<target>th\\w+g)', s):\n assert match.target == 'thing'\n\n # from the docs\n if match / re.search(r'(?P<word>th.s)', 'this is a string'):\n assert match[1] == 'this'\n assert match.word == 'this'\n assert match.group(1) == 'this'\n\n if match < Re(r'(?P<word>th..)').search('and that goes there'):\n assert match.word == 'that'\n\n answer = Match() # need to do this just once\n\n if answer < Re(r'(?P<word>th..)').search('and that goes there'):\n assert answer.word == 'that'\n\n\ndef test_en_passant_with_Re():\n \"\"\"\n Make sure that if ReMatch object already generated, that\n en passant usage understands that.\n \"\"\"\n\n match = Match()\n s = 'this is the test of that thing you like'\n\n if match / Re(r'thi\\w+').search(s):\n assert match[0] == 'this'\n"
},
{
"alpha_fraction": 0.6553133726119995,
"alphanum_fraction": 0.7002725005149841,
"avg_line_length": 39.77777862548828,
"blob_id": "b13661e9dac9e40ec55624fee40581455307611a",
"content_id": "ce020da03d96260b9df43956c6ef4c9050e3c9ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 734,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 18,
"path": "/docs/notes.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Notes\n=====\n\n* Automated multi-version testing managed with `pytest\n <http://pypi.python.org/pypi/pytest>`_ and `tox\n <http://pypi.python.org/pypi/tox>`_. Continuous integration testing\n with `Travis-CI <https://travis-ci.org/jonathaneunice/intspan>`_.\n Packaging linting with `pyroma <https://pypi.python.org/pypi/pyroma>`_.\n\n* Version 1.2.9 updates testing for early 2017 Python\n versions. Successfully packaged for, and\n tested against, all late-model versions of Python: 2.6, 2.7, 3.3,\n 3.4, 3.5, and 3.6, as well as PyPy 5.6.0 (based on\n 2.7.12) and PyPy3 5.5.0 (based on 3.3.5).\n\n* The author, `Jonathan Eunice <mailto:[email protected]>`_ or\n `@jeunice on Twitter <http://twitter.com/jeunice>`_\n welcomes your comments and suggestions.\n"
},
{
"alpha_fraction": 0.779808521270752,
"alphanum_fraction": 0.7832897901535034,
"avg_line_length": 48.956520080566406,
"blob_id": "2015577c7c0ceec0cdf7517bb34a222a32fab83d",
"content_id": "23ba697dc1caa742765097c9087358217ad8e3bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1149,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 23,
"path": "/docs/motivation.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Motivation\n==========\n\nIn the simple examples above, \"fewer steps\" seems like a small\nsavings (3 lines to 2). While a 33% savings is a pretty good\noptimization, is it really worth using another module and\na quirky *en passant* operator to get it?\n\nIn code this simple, maybe not. But real regex-based searching tends\nto have multiple, cascading searches, and to be tightly interwoven\nwith complex pre-conditions, error-checking, and post-match formatting\nor actions. It gets complicated fast. When multiple ``re`` matches\nmust be done, it consumes a lot of \"vertical space\" and often\nthreatens to push the number of lines a programmer is viewing at\nany given moment beyond the number that can be easily held in working\nmemory. In that case, it proves valuable to condense what is logically\na single operation (\"regular expression test\") into a single line\nwith its conditional ``if``.\n\nThis is even more true for the \"exploratory\" phases of development,\nbefore a program's appropriate structure and best logical boundaries\nhave been established. One can always \"back out\" the condensing *en\npassant* operation in later production code, if desired.\n"
},
{
"alpha_fraction": 0.6329113841056824,
"alphanum_fraction": 0.6371307969093323,
"avg_line_length": 36.91999816894531,
"blob_id": "c15dc63ae57475af33e8b17f137e37b1737b9798",
"content_id": "eba7198c84a01f210da41256081e32e5ab7f5550",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 948,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 25,
"path": "/docs/undercovers.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Under the Covers\n================\n\n``ReMatch`` objects\nwrap Python's native``_sre.SRE_Match`` objects (the things that ``re``\nmethod calls return).::\n\n match = re.match(r'(?P<word>th.s)', 'this is a string')\n match = ReMatch(match)\n if match:\n print match.group(1) # still works\n print match[1] # same thing\n print match.word # same thing, with logical name\n\nBut that's a lot of boilerplate for a simple test, right? So ``simplere``\n*en passant* operator redefining the division operation and proxies the ``re`` result\non the fly to the pre-defined ``match`` object::\n\n if match / re.search(r'(?P<word>th.s)', 'this is a string'):\n assert match[1] == 'this'\n assert match.word == 'this'\n assert match.group(1) == 'this'\n\nIf the ``re`` operation fails, the resulting object is guaranteed to have\na ``False``-like Boolean value, so that it will fall through conditional tests.\n"
},
{
"alpha_fraction": 0.636468768119812,
"alphanum_fraction": 0.6931380033493042,
"avg_line_length": 25.171716690063477,
"blob_id": "1b238a2afe76fd286850c8b3ac7261d0adeb5ef5",
"content_id": "8a49caccf63186d7efcb22b2b09a4a76345593b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 2594,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 99,
"path": "/docs/CHANGES.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Change Log\n==========\n\n**1.2.12** (May 31, 2017)\n\n Updated Python 2 / 3 compatibility strategy to be Python 3\n centric. Should be more future-proofed.\n\n Updated testing.\n\n\n**1.2.11** (February 17, 2017)\n\n Updates testing. Again. Python 3.2 support again dropped, for the\n last time, given failure on Travis CI. It's old, anyway. Time to\n upgrade!\n\n\n**1.2.10** (February 17, 2017)\n\n Updates testing. Python 3.2 support re-established, given still\n supported on Travis CI. Docs tweaked.\n\n\n**1.2.9** (January 23, 2017)\n\n Updates testing. Newly qualified under 2.7.13 and 3.6, as well as\n most recent builds of pypy and pypy3. Python 3.2 support withdrawn\n given obsolescence.\n\n\n**1.2.8** (August 26, 2015)\n\n Substantial documentation reorg.\n\n\n**1.2.7** (August 23, 2015)\n\n Starts automated measurement of test branch coverage. Inital runs\n show 100% branch coverage. Hooah!\n\n\n**1.2.6** (August 20, 2015)\n\n Bumped to 100% test coverage.\n\n\n**1.2.5** (August 19, 2015)\n\n Added automated measurement of test coverage. Line coverage\n started at 92%. Bumped to 97%.\n\n\n**1.2.0** (August 14, 2015)\n\n Realized imports were overly restrictive, requiring clients of\n module to needlessly (and contra docs) manually import ``re`` and\n construct the ``match`` object. Fixed. Bumped minor version number\n to reflect de facto API change.\n\n\n**1.1.1** (August 14, 2015)\n\n Simplified ``setup.py`` and packaging. Tweaked docs.\n\n\n**1.1.0** \n\n Adds multi-pattern and case insensitive Glob subclass. Added wheel\n packaging. Rearranged and extended testing structure. Updated\n setup and docs.\n\n\n**1.0.10** \n\n Added ``bdist_wheel`` package support. Extended testing matrix to\n 3.5 pre-release builds. Switched to Apache License.\n\n\n**1.0.5** \n\n In several dot-releases, have added support for Travis-CI cloud-\n based continuous integration testing, Sphinx-based documentation,\n and readthedocs.org hosted documentation. The Travis bit has\n required a separate Github repository be created. It is managed\n out of the same development directory, overlaying the existing\n Mercurial / Bitbucket repo. So far, that has caused no problems.\n\n Documentation somewhat improved.\n\n\n**1.0.0** \n\n Cleaned up source for better PEP8 conformance\n\n Bumped version number to 1.0 as part of move to `semantic\n versioning <http://semver.org>`_, or at least enough of it so as\n to not screw up Python installation procedures (which don't seem\n to understand 0.401 is a lesser version that 0.5, because 401 > 5)\n\n\n\n"
},
{
"alpha_fraction": 0.7239999771118164,
"alphanum_fraction": 0.7239999771118164,
"avg_line_length": 22.809524536132812,
"blob_id": "bf426b21e9d96bf4659256f24bfa7dcb4ade21e8",
"content_id": "a985902a37f705e5f02d40cfc9a927c73463d658",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 500,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 21,
"path": "/docs/index.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "simplere\n========\n\nA simplified interface to Python's regular expression (``re``)\nstring search. Eliminates steps and provides\nsimpler access to results. As a bonus, also provides compatible way to\naccess Unix glob searches.\n\n.. toctree::\n :titlesonly:\n\n Usage <usage>\n Motivation <motivation>\n Re Objects <reobjects>\n Under the Covers <undercovers>\n Options and Alternatives <alternatives>\n Globs <globs>\n Notes <notes>\n Installation <installation>\n Testing <testing>\n CHANGES\n"
},
{
"alpha_fraction": 0.6937500238418579,
"alphanum_fraction": 0.7020833492279053,
"avg_line_length": 35.22641372680664,
"blob_id": "2a6a7c383d7b50690d5939e7ab9674403f918086",
"content_id": "c9848b9e4f33b84453777eae01cb1c13b2035448",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1920,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 53,
"path": "/docs/globs.rst",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "Globs\n=====\n\nRegular expressions are wonderfully powerful, but sometimes the simpler `Unix glob\n<http://en.wikipedia.org/wiki/Glob_(programming)>`_ is works just fine. As a bonus,\n``simplere`` also provides simple glob access.::\n\n if 'globtastic' in Glob('glob*'):\n print \"Yes! It is!\"\n else:\n raise ValueError('YES IT IS')\n\nIf you want to search or test\nagainst multiple patterns at once, ``Glob`` objects take\na variable number of patterns. A match is defined as *any* of the\npatterns matching.::\n\n img_formats = Glob(\"*.png\", \"*.jpeg\", \"*.jpg\", \"*.gif\")\n if filename.lower() in img_formats:\n ... further processing ...\n\nAlternatively, you can splat an existing list into the ``Glob``\nconstructor with Python's unary star syntax::\n\n img_formats = \"*.png *.jpeg *.jpg *.gif\".split()\n if filename.lower() in Glob(*img_formats):\n ... further processing ...\n\n\nCase-insensitive glob searches are also available::\n\n bg = InsensitiveGlob('b*')\n if 'bubba' in bg:\n assert 'Bubba' in bg\n\nGlobs have their own syntax for case insensitive characters,\nbut it can be a pain to use. It may be easier to use the\n``InsensitiveGlob`` subclass. Or even alias the case-insensitive\nversion as the main one::\n\n from simplere import InsensitiveGlob as Glob\n\n.. note:: Case folding / case-insensitive searches work well in the\n ASCII range, but Unicode characters and case folding is more\n intricate. Basic folding is provided out of the box. It's quite\n adequate for mapping against common\n filename patterns, for example. Those\n needing more extensive Unicode case folding should consider\n normalizing strings, `as described here\n <http://stackoverflow.com/a/29247821/240490>`_. As the tests\n show, basic Unicode folding works fine everywhere. Using\n Unicode in glob patterns (not just strings to be matched)\n works *only* on Python 3.3 or above.\n"
},
{
"alpha_fraction": 0.3928571343421936,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 13,
"blob_id": "97a2bb94b9db5ea5a3f4db1a38871f46a6e60ebc",
"content_id": "fcdcf0d9e544326e0954b09d1541708d9bc67ab7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "jonathaneunice/simplere",
"src_encoding": "UTF-8",
"text": "mementos>=1.2.8\nsix>=1.10.0\n"
}
] | 15 |
NextThought/nti.app.pyramid_zope | https://github.com/NextThought/nti.app.pyramid_zope | ca0032613d542ff4829090aa64db0c4f8d5dfbff | e3c65c0f22159314e3b7820cfd14436e83a82c81 | a78daf95455e01722dd97f1205185c44251e20ec | refs/heads/master | 2021-11-25T14:03:14.392892 | 2021-11-10T19:04:12 | 2021-11-10T19:04:12 | 139,613,392 | 1 | 0 | NOASSERTION | 2018-07-03T16:59:38 | 2021-04-15T15:58:18 | 2021-08-11T10:46:04 | Python | [
{
"alpha_fraction": 0.7810333967208862,
"alphanum_fraction": 0.7863894104957581,
"avg_line_length": 45.67647171020508,
"blob_id": "86b265aa6e14464a884e81d4268f8aa2710099b5",
"content_id": "a9b173d8eeb5ad4200331bf9046907985e6a7e0b",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3174,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 68,
"path": "/src/nti/app/pyramid_zope/i18n/__init__.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSupport for application-level (request, context and user based) internationalization.\n\nNegotiation\n===========\n\nThe process of finding a locale to use is somewhat complex. It is all\ndriven around the interface\n:class:`zope.i18n.interfaces.IUserPreferredLanguages` and its mutable\nsubclass\n:class:`zope.i18n.interfaces.IModifiableUserPreferredLanguages`. These\nare combined with a :class:`zope.i18n.interfaces.INegotiator`, given a\ncontext and a list of available languages, to determine the best\nmatching locale to use by taking the intersection of the preferred\nlanguages of the context with the available languages (individual\ntranslation utilities handle fallback and default languages).\n\nZope provides an implementation of preferred languages for (Zope)\nrequests in\n:class:`zope.publisher.browser.ModifiableBrowserLanguages`. This uses\nthe HTTP ``Accept-Language`` header to determine a language. We let\nPyramid requests also have this implementation through our\ncompatibility shims in :mod:`nti.pyramid_zope.i18n`.\n\nHowever, there are cases where we may not want to rely on the browser\nto have the right setting, either for testing, or to support broken\nbrowsers, or to explicitly enable user preferences. Supporting user\npreferences is easy: provide a ``IUserPreferredLanguages`` adapter for your\n``IPrincipal`` implementation. For temporary testing or for the\nuse of unauthenticated users, we can also look at the HTTP cookies\n``_LOCALE_`` (Pyramid's default) and ``I18N_LANGUAGE`` (Zope/Plone\ndefault), or the _LOCALE_ request parameter (or request attribute);\nwe can even use the `++lang++` namespace to set a language during traversal.\n\nThe complexity comes in combining all of these policies. Almost all\nuses of the translation functions pass the current request as the\ncontext, and by default that's just going to use the\n``Accept-Language`` based picker. Our solution is to define a new\ninterface :class:`nti.app.pyramid_zope.i18n.interfaces.IPreferredLanguagesRequest`,\nderiving from :class:`pyramid.interfaces.IRequest` and register a\npolicy for that interface. When the\n:class:`pyramid.interfaces.IContextFound` event is fired, if the\ncookies are present or an authenticated user is present, we make the\nrequest object provide that interface. In this way, our policy is used\nto override anything else.\n\nPyramid/Zope Integration\n========================\n\nThe Zope I18N system is more powerful and flexible than the Pyramid system.\nThe Pyramid system is based simply on providing one locale name and a list\nof directories containing translations. We implement Pyramid's\n:class:`pyramid.interfaces.ILocaleNegotiator` and :class:`pyramid.interfaces.ITranslationDirectories`\nbased on the information we gather from Zope. In this way, the Pyramid\nChameleon support, for example, uses the same information as other places.\n\n.. important::\n\n Make sure and include ``<include package=\"nti.app.pyramid_zope.i18n\" file=\"pyramid.zcml\" />``\n from your root ``pyramid.zcml`` to register the negotiator.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n"
},
{
"alpha_fraction": 0.6038087606430054,
"alphanum_fraction": 0.6057397723197937,
"avg_line_length": 39.699188232421875,
"blob_id": "43b7c8eb590ea1eb1d24b65da5b7a369df137401",
"content_id": "bc359e09d3d8fa1579bf79953e75648c89b720be",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15018,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 369,
"path": "/src/nti/app/pyramid_zope/z3c_zpt.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPyramid template renderer using z3c.pt, for the path syntax\nand other niceties that Chameleon itself doesn't support\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\nfrom zope import interface\n\nfrom zope.publisher.interfaces.browser import IBrowserRequest\n\nimport z3c.pt.pagetemplate\n\nfrom pyramid.decorator import reify\n\nfrom pyramid_chameleon.renderer import template_renderer_factory\nfrom chameleon.zpt.template import PageTemplateFile\n\n# ITemplateRenderer is deprecated as of pyramid 1.5a3,\n# but there is no corresponding pyramid_chameleon\n# release yet, so we still need to implement it, not\n# its parent IRenderer. Avoid the deprecation warning\n# this way\nimport pyramid.interfaces\ntry:\n ITemplateRenderer = pyramid.interfaces.__dict__['ITemplateRenderer']\nexcept KeyError:\n raise ImportError()\n\n\n\nlogger = __import__('logging').getLogger(__name__)\n\ndef renderer_factory(info):\n \"\"\"\n Factory to produce renderers. Intended to be used with asset specs.\n\n .. note:: At this time, this does not support the pyramid 1.4 macro syntax.\n \"\"\"\n return template_renderer_factory(info, ZPTTemplateRenderer)\n\n\nclass _ViewPageTemplateFileWithLoad(z3c.pt.pagetemplate.ViewPageTemplateFile):\n \"\"\"\n Enables the load: expression type for convenience.\n \"\"\"\n # NOTE: We cannot do the rational thing and copy this\n # and modify our local value. This is because\n # certain packages, notably z3c.macro,\n # modify the superclass's value; depending on the order\n # of import, we may or may not get that change.\n # So we do the bad thing too and modify the superclass also\n\n @property\n def builtins(self):\n d = super(_ViewPageTemplateFileWithLoad, self).builtins\n d['__loader'] = self._loader\n # https://github.com/malthe/chameleon/issues/154\n # That's been fixed, so we should no longer\n # need to do this:\n # # We try to get iteration order fixed here:\n # result = OrderedDict()\n # for k in sorted(d.keys()):\n # result[k] = d[k]\n # return result\n return d\n\n\nz3c.pt.pagetemplate.BaseTemplate.expression_types['load'] = PageTemplateFile.expression_types['load']\n\n# Re-export our version\nViewPageTemplateFile = _ViewPageTemplateFileWithLoad\n\n\[email protected](ITemplateRenderer)\nclass ZPTTemplateRenderer(object):\n \"\"\"\n Renders using a :class:`z3c.pt.pagetemplate.ViewPageTemplateFile`\n \"\"\"\n\n def __init__(self, path, lookup, macro=None):\n \"\"\"\n :keyword macro: New in pyramid 1.4, currently unsupported.\n :raise ValueError: If the ``macro`` argument is supplied.\n \"\"\"\n self.path = path\n self.lookup = lookup\n if macro:\n __traceback_info__ = path, lookup, macro\n raise ValueError(macro)\n\n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n return _ViewPageTemplateFileWithLoad(self.path,\n auto_reload=self.lookup.auto_reload,\n debug=self.lookup.debug,\n translate=self.lookup.translate)\n\n def implementation(self): # pragma: no cover\n return self.template\n\n def __call__(self, value, system):\n \"\"\"\n :param value: The object returned from the view. Either a dictionary,\n or a context object. If a context object, will be available at the path\n ``options/here`` in the template. 
If a dictionary, its values are merged with\n those in `system`.\n \"\"\"\n __traceback_info__ = value, system\n try:\n system.update(value)\n except (TypeError, ValueError):\n # raise ValueError('renderer was passed non-dictionary as value')\n system['here'] = value\n # See plasTeX/Renderers/__init__.py for comments about how 'self'\n # is a problem\n\n request = None\n if 'request' in system and system['request'] is not None:\n request = IBrowserRequest(system['request'])\n system['request'] = request\n\n view = system['view'] # TODO: We can do better with this\n if view is None and request is not None:\n view = request\n system['view'] = request\n\n # We used to register macros, but now you should use\n # z3c.macro and the macro: expression type\n # if 'master' not in system:\n # XXX: FIXME: There must be a better way to handle this.\n # How did zope do it? (Acquisition?)\n # (Answer: Yes, basically. Every template was auto-loaded\n # and at a traversable location, usually also in the\n # acquisition path; pages traversed to the macros of the\n # template they wanted. We can do something similar though\n # traversal, we just need to update our templates.)\n # FIXME: Note use of nti.appserver package\n # master = get_renderer('nti.appserver:templates/master_email.pt').implementation()\n # system['master'] = master\n result = self.template.bind(view)(**system)\n return result\n\n# pylint:disable=wrong-import-position,wrong-import-order\n# XXX: Why are these down there?\nimport csv\nimport six\nimport sys\nimport codecs\nimport os.path\nimport argparse\nimport datetime\n\nimport yaml\n\nimport simplejson\n\nfrom chameleon.tal import RepeatDict\n\nfrom zope.configuration import config\nfrom zope.configuration import xmlconfig\n\nfrom zope.dottedname import resolve as dottedname\n\nfrom zope.i18n import translate as ztranslate\n\nfrom zope.traversing import api as tapi\n\n\ndef _configure(self=None, set_up_packages=(), features=(),\n context=None, execute=True):\n if set_up_packages:\n if context is None:\n context = config.ConfigurationMachine()\n xmlconfig.registerCommonDirectives(context)\n for feature in features:\n context.provideFeature(feature)\n\n for i in set_up_packages:\n __traceback_info__ = (i, self, set_up_packages)\n if isinstance(i, tuple):\n filename = i[0]\n package = i[1]\n else:\n filename = 'configure.zcml'\n package = i\n\n if isinstance(package, six.string_types):\n package = dottedname.resolve(package)\n context = xmlconfig.file(filename, package=package,\n context=context, execute=execute)\n return context\n\n\ndef main(): # pylint:disable=too-many-locals,too-many-statements\n arg_parser = argparse.ArgumentParser(description=\"Render a single file with JSON data\")\n arg_parser.add_argument('input', help=\"The input template\")\n arg_parser.add_argument('output',\n help=\"The output filename, or - for standard out.\")\n arg_parser.add_argument('--data',\n dest='data',\n help=\"The path to a filename to read to get the data for template options.\\n\"\n \"JSON, YAML or CSV can be used. 
If JSON or YAML, the options will be whatever was\"\n \" specified in the file, typically a dictionary or array.\"\n \"If CSV, the first row should be a header row naming the fields, and the options\"\n \" will be a list of dictionaries with those keys\")\n arg_parser.add_argument('--repeat-on',\n dest='repeat_on',\n help=\"If given, a traversal path that specifies something that can be \"\n \"iterated; the template will be applied repeatedly to the elements.\")\n arg_parser.add_argument('--repeat-on-sequence-name',\n dest='repeat_on_sequence_name',\n help=\"If given along with --repeat-on, this name will be bound in\"\n \"the options dictionary as the sequence that --repeat-on is iterating\")\n arg_parser.add_argument('--repeat-on-name',\n dest='repeat_on_name',\n help=\"The name of the element being iterated. REQUIRED if --repeat-on is given\")\n arg_parser.add_argument('--repeat-as-iterable',\n dest='repeat_iter',\n action='store_true',\n default=False,\n help=\"If given, wrap each item from --repeat-on as a one-element list. This makes \"\n \"it easy to convert templates to create multiple files and share the basic iteration code.\")\n arg_parser.add_argument('--repeat-filename-specific-path',\n dest='repeat_filename',\n help=\"If given, a TAL path evaluated for each item being repeated. If found and true, \"\n \"used as a part of the filename, subject to mangling.\")\n arg_parser.add_argument('--json', dest='data')\n arg_parser.add_argument('--encoding',\n dest='encoding',\n help=\"The encoding of the output file.\")\n arg_parser.add_argument('--repeat-exclude-field',\n dest='repeat_exclude_field',\n help=\"If given, a field looked for in order to exclude the given element from \"\n \"the rendering process.\")\n\n args = arg_parser.parse_args()\n\n # Must configure traversing;\n # other stuff might be convenient but slows down startup,\n # so add as use-cases arise\n # _configure( set_up_packages=('nti.appserver', 'nti.app.pyramid_zope') )\n _configure(set_up_packages=('z3c.ptcompat',))\n # Turn zope.security back off, pointless in this context\n z3c.pt.pagetemplate.sys_modules = sys.modules\n\n class Lookup(object):\n auto_reload = False\n debug = True\n translate = ztranslate\n\n class View(object):\n context = None\n request = None\n\n renderer = ZPTTemplateRenderer(os.path.abspath(args.input), Lookup())\n system = {}\n system['view'] = View()\n system['request'] = None\n options = {}\n if args.data:\n # Mac Excel likes to save CSV files with Mac line endings (\\r)\n # which is weird and breaks the parser unless universal newlines\n # is in effect.\n openers = {'.csv': ('rU', lambda x: list(csv.DictReader(x))),\n '.yaml': ('rb', yaml.load),\n '.json': ('rb', simplejson.load)}\n mode, func = openers[os.path.splitext(args.data)[1]]\n with open(args.data, mode) as data:\n options = func(data)\n\n encoding = args.encoding or 'utf-8'\n\n def _write(result, output):\n # The result of PT rendering is a unicode string.\n # If it contained actual non-ascii characters,\n # we need to pick an encoding on the way out.\n # Because we are in HTML/XML the safest thing to\n # do for an encoding that doesn't handle a given value\n # is to use an entity escape (however our default of utf8\n # should handle everything)\n with codecs.open(output, 'wb', encoding=encoding, errors='xmlcharrefreplace') as f:\n f.write(result)\n\n # It is useful to have the render time available to the templates\n # There doesn't seem to be a way to do this entirely in the templates\n # so we help out here.\n 
options['nti_render_time'] = datetime.datetime.now()\n\n if args.repeat_on:\n output_base, output_ext = os.path.splitext(args.output)\n\n repeat_on = tapi.traverse(options, args.repeat_on)\n if args.repeat_on_sequence_name:\n repeat_on = list(repeat_on) # so multiple iterations work\n options[args.repeat_on_sequence_name] = repeat_on\n\n if args.repeat_exclude_field:\n # Filter out the elements we do not want and add the dict back in.\n exclude_field = args.repeat_exclude_field\n repeat_on = [x for x in repeat_on if exclude_field not in x]\n options[args.repeat_on_sequence_name] = repeat_on\n\n # Establish a repeat dict for the pages. This will be visible\n # as options/repeat, leaving the builtin repeat as specified.\n # (If our template class overrode _pt_get_context, we could\n # promote this to the top-level (builtin) scope (chameleon will\n # accept that, z3c.pt is the one that prevents it by wrapping\n # ALL keyword args in the options dict)).\n # When you specify the repeat_on_name, the RepeatItem will then be\n # available at 'options/repeat/$repeat_on_name', giving you access\n # to such things as 'index'.\n # NOTE: For that to work, we have to iterate across the returned\n # iterator, because the RepeatItem that's in the dict must stay in\n # sync, and it does this by peeking into the iterator itself.\n global_repeat = options['repeat'] = RepeatDict({})\n\n # register the repeat item...\n global_repeat(args.repeat_on_name, repeat_on)\n # ...get it...\n global_repeat_item = global_repeat[args.repeat_on_name]\n # ...now iterate on it\n global_repitition_iterator = iter(global_repeat_item)\n\n for repitition_value in global_repitition_iterator:\n i = global_repeat_item.index # auto-advanced\n raw_val = repitition_value\n\n if args.repeat_iter: # wrap if required\n repitition_value = [repitition_value]\n\n options_for_this_repitition = options.copy()\n options_for_this_repitition[args.repeat_on_name] = repitition_value\n\n result = renderer(options_for_this_repitition, system)\n\n # We avoiding rendering this for some reason; next.\n if not result or result.isspace():\n continue\n\n output_specific = None\n if args.repeat_filename:\n try:\n output_specific = tapi.traverse(raw_val,\n args.repeat_filename)\n output_specific = output_specific.strip()\n output_specific = output_specific.lower().replace(' ', '_')\n output_specific = output_specific.replace(os.path.sep, '_')\n if not output_specific:\n raise ValueError()\n except (KeyError, TypeError, ValueError):\n output_specific = None\n if output_specific is None:\n output_specific = str(i)\n\n output = output_base + os.path.extsep + output_specific + output_ext\n _write(result, output)\n else:\n result = renderer(options, system)\n _write(result, args.output)\n\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6733333468437195,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 17.75,
"blob_id": "6062f290fb2ded0e2070c1d755fc80d1c6598864",
"content_id": "85c288796c4fc549edf49067d929955424e3a029",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 450,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 24,
"path": "/src/nti/app/pyramid_zope/i18n/interfaces.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nI18N related interfaces.\n\n.. $Id$\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom pyramid.interfaces import IRequest\n\n__docformat__ = \"restructuredtext en\"\n\nlogger = __import__('logging').getLogger(__name__)\n\n\n\nclass IPreferredLanguagesRequest(IRequest):\n \"\"\"\n An extension to a standard request used as a marker.\n \"\"\"\n"
},
{
"alpha_fraction": 0.5246179699897766,
"alphanum_fraction": 0.5297113656997681,
"avg_line_length": 18,
"blob_id": "a09a2031b4c58cb12db375a56f3182260c052246",
"content_id": "0dc0e4bddc8e7477e01d0a8434de1fe77a6eb60d",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 589,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 31,
"path": "/docs/api.rst",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "===============\n API Reference\n===============\n\n.. toctree::\n :maxdepth: 1\n\n i18n\n\n\nnti.app.pyramid_zope\n====================\n.. automodule:: nti.app.pyramid_zope\n :members:\n\nnti.app.pyramid_zope.request\n============================\n.. automodule:: nti.app.pyramid_zope.request\n :members:\n\nnti.app.pyramid_zope.traversal\n==============================\n.. automodule:: nti.app.pyramid_zope.traversal\n :members:\n :special-members:\n\nnti.app.pyramid_zope.security\n==============================\n.. automodule:: nti.app.pyramid_zope.security\n :members:\n :special-members:\n"
},
{
"alpha_fraction": 0.6985726356506348,
"alphanum_fraction": 0.7023509740829468,
"avg_line_length": 26.06818199157715,
"blob_id": "c096a7b77217c6fd9d87a53e87c145bba0121403",
"content_id": "e5a67512ab45ef63d5b2399b09e7d5fbd45ea4a2",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2382,
"license_type": "permissive",
"max_line_length": 67,
"num_lines": 88,
"path": "/src/nti/app/pyramid_zope/tests/test_security.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\n# disable: accessing protected members, too many methods\n# pylint: disable=W0212,R0904\n\nfrom hamcrest import assert_that\nfrom hamcrest import is_\n\nimport unittest\n\nfrom zope import interface\n\nfrom nti.testing.matchers import verifiably_provides\n\nfrom zope.security.interfaces import IParticipation\nfrom zope.security.interfaces import IPrincipal\n\nfrom zope.security.management import endInteraction\nfrom zope.security.management import newInteraction\nfrom zope.security.management import queryInteraction\n\nfrom ..security import principal_from_interaction\n\nfrom nti.testing.base import SharedConfiguringTestBase\n\[email protected](IPrincipal)\nclass _Principal(object):\n\n __slots__ = ('username')\n\n def __init__(self, username):\n self.username = username\n\n\[email protected](IParticipation)\nclass _Participation(object):\n\n __slots__ = ('interaction', 'principal')\n\n def __init__(self, principal):\n self.interaction = None\n self.principal = principal\n\n\nclass _MockInteraction(object):\n\n __slots__ = ('participations',)\n\n\nclass TestPrincipalFromInteraction(unittest.TestCase):\n\n def setUp(self):\n self.principal = _Principal('bob')\n self.participation = _Participation(self.principal)\n\n def test_participations_is_iterator(self):\n interaction = _MockInteraction()\n interaction.participations = iter([self.participation])\n assert_that(principal_from_interaction(interaction),\n is_(self.principal))\n\n def test_participations_is_list(self):\n interaction = _MockInteraction()\n interaction.participations = [self.participation]\n assert_that(principal_from_interaction(interaction),\n is_(self.principal))\n\n\nclass TestSecurityAdapters(SharedConfiguringTestBase):\n\n set_up_packages = (__name__,)\n\n def test_principal_from_iteraction(self):\n principal = _Principal('bob')\n participation = _Participation(principal)\n\n newInteraction(participation)\n\n try:\n interaction = queryInteraction()\n p_from_i = IPrincipal(interaction)\n assert_that(p_from_i.username, is_(principal.username))\n finally:\n endInteraction()\n"
},
{
"alpha_fraction": 0.6862244606018066,
"alphanum_fraction": 0.6897959113121033,
"avg_line_length": 33.385963439941406,
"blob_id": "d953057d6d867a1dc910b69b455d69b0f688d708",
"content_id": "d17058df7c0f48c2b018d3f94fdf7c95668a2468",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1960,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 57,
"path": "/src/nti/app/pyramid_zope/__init__.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDOCUMENT ME.\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\nimport os\nfrom six import string_types\n\nfrom zope.browserpage import viewpagetemplatefile\n\n# Make viewlets use our version of page template files\n# Unfortunately, the zope.browserpage VPT is slightly\n# incompatible in calling convention\nfrom zope.viewlet import viewlet\n\nfrom zope.pagetemplate.pagetemplatefile import package_home\n\nfrom z3c.template import template\n\nfrom nti.app.pyramid_zope.z3c_zpt import ViewPageTemplateFile\n\nlogger = __import__('logging').getLogger(__name__)\n\n# Best to use a class not a function to avoid changing\n# calling depth\n\nclass _VPT(ViewPageTemplateFile):\n\n def __init__(self, filename, _prefix=None, content_type=None):\n path = _prefix\n if not isinstance(path, string_types) and path is not None:\n # zope likes to pass the globals\n path = package_home(path)\n # TODO: The correct values for reload and debug come from\n # pyramid settings. Can we get to that from here?\n auto_reload = os.getenv('PYRAMID_RELOAD_TEMPLATES')\n debug = os.getenv('PYRAMID_DEBUG_TEMPLATES')\n ViewPageTemplateFile.__init__(self, filename, path=path,\n content_type=content_type,\n auto_reload=auto_reload,\n debug=debug)\n\n\nif viewlet.ViewPageTemplateFile is viewpagetemplatefile.ViewPageTemplateFile:\n # TODO: Formalize this\n logger.debug(\"Monkey-patching zope.viewlet to use z3c.pt\")\n viewlet.ViewPageTemplateFile = _VPT\n\nif template.ViewPageTemplateFile is viewpagetemplatefile.ViewPageTemplateFile:\n # They claim that including of z3c.ptcompat does this, I'm not\n # convinced\n logger.debug(\"Monkey-patching z3c.template to use z3c.pt\")\n template.ViewPageTemplateFile = _VPT\n"
},
{
"alpha_fraction": 0.6734693646430969,
"alphanum_fraction": 0.6751700639724731,
"avg_line_length": 22.520000457763672,
"blob_id": "34d9893bae5189d6c88d49577e3a5cbedfe4a5e5",
"content_id": "f3b97da1625e48a1676a1527212bb09002616cb0",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 588,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 25,
"path": "/src/nti/app/pyramid_zope/tests/__init__.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, absolute_import, division\n\nfrom nti.testing.layers import ZopeComponentLayer\nfrom nti.testing.layers import ConfiguringLayerMixin\n\nclass ConfiguringLayer(ZopeComponentLayer,\n ConfiguringLayerMixin):\n set_up_packages = ('nti.app.pyramid_zope',)\n\n @classmethod\n def setUp(cls):\n cls.setUpPackages()\n\n @classmethod\n def tearDown(cls):\n cls.tearDownPackages()\n\n @classmethod\n def testSetUp(cls):\n \"Does nothing\"\n\n testTearDown = testSetUp\n"
},
{
"alpha_fraction": 0.6406117081642151,
"alphanum_fraction": 0.6796941161155701,
"avg_line_length": 25.75,
"blob_id": "1e3db5e463b6dea953d94de9e45ea28c8706ca65",
"content_id": "8b95819ef6d85d3c3b22ec3e073e1f303eb2515d",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 1177,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 44,
"path": "/CHANGES.rst",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "=========\n Changes\n=========\n\n\n0.0.4 (unreleased)\n==================\n\n- Nothing changed yet.\n\n\n0.0.3 (2021-08-11)\n==================\n\n- Require ``zope.principalregistry`` and configure it from\n ``configure.zcml``. This packge needs the unauthenticated principal\n utility it provides.\n\n- Require ``zope.publisher`` and configure it from ``configure.zcml``.\n This is needed for language negotiation based on requests.\n\n- Add support for I18N.\n\n- Add support for Python 3.9. 3.10 is expected to work once zodbpickle\n is released with support for 3.10.\n\n0.0.2 (2020-01-02)\n==================\n\n- Add ``nti.app.pyramid_zope.traversal.ZopeResourceTreeTraverser``, a\n Pyramid ``ITraverser`` that uses the ``zope.traversing`` machinery,\n including path adapters and namespaces.\n\n- Make ``configure.zcml`` register the standard traversing adapters\n which accept a Pyramid ``IRequest`` object. This goes hand-in-hand\n with using the ``ZopeResourceTreeTraverser``. These are the same\n namespaces that would be registered by ``zope.traversing`` for the\n Zope request (with the exception of the ``attribute`` namespace).\n\n\n0.0.1 (2020-01-01)\n==================\n\n- Initial release.\n"
},
{
"alpha_fraction": 0.5637369155883789,
"alphanum_fraction": 0.5652061104774475,
"avg_line_length": 41.85555648803711,
"blob_id": "80f5a8c9e7b048a6b994d8b704187220bd693c13",
"content_id": "5972698777d79bedffa763c4187bf404260d4067",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11571,
"license_type": "permissive",
"max_line_length": 108,
"num_lines": 270,
"path": "/src/nti/app/pyramid_zope/traversal.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSupport for resource tree traversal.\n\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nfrom pyramid import traversal\n\nfrom pyramid.compat import is_nonstr_iter\nfrom pyramid.compat import decode_path_info\n\nfrom pyramid.exceptions import URLDecodeError\n\nfrom pyramid.httpexceptions import HTTPNotFound\n\nfrom pyramid.interfaces import VH_ROOT_KEY\n\nfrom pyramid.interfaces import ITraverser\n\nfrom zope import interface\n\nfrom zope.component import queryMultiAdapter\nfrom zope.event import notify\n\nfrom zope.location.interfaces import LocationError\n\nfrom zope.traversing import api as ztraversing\n\nfrom zope.traversing.interfaces import ITraversable\nfrom zope.traversing.interfaces import BeforeTraverseEvent\n\nfrom zope.publisher.interfaces.browser import IBrowserRequest\nfrom zope.publisher.interfaces.browser import IDefaultBrowserLayer\n\nfrom zope.traversing.namespace import resource as _zresource\n\nlineage = traversal.lineage\nfind_interface = traversal.find_interface\n\nempty = traversal.empty\nsplit_path_info = traversal.split_path_info\n\nlogger = __import__('logging').getLogger(__name__)\n\n__all__ = [\n 'ZopeResourceTreeTraverser',\n 'resource',\n]\n\ndef _notify_before_traverse_event(ob, request):\n \"\"\"\n Notifies a BeforeTraverseEvent, but safely: if the\n handlers themselves raise a location error, turn that into\n a HTTP 404 exception.\n\n Because handlers are deliberately doing this, we stop\n traversal and abort rather than try to return an information\n dictionary and find a view and context, etc. This is limiting, but\n safe.\n \"\"\"\n try:\n notify(BeforeTraverseEvent(ob, request))\n except LocationError:\n # this is often a setup or programmer error\n logger.debug(\"LocationError from traverse subscribers\", exc_info=True)\n raise HTTPNotFound(\"Traversal failed\")\n\n\[email protected](ITraverser)\nclass ZopeResourceTreeTraverser(traversal.ResourceTreeTraverser):\n \"\"\"\n A :class:`pyramid.interfaces.ITraverser` based on pyramid's\n default traverser, but modified to use the\n :mod:`zope.traversing.api` machinery instead of (only) dictionary\n lookups. This provides is with the flexibility of the\n :obj:`zope.traversing.interfaces.ITraversable` adapter pattern,\n plus the support of namespace lookups\n (:func:`zope.traversing.namespace.nsParse` and\n :func:`zope.traversing.namespace.namespaceLookup`).\n\n As this object traverses, it fires :obj:`~.IBeforeTraverseEvent`\n events. If you either load the configuration from\n :mod:`zope.app.publication` or manually enable the\n :obj:`zope.site.site.threadSiteSubscriber <zope.site.site>` to\n subscribe to this event, then any Zope site managers found along\n the way will be made the current site.\n \"\"\"\n\n def __init__(self, root):\n traversal.ResourceTreeTraverser.__init__(self, root)\n\n def __call__(self, request): # pylint:disable=too-many-locals,too-many-branches,too-many-statements\n \"\"\"\n See :meth:`pyramid.interfaces.ITraversar.__call__`.\n \"\"\"\n # JAM: Unfortunately, the superclass implementation is entirely monolithic\n # and we so we cannot reuse any part of it. Instead,\n # we copy-and-paste it. Unless otherwise noted, comments below are\n # original.\n\n # JAM: Note the abundance of no covers. 
These are for features we are\n # not currently using and the code is lifted directly from pyramid.\n environ = request.environ\n\n if request.matchdict is not None:\n matchdict = request.matchdict\n\n path = matchdict.get('traverse', '/') or '/'\n if is_nonstr_iter(path):\n # this is a *traverse stararg (not a {traverse})\n # routing has already decoded these elements, so we just\n # need to join them\n path = '/'.join(path) or '/'\n\n subpath = matchdict.get('subpath', ())\n if not is_nonstr_iter(subpath): # pragma: no cover\n # this is not a *subpath stararg (just a {subpath})\n # routing has already decoded this string, so we just need\n # to split it\n subpath = split_path_info(subpath)\n\n else: # pragma: no cover\n # this request did not match a route\n subpath = ()\n try:\n # empty if mounted under a path in mod_wsgi, for example\n path = decode_path_info(environ['PATH_INFO'] or '/')\n except KeyError:\n path = '/'\n except UnicodeDecodeError as e:\n raise URLDecodeError(e.encoding, e.object, e.start, e.end,\n e.reason)\n\n if VH_ROOT_KEY in environ: # pragma: no cover\n # HTTP_X_VHM_ROOT\n vroot_path = decode_path_info(environ[VH_ROOT_KEY])\n vroot_tuple = split_path_info(vroot_path)\n # both will (must) be unicode or asciistr\n vpath = vroot_path + path\n vroot_idx = len(vroot_tuple) - 1\n else:\n vroot_tuple = ()\n vpath = path\n vroot_idx = -1\n\n root = self.root\n ob = vroot = root\n\n if vpath == '/': # invariant: vpath must not be empty\n # prevent a call to traversal_path if we know it's going\n # to return the empty tuple\n vpath_tuple = ()\n else:\n i = 0\n view_selector = self.VIEW_SELECTOR\n # A list so that remaining_path can be modified\n vpath_tuple = list(split_path_info(vpath))\n for segment in vpath_tuple:\n # JAM: Fire traversal events, mainly so sites get installed. See\n # zope.publisher.base.\n _notify_before_traverse_event(ob, request)\n # JAM: Notice that checking for '@@' is special cased, and\n # doesn't go through the normal namespace lookup as it would in\n # plain zope traversal. (XXX: Why not?)\n if segment.startswith(view_selector): # pragma: no cover\n return {'context': ob,\n 'view_name': segment[2:],\n 'subpath': vpath_tuple[i + 1:],\n 'traversed': vpath_tuple[:vroot_idx + i + 1],\n 'virtual_root': vroot,\n 'virtual_root_path': vroot_tuple,\n 'root': root}\n\n try:\n # JAM: This is where we differ. instead of using __getitem__,\n # we use the traversing machinery.\n # The zope app would use IPublishTraverser, which\n # would install security proxies along the way. We probably don't need to\n # do that? TODO:\n # NOTE: By passing the request here, we require all traversers\n # (including the namespace traversers) to be registered as multi-adapters.\n # None of the default namespaces are. 
See our\n # configure.zcml for what is.\n\n # JAM: Damn stupid implementation of traversePathElement ignores\n # the request argument to find a traversable /except/ when a namespace is found.\n # therefore, we explicitly query for the multi adapter ourself in the non-namespace case\n # (In the namespace case, we let traversing handle it, because it needs a named adapter\n # after parsing)\n traversable = None\n if segment and segment[0] not in '+@' \\\n and not ITraversable.providedBy(ob):\n try:\n # Use the installed component registry\n # instead of the request registry (which\n # is the global component registry if\n # pyramid was configured that way, or a\n # standalone registry) in case the act of\n # traversing has changed the site manager;\n # zope.site.site.threadSiteSubscriber will\n # do this for each BeforeTraverseEvent\n # that's fired (though that's not\n # registered by default).\n traversable = queryMultiAdapter((ob, request),\n ITraversable)\n except TypeError:\n # Some things are registered for \"*\" (DefaultTraversable)\n # which means they get called here. If they can't take\n # two arguments, then we bail. Sucks.\n pass\n\n remaining_path = vpath_tuple[i + 1:]\n next_ob = ztraversing.traversePathElement(ob,\n segment,\n remaining_path,\n traversable=traversable,\n request=request)\n if remaining_path != vpath_tuple[i + 1:]:\n # Is this if check necessary? It would be faster to\n # always assign\n vpath_tuple[i + 1:] = remaining_path\n except LocationError:\n # LocationError is a type of KeyError. The DefaultTraversable turns\n # plain KeyError and TypeErrors into LocationError.\n return {'context': ob,\n 'view_name': segment,\n 'subpath': vpath_tuple[i + 1:],\n 'traversed': vpath_tuple[:vroot_idx + i + 1],\n 'virtual_root': vroot,\n 'virtual_root_path': vroot_tuple,\n 'root': root}\n if i == vroot_idx: # pragma: no cover\n vroot = next_ob\n ob = next_ob\n i += 1\n\n # JAM: Also fire before traversal for the actual context item, since we\n # won't actually traverse into it. Be sure not to fire multiple times\n # for this (E.g., the root). This logic is complicated by the\n # multi-returns above.\n _notify_before_traverse_event(ob, request)\n\n return {'context': ob,\n 'view_name': empty,\n 'subpath': subpath,\n 'traversed': vpath_tuple,\n 'virtual_root': vroot,\n 'virtual_root_path': vroot_tuple,\n 'root': root}\n\n\n\nclass resource(_zresource):\n \"\"\"\n Handles resource lookup in a way compatible with :mod:`zope.browserresource`.\n This package registers resources as named adapters from :class:`.IDefaultBrowserLayer`\n to Interface. We connect the two by making the pyramid request implement\n the right thing.\n \"\"\"\n\n def __init__(self, context, request):\n request = IBrowserRequest(request)\n if not IDefaultBrowserLayer.providedBy(request):\n interface.alsoProvides(request, IDefaultBrowserLayer) # We lie\n super(resource, self).__init__(context, request)\n"
},
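The traverser in the entry above only takes effect once Pyramid is told to use it in place of its default `ResourceTreeTraverser`. A minimal sketch of that wiring follows; the root class and route name are hypothetical, while the `ITraverser` registration and the `*traverse` route pattern are standard Pyramid mechanisms:

```python
# Minimal sketch, not part of the dataset payload. The Root class and the
# route name are hypothetical; registering an ITraverser adapter and using
# a ``*traverse`` stararg are standard Pyramid mechanisms.
from pyramid.config import Configurator
from pyramid.interfaces import ITraverser
from zope.interface import Interface, implementer
from zope.traversing.interfaces import ITraversable

from nti.app.pyramid_zope.traversal import ZopeResourceTreeTraverser

@implementer(ITraversable)
class Root(object):
    """Hypothetical resource-tree root; real applications supply their own."""
    def traverse(self, name, furtherPath):
        return self  # trivially resolve every path segment

config = Configurator(root_factory=lambda request: Root())
# Replace Pyramid's default traverser with the Zope-aware one above.
config.registry.registerAdapter(
    ZopeResourceTreeTraverser, (Interface,), ITraverser)
# The ``*traverse`` stararg populates request.matchdict['traverse'],
# which ZopeResourceTreeTraverser.__call__ consumes.
config.add_route('zope.traversal', '/*traverse')
```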
{
"alpha_fraction": 0.7007481455802917,
"alphanum_fraction": 0.7042394280433655,
"avg_line_length": 29.846153259277344,
"blob_id": "c8c9561a63256d01cc06df5bbe676f534ebd1bfe",
"content_id": "9e74e19f73bb5c18f7bbb80a3ba4be0709bae6ca",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2005,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 65,
"path": "/src/nti/app/pyramid_zope/i18n/tests/test_subscribers.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\n\nimport fudge\nfrom hamcrest import assert_that\nfrom hamcrest import has_property\nfrom hamcrest import is_not as does_not\n\nfrom nti.testing.matchers import provides\nfrom pyramid.events import ContextFound\nfrom pyramid.request import Request\nfrom zope import interface\nfrom zope.i18n.interfaces import IUserPreferredLanguages\n\nfrom ..interfaces import IPreferredLanguagesRequest\nfrom ..subscribers import adjust_request_interface_for_preferred_languages as _adjust\n\ndef adjust(request):\n _adjust(ContextFound(request))\n\n\nclass TestSubscribers(unittest.TestCase):\n\n request = None\n\n def setUp(self):\n self.request = Request.blank('/')\n\n def test_adjust_interface_blank(self):\n # Initially, nothing\n adjust(self.request)\n assert_that(self.request,\n does_not(provides(IPreferredLanguagesRequest)))\n\n def test_adjust_zope_cookie(self):\n self.request.cookies['I18N_LANGUAGE'] = 'ru'\n adjust(self.request)\n assert_that(self.request, provides(IPreferredLanguagesRequest))\n # It got copied to the request attribute too for benefit\n # of the default pyramid localizer\n assert_that(self.request, has_property('_LOCALE_', 'ru'))\n\n def test_adjust_pyramid_property(self):\n self.request._LOCALE_ = 'ru'\n adjust(self.request)\n assert_that(self.request, provides(IPreferredLanguagesRequest))\n\n @fudge.patch('nti.app.pyramid_zope.i18n.subscribers.IPrincipal')\n def test_adjust_remote_user(self, fake_get):\n\n @interface.implementer(IUserPreferredLanguages)\n class User(object):\n def getPreferredLanguages(self):\n return ['ru']\n\n fake_get.is_callable().returns(User())\n\n adjust(self.request)\n assert_that(self.request, provides(IPreferredLanguagesRequest))\n"
},
{
"alpha_fraction": 0.6881720423698425,
"alphanum_fraction": 0.6953405141830444,
"avg_line_length": 24.363636016845703,
"blob_id": "6d0bcb3f96233314b2901ddde2804956857b90fa",
"content_id": "b9f59c5d75475f00405e7c68d811b58c9345ee96",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 279,
"license_type": "permissive",
"max_line_length": 56,
"num_lines": 11,
"path": "/.coveragerc",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "[run]\nsource = nti.app.pyramid_zope\n# New in 5.0; required for the GHA coveralls submission.\n# Perhaps this obsoletes the source section in [paths]?\nrelative_files = True\n\n[report]\nexclude_lines =\n pragma: no cover\n raise NotImplementedError\n if __name__ == .__main__.:\n"
},
{
"alpha_fraction": 0.7148422002792358,
"alphanum_fraction": 0.7178288698196411,
"avg_line_length": 34.65277862548828,
"blob_id": "7777a76166fa76a4640100c91c091c63e3e0f3fa",
"content_id": "9c36b645a1fa748885281e659731528f74977439",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7701,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 216,
"path": "/src/nti/app/pyramid_zope/i18n/adapters.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nI18N related adapters.\n\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport pyramid.interfaces\n\nfrom pyramid.i18n import default_locale_negotiator\nfrom pyramid.interfaces import ILocaleNegotiator\nfrom pyramid.interfaces import ITranslationDirectories\n\nfrom zope import component\nfrom zope import interface\n\nfrom zope.cachedescriptors.property import Lazy\n\nfrom zope.i18n.interfaces import IModifiableUserPreferredLanguages\nfrom zope.i18n.interfaces import ITranslationDomain\nfrom zope.i18n.interfaces import IUserPreferredCharsets\nfrom zope.i18n.interfaces import IUserPreferredLanguages\nfrom zope.i18n.locales import LoadLocaleError\nfrom zope.i18n.locales import locales\n\nfrom zope.publisher.http import HTTPCharsets\nfrom zope.publisher.interfaces.browser import IBrowserRequest\n\nfrom zope.security.interfaces import IPrincipal\n\nfrom .interfaces import IPreferredLanguagesRequest\nfrom ..request import PyramidZopeRequestProxy\n\n__all__ = [\n 'EnglishUserPreferredLanguages',\n 'PreferredLanguagesPolicy',\n 'PyramidBrowserPreferredCharsets',\n 'PyramidBrowserPreferredLanguages',\n 'preferred_language_locale_negotiator',\n 'ZopeTranslationDirectories',\n]\n\n\[email protected](None)\[email protected](IUserPreferredLanguages)\ndef EnglishUserPreferredLanguages(unused_user):\n \"\"\"\n An implementation of :class:`.IUserPreferredLanguages` that always returns\n English.\n\n This is registered as the least-specific\n adapter for generic objects.\n \"\"\"\n return EnglishUserPreferredLanguagesImpl\n\n\[email protected](IUserPreferredLanguages)\nclass EnglishUserPreferredLanguagesImpl(object):\n PREFERRED_LANGUAGES = ('en',)\n\n @classmethod\n def getPreferredLanguages(cls):\n return cls.PREFERRED_LANGUAGES\n\n\[email protected](IUserPreferredLanguages)\[email protected](IPreferredLanguagesRequest)\nclass PreferredLanguagesPolicy(object):\n \"\"\"\n Implements the preferred languages policy as documented for this\n package: an explicit request parameter or cookie will be used\n first, followed by something set during traversal, followed by a\n non-default persistent user preference, followed by the value set\n from the HTTP headers.\n \"\"\"\n\n def __init__(self, request):\n self.request = request\n\n def getPreferredLanguages(self):\n # If the default locale negotiater can get a value,\n # that means we had a parameter or one of the cookies\n # (because of the subscriber that gets us here).\n\n negotiated = default_locale_negotiator(self.request)\n if negotiated:\n return [negotiated]\n\n # Here is where we would check for something during traversal,\n # but we don't actually support that at this time because it\n # relies on implementation details\n\n # Is there a non-default user preference? Right now we know\n # what a default is due to implementation details above. We also\n # know for sure that we *have* a remote use, otherwise we wouldn't\n # be here\n remote_user = IPrincipal(self.request, None)\n remote_user_langs = IUserPreferredLanguages(remote_user)\n if remote_user_langs is not EnglishUserPreferredLanguagesImpl:\n return remote_user_langs.getPreferredLanguages() # pylint:disable=too-many-function-args\n\n # Ok, see what the HTTP request can come up with. 
Note that we're\n # going to the Zope interface so that we don't get into an infinite\n # loop\n browser_request = IBrowserRequest(self.request)\n browser_langs = IModifiableUserPreferredLanguages(browser_request)\n return browser_langs.getPreferredLanguages() # pylint:disable=too-many-function-args\n\n\[email protected](IUserPreferredLanguages)\[email protected](pyramid.interfaces.IRequest)\ndef PyramidBrowserPreferredLanguages(request):\n # we implement IUserPreferredLanguages on the Pyramid object, but\n # return an IModifiableUserPreferredLanguages on the Zope object.\n # This prevents an infinite loop\n return IModifiableUserPreferredLanguages(PyramidZopeRequestProxy(request))\n\n\[email protected](IUserPreferredCharsets)\[email protected](pyramid.interfaces.IRequest)\ndef PyramidBrowserPreferredCharsets(request):\n # Unfortunately, the trick we use for UserPreferredLanguages\n # (through an interface) does not work here and so we have to tightly\n # couple to an implementation.\n return HTTPCharsets(PyramidZopeRequestProxy(request))\n\n\[email protected](ILocaleNegotiator)\ndef preferred_language_locale_negotiator(request):\n \"\"\"\n A pyramid locale negotiator that piggybacks off\n the preferred language support. We return a valid locale\n name consisting of at most language-territory, but at least language.\n A valid locale is one for which we have available locale data,\n not necessarily one for which any translation data is available.\n \"\"\"\n # pylint:disable=too-many-function-args, assignment-from-no-return\n\n # This code is similar to that in zope.publisher.http.HTTPRequest.\n # it's point is to find the most specific available locale possible.\n # We differ in that, instead of returning a generic default, we\n # specifically return the english default. We also differ in that we\n # return a locale name instead of a locale object.\n\n result = EnglishUserPreferredLanguagesImpl.PREFERRED_LANGUAGES[0]\n\n pref_langs = IUserPreferredLanguages(request, ())\n if pref_langs:\n pref_langs = pref_langs.getPreferredLanguages()\n\n for lang in pref_langs:\n parts = (lang.split('-') + [None, None])[:3]\n try:\n locales.getLocale(*parts)\n result = lang\n break\n except LoadLocaleError: # pragma: no cover\n continue\n\n return result\n\n\[email protected](ITranslationDirectories)\nclass ZopeTranslationDirectories(object):\n \"\"\"\n Implements the readable contract of Pyramid's translation directory\n list by querying for the zope translation domain objects. This way\n we don't have to repeat the configuration.\n\n .. note:: This queries just once, the first time it is used.\n\n .. note:: We lose the order or registrations, if that mattered.\n \"\"\"\n\n def __iter__(self):\n return iter(self._dirs)\n\n def __repr__(self): # pragma: no cover\n # TODO: Why is this repr this way? It makes broken test\n # output very confusing. 
There are no specific tests for it.\n return repr(list(self))\n\n @Lazy\n def _dirs(self):\n dirs = []\n domains = component.getAllUtilitiesRegisteredFor(ITranslationDomain)\n for domain in domains:\n for paths in domain.getCatalogsInfo().values():\n # The catalog info is a dictionary of language to [file]\n if len(paths) == 1 and paths[0].endswith('.mo'):\n path = paths[0]\n # strip off the file, go to the directory containing the\n # language directories\n path = os.path.sep.join(path.split(os.path.sep)[:-3])\n if path not in dirs:\n dirs.append(path)\n return dirs\n\n @classmethod\n def testing_cleanup(cls): # pragma: no cover\n for d in component.getAllUtilitiesRegisteredFor(ITranslationDirectories):\n if isinstance(d, ZopeTranslationDirectories):\n d.__dict__.pop('_dirs', None)\n\ntry:\n from zope.testing import cleanup\nexcept ImportError: # pragma: no cover\n pass\nelse:\n cleanup.addCleanUp(ZopeTranslationDirectories.testing_cleanup)\n"
},
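The locale negotiator defined above plugs into Pyramid through its standard configuration hook. A minimal sketch, assuming this package's adapter registrations have been loaded; `set_locale_negotiator` is real Pyramid API, and the surrounding application skeleton is hypothetical:

```python
# Minimal sketch, not part of the dataset payload: installing the
# Zope-aware negotiator. set_locale_negotiator() is standard Pyramid API.
from pyramid.config import Configurator

from nti.app.pyramid_zope.i18n.adapters import (
    preferred_language_locale_negotiator,
)

config = Configurator()
config.set_locale_negotiator(preferred_language_locale_negotiator)
```

Because the negotiator always falls back to `'en'` rather than returning `None`, `request.locale_name` never needs the `pyramid.default_locale_name` fallback once this is installed.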
{
"alpha_fraction": 0.652501106262207,
"alphanum_fraction": 0.6578131914138794,
"avg_line_length": 31.7391300201416,
"blob_id": "42e386b8499ec27808b102d9f2294315a145f59f",
"content_id": "63972bb13121477bd416502e913baf2c007b0dd9",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2259,
"license_type": "permissive",
"max_line_length": 84,
"num_lines": 69,
"path": "/src/nti/app/pyramid_zope/tests/test_traversal.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nTests for traversal.py.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n\nimport unittest\n\nfrom zope import interface\nfrom zope.traversing import interfaces as trv_interfaces\n\nfrom pyramid.testing import DummyRequest\n\nfrom hamcrest import assert_that\nfrom hamcrest import has_entries\nfrom hamcrest import is_\n\nfrom . import ConfiguringLayer\nfrom .. import traversal\n\nclass TestTraversal(unittest.TestCase):\n\n def test_unicode_traversal(self):\n # UnicodeEncodeError is specially handled\n # by the traversing machinery and doesn't raise\n # an error. (This is in zope.traversing.)\n\n # On Python 2, this was triggered in the real world by\n # attempting to access a non-ASCII attribute on an object\n # (which isn't allowed); this happened in the real world:\n # getattr(self, u'\\u2019', None) # Raise unicode error\n # On Python 3, though, that's fine and is\n # allowed. The UnicodEncodeError constructor takes lots of\n # parameters, so rather than instantiate directly, we\n # trigger it indirectly by encoding --- as Python2 would do.\n\n @interface.implementer(trv_interfaces.ITraversable)\n class BrokenTraversable(object):\n raised = False\n def traverse(self, name, furtherPath): # pylint:disable=unused-argument\n BrokenTraversable.raised = True\n return u'\\u2019'.encode('ascii')\n\n @interface.implementer(trv_interfaces.ITraversable)\n class Root(object):\n def traverse(self, name, furtherPath): # pylint:disable=unused-argument\n return BrokenTraversable()\n\n req = DummyRequest(path='/a/b/c')\n req.matchdict = {'traverse': ('a', 'b', 'c')}\n result = traversal.ZopeResourceTreeTraverser(Root())(req)\n\n self.assertTrue(BrokenTraversable.raised)\n\n assert_that(result, has_entries(\n context=is_(BrokenTraversable),\n root=is_(Root),\n ))\n\nclass TestConfiguration(unittest.TestCase):\n\n layer = ConfiguringLayer\n\n def test_configures(self):\n \"\"\"\n Setting up the layer either works or fails.\n \"\"\"\n # TODO: More specific tests\n"
},
{
"alpha_fraction": 0.7636138796806335,
"alphanum_fraction": 0.7641088962554932,
"avg_line_length": 37.846153259277344,
"blob_id": "6489a493c3a10333866559ef07176e0917c082fc",
"content_id": "0005edfdf31c5042b88ecf6129bf2c07629534e7",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4040,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 104,
"path": "/src/nti/app/pyramid_zope/security.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nIntegrations for :mod:`zope.security` and :mod:`zope.authentication`.\n\nMany of these are adapters registered automatically by this package's\nconfigure.zcml.\n\nIn plain Zope3, the :class:`zope.publisher.interfaces.IRequest` *is*\nan :class:`zope.security.interfaces.IParticipation` for the request's\nprincipal (or the unauthenticated or fallback unauthenticated\nprincipal). That request is defined to be the first participation in\nthe interaction by :mod:`zope.app.publication.zopepublication` (right after\nauthentication and right before traversal).\n\nPyramid's request is not a participation, and Pyramid doesn't\nestablish an interaction either. Something else (typically a\ntween like **TODO: Copy Tween**) does that. These adapters will\nwork only after that is done.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom pyramid.threadlocal import get_current_request\nfrom pyramid.interfaces import IRequest\n\nfrom zope import component\nfrom zope import interface\n\nfrom zope.authentication.interfaces import IFallbackUnauthenticatedPrincipal\nfrom zope.security import management as default_interaction_mgmt\nfrom zope.security.interfaces import IInteractionManagement\nfrom zope.security.interfaces import IInteraction\nfrom zope.security.interfaces import IPrincipal\nfrom zope.security.interfaces import NoInteraction\n\[email protected](IRequest)\[email protected](IInteraction)\ndef interaction_from_request(request=None):\n \"\"\"\n interaction_from_request(request: IRequest) -> IInteraction\n\n Find the :class:`.IInteraction` for the *request*.\n\n The request is adapted to :class:`IInteractionManagement` (using\n the default :mod:`zope.security.management` for a thread-local\n interaction if there is no specific adapter registered), and\n the current interaction is returned.\n\n This is registered as an adapter on the Pyramid ``IRequest`` interface;\n to provide a more specific policy, register an adapter on the concrete\n class.\n\n :raise zope.security.interfaces.NoInteraction: If there is\n no interaction.\n\n .. seealso:: :class:`zope.security.interfaces.IInteractionManagement`\n \"\"\"\n request = get_current_request() if request is None else request\n interaction_mgmt = IInteractionManagement(request, default_interaction_mgmt)\n # If we return None here, we can use a default value for the interaction\n # or raise a TypeError with IInteraction(request, <default>); if we\n # raise NoInteraction it would be propagated unconditionally.\n return interaction_mgmt.getInteraction() # pylint:disable=too-many-function-args\n\n\[email protected](IInteraction)\[email protected](IPrincipal)\ndef principal_from_interaction(interaction):\n \"\"\"\n principal_from_interaction(interaction: IInteraction) -> IPrincipal\n\n Find the primary :class:`IPrincipal` for the *interaction*. 
The primary\n principal is the principal of the first participation.\n \"\"\"\n return next(iter(interaction.participations)).principal\n\n\[email protected](IRequest)\[email protected](IPrincipal)\ndef principal_from_request(request=None):\n \"\"\"\n principal_from_request(request: IRequest) -> IPrincipal\n\n Find the primary :class:`IPrincipal` for the *request*.\n\n First adapts the request into an :class:`IInteraction` (probably\n using :func:`interaction_from_request`), and then adapts the\n interaction into an ``IPrincipal`` (probably using :func:`principal_from_interaction`).\n If there is no interaction, the unauthenticated principal is returned.\n\n This is registered as an adapter on the Pyramid ``IRequest`` interface;\n to provide a more specific policy, register an adapter on the concrete\n class.\n\n \"\"\"\n try:\n interaction = IInteraction(\n request if request is not None else get_current_request(),\n )\n except NoInteraction:\n return component.getUtility(IFallbackUnauthenticatedPrincipal)\n\n return IPrincipal(interaction)\n"
},
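The module docstring above leaves the interaction-establishing tween as a TODO; a rough sketch of what such a tween could look like is below. `newInteraction`/`endInteraction` are real `zope.security.management` APIs, while the participation class and the principal lookup are hypothetical:

```python
# Minimal sketch, not part of the dataset payload: a tween that opens a
# zope.security interaction around each request, so the adapters above
# have an interaction to find. The principal lookup is hypothetical.
from zope.security.management import newInteraction, endInteraction

class RequestParticipation(object):
    """Bare IParticipation: a principal plus an interaction slot."""
    interaction = None  # assigned by newInteraction()

    def __init__(self, principal):
        self.principal = principal

def interaction_tween_factory(handler, registry):
    # Register with config.add_tween('yourmodule.interaction_tween_factory')
    def interaction_tween(request):
        principal = getattr(request, 'principal', None)  # hypothetical lookup
        newInteraction(RequestParticipation(principal))
        try:
            return handler(request)
        finally:
            endInteraction()
    return interaction_tween
```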
{
"alpha_fraction": 0.7389575839042664,
"alphanum_fraction": 0.7438162565231323,
"avg_line_length": 34.375,
"blob_id": "aeef46ec81c37c89f6d02101278bce74a3e94a40",
"content_id": "2e248d65db7219dbb8b2ef29d13cd9e02cc0c395",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2264,
"license_type": "permissive",
"max_line_length": 115,
"num_lines": 64,
"path": "/src/nti/app/pyramid_zope/i18n/subscribers.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nI18N related subscribers.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom pyramid.i18n import default_locale_negotiator\nfrom pyramid.interfaces import IContextFound\n\nfrom zope import component\nfrom zope import interface\n\nfrom zope.i18n.interfaces import IUserPreferredLanguages\nfrom zope.security.interfaces import IPrincipal\nfrom zope.authentication.interfaces import IUnauthenticatedPrincipal\n\nfrom .interfaces import IPreferredLanguagesRequest\n\n__docformat__ = \"restructuredtext en\"\n\n__all__ = [\n 'adjust_request_interface_for_preferred_languages',\n]\n\n\n\n\[email protected](IContextFound)\ndef adjust_request_interface_for_preferred_languages(event):\n \"\"\"\n Checks the conditions outlined in this package's documentation and\n adds a marker interface (:class:`.IPreferredLanguagesRequest`) to\n the request if they hold true.\n\n This is registered as a subscriber for Pyramid's\n :class:`.IContextFound` event by this package's ``configure.zcml``\n \"\"\"\n request = event.request\n # Does pyramid's default negotiator, which uses explicit settings\n # like a request param or cookie have an answer? If so, we need\n # our custom policy...these override the Accept-Language header\n if default_locale_negotiator(request):\n interface.alsoProvides(request, IPreferredLanguagesRequest)\n return\n\n # What about the zope/plone cookie?\n if request.cookies.get('I18N_LANGUAGE'):\n # For benefit of the default localization machinery\n # in case it's used, copy\n request._LOCALE_ = request.cookies.get('I18N_LANGUAGE')\n interface.alsoProvides(request, IPreferredLanguagesRequest)\n return\n\n # Ok, is there an authenticated user with preferred languages?\n # (We leave detecting defaults up to the actual policy)\n remote_user = IPrincipal(request, None)\n if remote_user and not IUnauthenticatedPrincipal.providedBy(remote_user):\n remote_user_langs = IUserPreferredLanguages(remote_user)\n if remote_user_langs and remote_user_langs.getPreferredLanguages(): # pylint:disable=too-many-function-args\n interface.alsoProvides(request, IPreferredLanguagesRequest)\n"
},
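As the docstring above notes, the package registers this subscriber through its `configure.zcml`; under Pyramid's imperative configuration the equivalent registration would look roughly like this (`add_subscriber` is standard Pyramid API):

```python
# Minimal sketch, not part of the dataset payload: the imperative
# equivalent of the ZCML subscriber registration mentioned above.
from pyramid.config import Configurator
from pyramid.interfaces import IContextFound

from nti.app.pyramid_zope.i18n.subscribers import (
    adjust_request_interface_for_preferred_languages,
)

config = Configurator()
config.add_subscriber(adjust_request_interface_for_preferred_languages,
                      IContextFound)
```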
{
"alpha_fraction": 0.6273504495620728,
"alphanum_fraction": 0.6299145221710205,
"avg_line_length": 32.42856979370117,
"blob_id": "d48850512e89244b0de20b614c20761ccff44ff6",
"content_id": "1f95944346c2f9905f6ec028249524c03f8dde74",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10530,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 315,
"path": "/src/nti/app/pyramid_zope/request.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPartial support for making a Pyramid request/response object pair work more\nlike a Zope request.\n\nPartially based on ideas from :mod:`pyramid_zope_request`\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\nfrom six.moves.urllib_parse import urlparse\nfrom six.moves.urllib_parse import urlunparse\nfrom six.moves.urllib_parse import urljoin\n\nfrom zope import component\nfrom zope import interface\n\nfrom zope.authentication.interfaces import IUnauthenticatedPrincipal\n\nfrom zope.cachedescriptors.property import Lazy\n\nfrom zope.i18n.locales import locales\n\nfrom zope.proxy import non_overridable\nfrom zope.proxy import getProxiedObject\n\nfrom zope.proxy.decorator import SpecificationDecoratorBase\n\nfrom zope.publisher.base import RequestDataProperty\n\nfrom zope.publisher.http import URLGetter\n\nimport zope.publisher.interfaces.browser\n\nfrom zope.security.interfaces import NoInteraction\nfrom zope.security.management import getInteraction\n\nfrom six import text_type\nfrom pyramid.interfaces import IRequest\nfrom pyramid.i18n import get_locale_name\n\nfrom nti.property.property import alias\n\n# Implement the request\n# and the \"skin\". In zope, the skin is changeable (IBrowserRequest\n# implements ISkinnable), especially\n# through the ++skin++ namespace adapter. Here\n# we're just declaring it (as it happens, IDefaultBrowserLayer\n# is a sub-type of IBrowserRequest)\n\n\[email protected](IRequest)\[email protected](zope.publisher.interfaces.browser.IBrowserRequest,\n zope.publisher.interfaces.browser.IDefaultBrowserLayer)\nclass PyramidZopeRequestProxy(SpecificationDecoratorBase):\n \"\"\"\n Makes a Pyramid IRequest object look like a Zope request\n for purposes of rendering. The existing interfaces (IRequest) are preserved.\n\n Changes to a proxy, including annotations, are persistent, and\n will be reflected if the same pyramid request is proxied again\n later (unlike :mod:`pyramid_zope_request`, which takes the approach of\n subclassing :class:`zope.publisher.base.BaseRequest` and overriding\n certain methods to call through to pyramid, but not things\n like annotations.)\n\n .. note:: Most of this behaviour is added from reverse-engineering what\n existing zope code, most notably :mod:`z3c.table.table` uses.\n Some additional support for :mod:`z3c.form` comes from\n looking at what :mod:`pyramid_zope_request` does.\n \"\"\"\n\n def __init__(self, base):\n super(PyramidZopeRequestProxy, self).__init__(base)\n if getattr(base, 'registry', None) is None:\n base.registry = component.getSiteManager()\n\n base.response.getHeader = lambda k: base.response.headers[k]\n\n def setHeader(name, value, literal=False):\n __traceback_info__ = name, value, literal\n # Go to bytes for python 2 if incoming was a string\n name = str(name)\n value = str(value) if isinstance(value, text_type) else value\n if name.lower() == 'content-type':\n # work around that webob stores the charset\n # in the header ``Content-type``, zope kills the charset\n # by setting e.g. 
``text/html`` without charset\n charset = base.response.charset\n base.response.headers[name] = value\n # restore the old charset\n base.response.charset = charset\n else:\n base.response.headers[name] = value\n\n def redirect(location, status=302, trusted=False):\n \"\"\"\n TODO support for the trusted arg\n \"\"\"\n base.response.status_code=status\n location = urljoin(base.url, location)\n base.response.setHeader('Location', location)\n base.response.redirect = redirect\n \n base.response.setHeader = setHeader\n base.response.addHeader = setHeader\n\n base.response.getStatus = lambda: base.response.status_code\n base.response.setStatus = lambda status_code: setattr(base.response,\n 'status_code',\n status_code)\n\n @Lazy\n def form(self):\n \"\"\"\n Process inputs into the form object.\n\n See also: https://github.com/zopefoundation/pyramid_zope_request/blob/master/src/pyramid_zope_request/__init__.py#L78\n \"\"\"\n # BrowserRequest processes inputs HEAVILY\n # we'll process only :list because that's only what we use nowadays\n # and the code in BrowserRequest isn't really reusable\n params = self.params\n rv = {}\n for k in params.keys():\n v = params.getall(k)\n if k.endswith(':list'):\n name = k[:-5]\n else:\n v = v[0]\n name = k\n rv[name] = v\n\n return rv\n\n @non_overridable\n def get(self, key, default=None):\n \"\"\"\n Returns GET and POST params. Multiple values are returned as lists.\n\n Pyramid's IRequest has a deprecated method that exposes\n the WSGI environ, making the request dict-like for the environ.\n Hence the need to mark this method non_overridable.\n \"\"\"\n # Zope does this by actually processing the inputs\n # into a \"form\" object\n\n def _d_o_l(o):\n # DummyRequest GET/POST are different\n return o.dict_of_lists() if hasattr(o, 'dict_of_lists') else o.copy()\n dict_of_lists = _d_o_l(self.GET)\n dict_of_lists.update(_d_o_l(self.POST))\n val = dict_of_lists.get(key)\n if val:\n if len(val) == 1:\n val = val[0] # de-list things that only appeared once\n else:\n # Ok, in the environment?\n val = self.environ.get(key, default)\n return val\n\n def items(self):\n result = {}\n result.update(self.environ)\n result.update(self.GET)\n result.update(self.POST)\n return result.items()\n\n def keys(self):\n return [k for k, _ in self.items()]\n\n def has_key(self, k):\n return k in self\n\n def values(self):\n return [v for _, v in self.items()]\n\n def __iter__(self):\n return iter(self.keys())\n\n def __len__(self):\n return len(self.items())\n\n def __contains__(self, key):\n return key in self.keys()\n\n def __getitem__(self, key):\n result = self.get(key, self)\n if result is self:\n raise KeyError(key)\n return result\n\n def getHeader(self, name, default=None):\n return self.headers.get(name, default)\n\n def _traverse_request_path(self, level, path_only):\n parsed = list(urlparse(self.path_url))\n if level:\n parts = [x for x in parsed[2].split('/') if x]\n if abs(level) > len(parts):\n raise IndexError(abs(level))\n parts = parts[:level]\n parsed[2] = '/'+'/'.join(parts) if parts else ''\n\n return parsed[2] if path_only else urlunparse(parsed)\n\n def getURL(self, level=0, path_only=False):\n \"\"\"\n zope.publisher uses traversed names here\n instead of working on the url of the request.\n This implementation works off the request, which\n will potentially yield different results. 
What's this gonna break?\n \"\"\"\n if level == 0 and path_only:\n return self.path_url\n\n return self._traverse_request_path(-level, path_only)\n\n def getApplicationURL(self, depth=0, path_only=False):\n \"\"\"\n Like getURL, zope.publisher uses traversed names here\n instead of working on the url of the request.\n This implementation works off the request, which\n will potentially yield different results. What's this gonna break?\n \"\"\"\n if depth == 0 and not path_only:\n return self.application_url\n \n return self._traverse_request_path(depth, path_only)\n\n URL = RequestDataProperty(URLGetter)\n\n @property\n def locale(self):\n try:\n # Country is optional\n lang_country = get_locale_name(self).split('-')\n except AttributeError: # Testing, registry has no settings\n lang_country = ('en', 'US')\n return locales.getLocale(*lang_country)\n\n @property\n def annotations(self):\n return getProxiedObject(self).__dict__.setdefault('annotations', {})\n\n def _get__annotations__(self):\n return getProxiedObject(self).__dict__.get('__annotations__')\n\n def _set__annotations__(self, val):\n getProxiedObject(self).__dict__['__annotations__'] = val\n __annotations__ = property( # pylint:disable=bad-option-value,property-on-old-class\n # On python 2, pylint thinks this is an old-style\n # class, for some reason, and complains here. But not about\n # the call to super() in the constructor.\n # Python 3, of course has no old-style classes so it doesn't have that\n # warning\n _get__annotations__,\n _set__annotations__\n )\n\n\n environment = alias('environ')\n\n @property\n def bodyStream(self):\n return self.body_file_seekable\n\n def _unimplemented(self, *args, **kwargs):\n raise NotImplementedError()\n\n @property\n def _unimplemented_prop(self):\n return NotImplemented\n\n def getVirtualHostRoot(self):\n return None\n\n def getPositionalArguments(self):\n return tuple()\n\n setPathSuffix = _unimplemented\n getTraversalStack = _unimplemented\n setTraversalStack = _unimplemented\n processInputs = _unimplemented\n publication = _unimplemented_prop\n setPublication = _unimplemented\n retry = _unimplemented\n hold = _unimplemented\n setupLocale = _unimplemented\n traverse = _unimplemented\n close = _unimplemented\n debug = False\n\n def supportsRetry(self):\n return False\n\n # This is supposed to be an IParticipation;\n # we could almost do that\n setPrincipal = _unimplemented\n\n @property\n def principal(self):\n try:\n return getInteraction().participations[0].principal\n except (NoInteraction, IndexError, AttributeError):\n return component.queryUtility(IUnauthenticatedPrincipal)\n\n @property\n def interaction(self):\n try:\n return getInteraction()\n except NoInteraction:\n return None\n"
},
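In use, the proxy above is normally reached by adapting a Pyramid request to the Zope interface, as the package's tests do. A minimal sketch, assuming the package's adapter registrations (its `configure.zcml`) have been loaded:

```python
# Minimal sketch, not part of the dataset payload. Assumes this package's
# ZCML has been loaded so the IBrowserRequest adapter is registered.
from pyramid.request import Request
from zope.publisher.interfaces.browser import IBrowserRequest

request = Request.blank(
    '/?lastName=Doe&country:list=Japan&country:list=Hungary')
zrequest = IBrowserRequest(request)

# The ':list' suffix is stripped and repeated keys stay lists, per the
# `form` property above; single-valued keys are de-listed.
print(zrequest.form)  # {'lastName': 'Doe', 'country': ['Japan', 'Hungary']}
print(zrequest.getHeader('Host'))  # WebOb headers through the Zope spelling
```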
{
"alpha_fraction": 0.6098400354385376,
"alphanum_fraction": 0.6163191199302673,
"avg_line_length": 30.86451530456543,
"blob_id": "a8c6fc2a2d559cf8d66780e2478570f259f425c4",
"content_id": "c43cbda3ee991db06709c893025880db7f168eed",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4939,
"license_type": "permissive",
"max_line_length": 93,
"num_lines": 155,
"path": "/src/nti/app/pyramid_zope/i18n/tests/test_adapters.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\n\nimport fudge\nfrom hamcrest import assert_that\nfrom hamcrest import is_\nfrom hamcrest import is_not as does_not\n\nfrom nti.testing.matchers import is_empty\nfrom nti.testing.matchers import provides\n\nfrom pyramid.events import ContextFound\nfrom pyramid.interfaces import ITranslationDirectories\nfrom pyramid.request import Request\n\nfrom zope import component\nfrom zope import interface\nfrom zope.event import notify\nfrom zope.i18n.interfaces import IUserPreferredLanguages\n\nfrom ..adapters import preferred_language_locale_negotiator\nfrom ..interfaces import IPreferredLanguagesRequest\n\nfrom ...tests import ConfiguringLayer\n\ndef adjust(request):\n notify(ContextFound(request))\n\n\nclass TestApplicationRequestPolicy(unittest.TestCase):\n # pylint:disable=too-many-function-args\n\n layer = ConfiguringLayer\n\n request = None\n\n def setUp(self):\n self.request = Request.blank('/')\n\n def _langs(self):\n langs = IUserPreferredLanguages(self.request)\n return langs.getPreferredLanguages()\n\n def _locale(self):\n return preferred_language_locale_negotiator(self.request)\n\n def test_adjust_interface_blank(self):\n # Initially, nothing\n adjust(self.request)\n assert_that(self.request, does_not(\n provides(IPreferredLanguagesRequest)))\n assert_that(self._langs(), is_empty())\n assert_that(self._locale(), is_('en'))\n\n def test_adjust_zope_cookie(self):\n self.request.cookies['I18N_LANGUAGE'] = 'ru'\n adjust(self.request)\n assert_that(self._langs(), is_(['ru']))\n assert_that(self._locale(), is_('ru'))\n\n def test_adjust_pyramid_property(self):\n self.request._LOCALE_ = 'ru'\n adjust(self.request)\n assert_that(self._langs(), is_(['ru']))\n assert_that(self._locale(), is_('ru'))\n\n @fudge.patch('nti.app.pyramid_zope.i18n.subscribers.IPrincipal',\n 'nti.app.pyramid_zope.i18n.adapters.IPrincipal')\n def test_adjust_remote_user(self, fake_get1, fake_get2):\n @interface.implementer(IUserPreferredLanguages)\n class User(object):\n def getPreferredLanguages(self):\n return ['ru']\n\n fake_get1.is_callable().returns(User())\n fake_get2.is_callable().returns(User())\n\n adjust(self.request)\n assert_that(self._langs(), is_(['ru']))\n assert_that(self._locale(), is_('ru'))\n\n @fudge.patch('nti.app.pyramid_zope.i18n.subscribers.IPrincipal',\n 'nti.app.pyramid_zope.i18n.adapters.IPrincipal')\n def test_adjust_remote_user_default_en(self, fake_get1, fake_get2):\n #@interface.implementer(IUser)\n class User(object):\n pass\n\n fake_get1.is_callable().returns(User())\n fake_get2.is_callable().returns(User())\n\n adjust(self.request)\n # The default, because there's no header and nothing\n # specified for this user, is empty (this would trigger\n # the translation domain fallback)\n assert_that(self._langs(), is_([]))\n assert_that(self._locale(), is_('en'))\n\n @fudge.patch('nti.app.pyramid_zope.i18n.subscribers.IPrincipal',\n 'nti.app.pyramid_zope.i18n.adapters.IPrincipal')\n def test_adjust_remote_user_default_ru(self, fake_get1, fake_get2):\n #@interface.implementer(IUser)\n class User(object):\n pass\n\n fake_get1.is_callable().returns(User())\n fake_get2.is_callable().returns(User())\n\n self.request.environ['HTTP_ACCEPT_LANGUAGE'] = 'ru'\n adjust(self.request)\n\n # The accept header rules\n assert_that(self._langs(), is_(['ru']))\n assert_that(self._locale(), is_('ru'))\n\n\n\nclass 
TestApplicationTranslationDirs(unittest.TestCase):\n layer = ConfiguringLayer\n\n @fudge.patch('nti.app.pyramid_zope.i18n.adapters.component.getAllUtilitiesRegisteredFor')\n def test_translation_dirs(self, get_all):\n class Domain(object):\n def __iter__(self):\n return iter([CatInfo()])\n\n class CatInfo(object):\n\n def getCatalogsInfo(self):\n return {\n # These tests only work on POSIX.\n 'en': ['/nti/appserver/locales/en/LC_MESSAGES/z3c.password.mo'],\n # Entries with more than one are ignored\n 'ru': [\n 'abc',\n 'def',\n ],\n # Entries that don't end in .mo are ignored\n 'es': [\n 'foo.pot'\n ],\n }\n\n get_all.is_callable().returns(Domain())\n dirs = component.getUtility(ITranslationDirectories)\n\n self.assertEqual(\n list(dirs),\n ['/nti/appserver/locales'])\n"
},
{
"alpha_fraction": 0.6337644457817078,
"alphanum_fraction": 0.6409121751785278,
"avg_line_length": 34.82926940917969,
"blob_id": "fae0975337b52485594435f01dfea0b70efe0d4b",
"content_id": "3dcfed3f4f0b0751b74b38b9e3274b4ef762f966",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2938,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 82,
"path": "/src/nti/app/pyramid_zope/tests/test_request.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\n# disable: accessing protected members, too many methods\n# pylint: disable=W0212,R0904\n\nfrom hamcrest import assert_that\nfrom hamcrest import calling\nfrom hamcrest import is_\nfrom hamcrest import raises\n\nfrom nti.testing.matchers import verifiably_provides\n\nfrom zope.publisher.interfaces.browser import IBrowserRequest\n\nfrom pyramid.request import Request\nfrom pyramid.interfaces import IRequest\n\nfrom nti.testing.base import SharedConfiguringTestBase\n\n\nclass TestRequest(SharedConfiguringTestBase):\n\n set_up_packages = (__name__,)\n\n def test_adapts(self):\n request = Request.blank('/')\n zrequest = IBrowserRequest(request)\n assert_that(zrequest, verifiably_provides(IBrowserRequest))\n # and it's still a valid pyramid request\n assert_that(zrequest, verifiably_provides(IRequest))\n\n def test_form_parsing(self):\n environ = {\n 'PATH_INFO': '/',\n 'QUERY_STRING':\n 'lastName=Doe;country:list=Japan;country:list=Hungary',\n }\n request = Request(environ)\n zrequest = IBrowserRequest(request)\n assert_that(zrequest.form,\n {'country': ['Japan', 'Hungary'], 'lastName': 'Doe'})\n\n def test_has_key(self):\n environ = {\n 'PATH_INFO': '/',\n 'QUERY_STRING':\n 'lastName=Doe;country:list=Japan;country:list=Hungary',\n }\n request = Request(environ)\n zrequest = IBrowserRequest(request)\n\n assert_that(zrequest.has_key('lastName'), is_(True))\n\n def test_url_traversal(self):\n request = Request.blank('http://foobar.com/folder/item')\n zrequest = IBrowserRequest(request)\n\n assert_that(str(zrequest.URL), is_('http://foobar.com/folder/item'))\n\n assert_that(zrequest.URL['-1'], is_('http://foobar.com/folder'))\n assert_that(zrequest.URL['-2'], is_('http://foobar.com'))\n assert_that(calling(zrequest.URL.__getitem__).with_args('-3'), raises(KeyError))\n\n assert_that(zrequest.URL['0'], is_('http://foobar.com'))\n assert_that(zrequest.URL['1'], is_('http://foobar.com/folder'))\n assert_that(zrequest.URL['2'], is_('http://foobar.com/folder/item'))\n assert_that(calling(zrequest.URL.__getitem__).with_args('3'), raises(KeyError))\n\n assert_that(zrequest.URL.get('0'), is_('http://foobar.com'))\n assert_that(zrequest.URL.get('1'), is_('http://foobar.com/folder'))\n assert_that(zrequest.URL.get('2'), is_('http://foobar.com/folder/item'))\n assert_that(zrequest.URL.get('3', 'none'), is_('none'))\n\n def test_positional_args(self):\n request = Request.blank('/dataserver2/foo/bar.html')\n zrequest = IBrowserRequest(request)\n\n assert_that(zrequest.getPositionalArguments(), is_(()))\n"
},
{
"alpha_fraction": 0.4837310314178467,
"alphanum_fraction": 0.509761393070221,
"avg_line_length": 20.952381134033203,
"blob_id": "45688feec1caa98dae82c3f743193b665132b98b",
"content_id": "d125b15d2b187d4c1ab4a1c9c6fb0f15e7fa1f44",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 461,
"license_type": "permissive",
"max_line_length": 53,
"num_lines": 21,
"path": "/docs/i18n.rst",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "======================\n Internationalization\n======================\n\n\n\nnti.app.pyramid_zope.i18n\n=========================\n.. automodule:: nti.app.pyramid_zope.i18n\n :members:\n\nnti.app.pyramid_zope.i18n.adapters\n==================================\n.. automodule:: nti.app.pyramid_zope.i18n.adapters\n :members:\n\n\nnti.app.pyramid_zope.i18n.subscribers\n=====================================\n.. automodule:: nti.app.pyramid_zope.i18n.subscribers\n :members:\n"
},
{
"alpha_fraction": 0.7309582233428955,
"alphanum_fraction": 0.7334152460098267,
"avg_line_length": 46.882354736328125,
"blob_id": "5200a92303b92b6b63a65e48945580e9b15914f0",
"content_id": "a815d6d7df872c936786bae1e039e476d180da30",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 814,
"license_type": "permissive",
"max_line_length": 101,
"num_lines": 17,
"path": "/README.rst",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "======================\n nti.app.pyramid_zope\n======================\n\n.. image:: https://github.com/NextThought/nti.app.pyramid_zope/workflows/tests/badge.svg\n :target: https://github.com/NextThought/nti.app.pyramid_zope/actions?query=workflow%3Atests\n\n.. image:: https://coveralls.io/repos/github/NextThought/nti.app.pyramid_zope/badge.svg?branch=master\n :target: https://coveralls.io/github/NextThought/nti.app.pyramid_zope?branch=master\n\n.. image:: https://readthedocs.org/projects/ntiapppyramid-zope/badge/?version=latest\n :target: https://ntiapppyramid-zope.readthedocs.io/en/latest/?badge=latest\n :alt: Documentation Status\n\nThis library provides a set of components (often in the\n``zope.interface`` and ``zope.component`` meanings) to integrate Zope3\ntechnologies into a modern Pyramid application.\n"
},
{
"alpha_fraction": 0.6774716377258301,
"alphanum_fraction": 0.7017828226089478,
"avg_line_length": 18.90322494506836,
"blob_id": "c694f273bd45205b1560b48ed10c7f0c5c6eeb58",
"content_id": "2db11b247c0f8de98e6115b5371b6695d60ae243",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 617,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 31,
"path": "/tox.ini",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "[tox]\nenvlist =\n py27,py36,py37,py38,py39,pypy,pypy3,coverage,docs\n\nsetenv =\n CHAMELEON_CACHE={envbindir}\n\n[testenv]\nusedevelop = true\nextras = test\ncommands =\n python -m zope.testrunner --test-path=src\n\n[testenv:coverage]\nbasepython =\n python3\ncommands =\n coverage run -p -m zope.testrunner --test-path=src\n coverage combine\n coverage report --fail-under=75\ndeps =\n coverage\n\n[testenv:docs]\nbasepython =\n python3\ncommands =\n sphinx-build -b html -d docs/_build/doctrees docs docs/_build/html\n sphinx-build -b doctest -d docs/_build/doctrees docs docs/_build/doctest\ndeps =\n .[docs]\n"
},
{
"alpha_fraction": 0.5340579748153687,
"alphanum_fraction": 0.5492753386497498,
"avg_line_length": 26.600000381469727,
"blob_id": "73793399176594134dfa165f56b7cb2e81688722",
"content_id": "c5fa320f05d73ce182e977c9b0a0485a1c824754",
"detected_licenses": [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2760,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 100,
"path": "/setup.py",
"repo_name": "NextThought/nti.app.pyramid_zope",
"src_encoding": "UTF-8",
"text": "import codecs\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nentry_points = {\n 'console_scripts': [\n 'nti_zpt_render = nti.app.pyramid_zope.z3c_zpt:main',\n ],\n \"z3c.autoinclude.plugin\": [\n 'target = nti.app',\n ],\n}\n\nTESTS_REQUIRE = [\n 'coverage',\n 'fudge',\n 'nti.testing',\n 'zope.testrunner',\n]\n\n\ndef _read(fname):\n with codecs.open(fname, encoding='utf-8') as f:\n return f.read()\n\n\nsetup(\n name='nti.app.pyramid_zope',\n version=\"0.0.4.dev0\",\n author='Jason Madden',\n author_email='[email protected]',\n description=\"Support for a more Zope-like pyramid.\",\n long_description=(_read('README.rst') + '\\n\\n' + _read(\"CHANGES.rst\")),\n license='Apache',\n keywords='pyramid zope',\n classifiers=[\n 'Framework :: Pyramid',\n 'Framework :: Zope :: 3',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n url=\"https://github.com/NextThought/nti.app.pyramid_zope\",\n zip_safe=True,\n packages=find_packages('src'),\n package_dir={'': 'src'},\n include_package_data=True,\n namespace_packages=['nti', 'nti.app'],\n tests_require=TESTS_REQUIRE,\n install_requires=[\n 'Chameleon',\n 'PyYAML',\n 'nti.property',\n 'nti.traversal',\n 'pyramid < 2.0',\n 'pyramid-chameleon',\n 'setuptools',\n 'simplejson',\n 'six',\n 'z3c.pt',\n 'z3c.ptcompat',\n 'z3c.template',\n 'zope.authentication',\n 'zope.browserpage',\n 'zope.cachedescriptors',\n 'zope.component',\n 'zope.configuration',\n 'zope.dottedname',\n 'zope.i18n',\n 'zope.interface',\n 'zope.pagetemplate',\n 'zope.principalregistry',\n 'zope.proxy',\n 'zope.publisher',\n 'zope.security',\n 'zope.traversing',\n 'zope.viewlet',\n ],\n extras_require={\n 'test': TESTS_REQUIRE,\n 'docs': [\n 'Sphinx',\n 'repoze.sphinx.autointerface',\n 'sphinx_rtd_theme',\n ] + TESTS_REQUIRE,\n },\n entry_points=entry_points,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5\",\n)\n"
}
] | 22 |
sarlindo-zz/rancher | https://github.com/sarlindo-zz/rancher | 132cba820b6b8f5d1ceb38ea3a7e54f3d339e81e | cf23f8cf481a33979297e1f3558984050133818e | 43eff4be3f234da2b2d89326d105cb5e28645c94 | refs/heads/master | 2021-05-30T13:26:08.040710 | 2016-01-19T16:36:37 | 2016-01-19T16:36:37 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.26459142565727234,
"alphanum_fraction": 0.6322957277297974,
"avg_line_length": 45.6363639831543,
"blob_id": "564e471019e82427704e1dc91e5d7201b2a91527",
"content_id": "b47c2c75357d2e643d086a20cea1fa88c24a1ece",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 514,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 11,
"path": "/tools/runagent.sh",
"repo_name": "sarlindo-zz/rancher",
"src_encoding": "UTF-8",
"text": "#sudo docker run -d -p 8400:8400 -p 8500:8500 -p 8600:53/udp --name node2 -h node2 progrium/consul -join 172.17.0.2\nsudo docker run -d --name consul -h $HOSTNAME -v /mnt:/data \\\n -p 192.168.33.11:8300:8300 \\\n -p 192.168.33.11:8301:8301 \\\n -p 192.168.33.11:8301:8301/udp \\\n -p 192.168.33.11:8302:8302 \\\n -p 192.168.33.11:8302:8302/udp \\\n -p 192.168.33.11:8400:8400 \\\n -p 192.168.33.11:8500:8500 \\\n -p 172.17.0.1:153:53/udp \\\n progrium/consul -advertise 192.168.33.11 -join 192.168.33.10\n\n"
},
{
"alpha_fraction": 0.4876273572444916,
"alphanum_fraction": 0.491994172334671,
"avg_line_length": 39.629032135009766,
"blob_id": "c03a54b90a899552e6703236b33f4e123a68b7fe",
"content_id": "aa0d39d0390538eb172e007e680e6f171d15522f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7557,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 186,
"path": "/tools/find_service.py",
"repo_name": "sarlindo-zz/rancher",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport os\nimport sys\nimport getopt\nimport requests, json\nfrom requests.auth import HTTPBasicAuth\nfrom time import sleep\nimport random\n\n# log errors\ndef log(msg):\n sys.stderr.write(\"%s\\n\" % msg)\n\n# usage help message\ndef usage(msg = None):\n\n log(\"Usage: find_ports [--url=<RANCHER_URL> --key=<RANCHER_ACCESS_KEY> --secret=<RANCHER_SECRET_KEY>]\\n\"\n \" --stack=<STACK> --service=<SERVICE> [--port=<PORT>] [--one] [--uri=<URI>] [--proto=<proto>]\\n\"\n \"Finds public ports for a specific specific service in rancher\\n\"\n \"Credentials can be supplied as args or defined as environment vars\\n\"\n \"If port is specified then only that *private* port will be searched for, if not then you must have\\n\"\n \"only one exposed port. If this script finds more than one it will return an error. THis is built\\n\"\n \"in in order to handle random external ports when Rancher get round to it.\\n\"\n \"This script returns a list of the public ips and ports <ip1>:<port1> <ip2>:<port2> ...\\n\"\n \"Unless --one is specified in which case it returns one random instance\\n\"\n \"Giving a uri which could be as simple as / will return an http:// url. THis obviously also implies --one\\n\"\n \"URL protocol is normally http but can be specified if you need https, ssh etc\\n\"\n \"%s\" % msg or \"\")\n sys.exit(1)\n\n# get credentials form args or env\ndef getArgs():\n args = {}\n args['rancher_url'] = os.environ.get(\"RANCHER_URL\",None)\n args['rancher_key'] = os.environ.get(\"RANCHER_ACCESS_KEY\",None)\n args['rancher_secret'] = os.environ.get(\"RANCHER_SECRET_KEY\",None)\n args['port'] = 0\n args['one'] = False\n args['uri'] = False\n args['proto'] = 'http'\n\n try:\n opts, argv = getopt.getopt(sys.argv[1:],\"hu:k:s:\",\n [\"help\",\"url=\",\"key=\",\"secret=\",\"stack=\",\"service=\",\"port=\",\"one\",\"uri=\",\"proto=\"])\n for o,a in opts:\n if o in (\"-h\",\"--help\"):\n usage()\n sys.exit(1)\n elif o in (\"-u\",\"--url\"):\n args['rancher_url'] = a\n elif o in (\"-k\",\"--key\"):\n args['rancher_key'] = a\n elif o in (\"-s\",\"--secret\"):\n args['rancher_secret'] = a\n elif o in (\"--stack\"):\n args['stack'] = a\n elif o in ('--service'):\n args['service'] = a\n elif o in (\"--port\"):\n args['port'] = int(a)\n elif o in (\"--one\"):\n args['one'] = True\n elif o in (\"--uri\"):\n args['one'] = True\n args['uri'] = a\n elif o in (\"--proto\"):\n args['proto'] = a\n\n except getopt.GetoptError,e:\n usage(e)\n\n if args.get('rancher_url',None) == None:\n usage(\"Rancher URL not specified\")\n sys.exit(1)\n if args.get('rancher_key',None) == None:\n usage(\"Rancher key not specified\")\n sys.exit(1)\n if args.get('rancher_secret',None) == None:\n usage(\"Rancher secret not specified\")\n sys.exit(1)\n if args.get('stack',None) == None:\n usage(\"Stack not specified\")\n sys.exit(1)\n if args.get('service',None) == None:\n usage(\"Service not specified\")\n sys.exit(1)\n\n args['rancher_protocol'], args['rancher_host'] = args['rancher_url'].split(\"://\")\n\n return args\n\n# make a rancher url\ndef rancherUrl(args,uri):\n\n url = \"%s://%s%s\" % (args['rancher_protocol'],args['rancher_host'],uri)\n\n return url\n\n# general purpose rancher call\ndef rancherCall(args,url):\n response = requests.get(url,auth=HTTPBasicAuth(args['rancher_key'],args['rancher_secret']))\n\n if response.status_code == 200:\n data = json.loads(response.text)\n else:\n log(\"Could not retrieve link %s - staus %d\" % (url, response.status_code))\n data = {}\n\n return 
response.status_code, data\n\n# follow a link\ndef rancherLink(args,data,link):\n\n url = data['links'][link]\n status, data = rancherCall(args,url)\n\n return status,data\n\n# find an item in data by name\ndef findByName(data,name):\n for item in data['data']:\n if item['name'] == name:\n return item\n return None\n\n# traverse a path from a given rancher item\ndef rancherTraverse(args,item,path):\n\n thisitem = item\n for step in path:\n status, children = rancherLink(args,thisitem,step[0])\n child = findByName(children,step[1])\n\n if not child:\n log(\"Cannot find %s called %s in %s %s\" % (step[0], step[1], item['type'], item['name']))\n sys.exit(2)\n\n thisitem = child\n\n return thisitem\n\n# get my environment with a bit of error checking\ndef getEnvironment(args):\n status, data = rancherCall(args,rancherUrl(args,'/v1/projects'))\n\n if status != 200:\n sys.exit(2)\n if len(data['data']) != 1:\n log(\"Found %d environments when expecting one\" % len(data['data']))\n sys.exit(2)\n\n status, environment = rancherLink(args,data['data'][0],'self')\n return environment\n\nargs = getArgs()\n\nenvironment = getEnvironment(args)\nservice = rancherTraverse(args,environment,[\n ('environments', args['stack']),\n ('services', args['service'])])\n\nstatus, instances = rancherLink(args,service,'instances')\nif not instances:\n print \"No instances of service %s in stack %s found\" % (args['stack'],args['service'])\n\n# if we are asked for a single instance then make it a random one\nif args['one']:\n random.shuffle(instances['data'])\n\nfor instance in instances['data']:\n if instance['state'] == 'running':\n status, ports = rancherLink(args,instance,'ports')\n if args['port'] == 0 and len(ports['data']) > 1:\n log(\"You must specify the internal port if multiple ports are exposed\")\n else:\n for port in ports['data']:\n if port['state'] == 'active' and port['publicPort'] and (args['port'] == 0 or port['privatePort'] == args['port']):\n status, private_ip = rancherLink(args,port,'privateIpAddress')\n status, public_ip = rancherLink(args,port,'publicIpAddress')\n if args['uri']:\n print \"%s://%s:%d%s\" % (args['proto'],public_ip['address'],port['publicPort'],args['uri'])\n else:\n print \"%s:%d\" % (public_ip['address'],port['publicPort']),\n if args['one']:\n sys.exit(0)\n"
},
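The script above works by following the `links` dictionaries the Rancher v1 API embeds in every resource, starting from `/v1/projects`. The core pattern reduces to a few lines; a minimal sketch, with placeholder host and credentials rather than real values:

```python
# Minimal sketch, not part of the dataset payload: the link-following
# pattern find_service.py is built on. Host and keys are placeholders.
import requests
from requests.auth import HTTPBasicAuth

auth = HTTPBasicAuth('ACCESS_KEY', 'SECRET_KEY')
base = 'http://192.0.2.10:8080'  # placeholder Rancher server

projects = requests.get(base + '/v1/projects', auth=auth).json()
environment = requests.get(projects['data'][0]['links']['self'],
                           auth=auth).json()
# 'environments' is the Rancher v1 link name for stacks, as in the script.
stacks = requests.get(environment['links']['environments'],
                      auth=auth).json()
print([stack['name'] for stack in stacks['data']])
```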
{
"alpha_fraction": 0.31707316637039185,
"alphanum_fraction": 0.6416510343551636,
"avg_line_length": 43.33333206176758,
"blob_id": "19392f14571cff5806818d3fcce215ac12ee58ef",
"content_id": "48fab29c8d13aea148cf2929c1ca1e9acd8a288a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 533,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 12,
"path": "/tools/runconsul.sh",
"repo_name": "sarlindo-zz/rancher",
"src_encoding": "UTF-8",
"text": "#sudo docker run -d -p 8400:8400 -p 8500:8500 -p 8600:53/udp -h node1 progrium/consul -server -bootstrap-expect 1\n\nsudo docker run -d --name consul -h $HOSTNAME -v /mnt:/data \\\n -p 192.168.33.10:8300:8300 \\\n -p 192.168.33.10:8301:8301 \\\n -p 192.168.33.10:8301:8301/udp \\\n -p 192.168.33.10:8302:8302 \\\n -p 192.168.33.10:8302:8302/udp \\\n -p 192.168.33.10:8400:8400 \\\n -p 192.168.33.10:8500:8500 \\\n -p 172.17.0.1:153:53/udp \\\n progrium/consul -server -advertise 192.168.33.10 -bootstrap-expect 1 -ui-dir /ui \n"
},
{
"alpha_fraction": 0.6601941585540771,
"alphanum_fraction": 0.6957928538322449,
"avg_line_length": 29.899999618530273,
"blob_id": "b20abc69103f75bce29d6fc08a537b1cfa728465",
"content_id": "10ddf9655d9c1a76a42c3625cf256dd5f73d73ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "YAML",
"length_bytes": 309,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 10,
"path": "/ghost/docker-compose.yml",
"repo_name": "sarlindo-zz/rancher",
"src_encoding": "UTF-8",
"text": "ghost:\n image: ghost:0.7.1\n ports:\n - 2368:2368\n #volume_driver: convoy\n volumes:\n - /data/nfs:/var/lib/ghost\n labels:\n io.rancher.scheduler.affinity:container_label_ne: io.rancher.stack_service.name=$${stack_name}}/$${service_name}}\n io.rancher.scheduler.affinity:host_label: type=appserver\n"
},
{
"alpha_fraction": 0.669767439365387,
"alphanum_fraction": 0.734883725643158,
"avg_line_length": 34.83333206176758,
"blob_id": "be1ec381b16195b18c8d1367230640f01d03fec1",
"content_id": "b06cdda48badb39274efd23c2d53fb62641deebe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 215,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 6,
"path": "/tools/setenv.sh",
"repo_name": "sarlindo-zz/rancher",
"src_encoding": "UTF-8",
"text": "# Set the url that Rancher is on\nexport RANCHER_URL=http://192.168.8.100:8383/\n# Set the access key, i.e. username\nexport RANCHER_ACCESS_KEY=1A8AB41D362CD61B8E8F\n# Set the secret key, i.e. password\nexport RANCHER_SECRET_KEY=TPdkbtDShxeBSXPfTtHkCUFyCKUeojmgFAzmfQPm\n"
},
{
"alpha_fraction": 0.5405565500259399,
"alphanum_fraction": 0.5820012092590332,
"avg_line_length": 32.117645263671875,
"blob_id": "3bc3c927d3ab0ac05d2a59221fed1049222e2c4c",
"content_id": "99aca3b52f75740caae31b8212fde4f8f90bbf82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": true,
"language": "Ruby",
"length_bytes": 1689,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 51,
"path": "/Vagrantfile",
"repo_name": "sarlindo-zz/rancher",
"src_encoding": "UTF-8",
"text": "ANSIBLE_GROUPS = {\n \"masternodes\" => [\"node1\"],\n \"slavenodes\" => [\"node2\", \"node3\"],\n \"all_groups:children\" => [\"masternodes\", \"slavenodes\"]\n }\n\nVagrant.configure(2) do |config|\n\n config.vm.box = \"ubuntu/trusty64\"\n config.vm.synced_folder '.', '/vagrant', nfs: true\n\n config.vm.define \"node1\" do |node1|\n node1.vm.network \"private_network\", ip: \"192.168.8.100\"\n node1.vm.hostname = \"rancher-server\"\n node1.vm.provider \"virtualbox\" do |v|\n v.customize [\"modifyvm\", :id, \"--memory\", \"924\"]\n v.customize [\"modifyvm\", :id, \"--cpus\", \"1\"]\n end\n node1.vm.provision \"ansible\" do |ansible|\n ansible.playbook = \"playbook.yml\"\n ansible.groups = ANSIBLE_GROUPS\n end\n end\n\n config.vm.define \"node2\" do |node2|\n node2.vm.network \"private_network\", ip: \"192.168.8.101\"\n node2.vm.hostname = \"rancher-slave1\"\n node2.vm.provider \"virtualbox\" do |v|\n v.customize [\"modifyvm\", :id, \"--memory\", \"1024\"]\n v.customize [\"modifyvm\", :id, \"--cpus\", \"2\"]\n end\n node2.vm.provision \"ansible\" do |ansible|\n ansible.playbook = \"playbook.yml\"\n ansible.groups = ANSIBLE_GROUPS\n end\n end\n\n config.vm.define \"node3\" do |node3|\n node3.vm.network \"private_network\", ip: \"192.168.8.102\"\n node3.vm.hostname = \"rancher-slave2\"\n node3.vm.provider \"virtualbox\" do |v|\n v.customize [\"modifyvm\", :id, \"--memory\", \"1024\"]\n v.customize [\"modifyvm\", :id, \"--cpus\", \"2\"]\n end\n node3.vm.provision \"ansible\" do |ansible|\n ansible.playbook = \"playbook.yml\"\n ansible.groups = ANSIBLE_GROUPS\n end\n end\n\nend\n"
},
{
"alpha_fraction": 0.6521739363670349,
"alphanum_fraction": 0.760869562625885,
"avg_line_length": 37.33333206176758,
"blob_id": "5f83e73fbd86e173ec70e4db3f49b5a131b38641",
"content_id": "75b4be9df9027ce06ce7d37ff3b64ea4d3812a75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 230,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 6,
"path": "/glusterfs/setenv.sh",
"repo_name": "sarlindo-zz/rancher",
"src_encoding": "UTF-8",
"text": "# Set the url that Rancher is on\nexport RANCHER_URL=http://192.168.8.100:8383/\n# Set the access key, i.e. username\nexport RANCHER_ACCESS_KEY=022EA4C0CE88DA5E0F47\n# Set the secret key, i.e. password\nexport RANCHER_SECRET_KEY=Tm4ZApj3HKYhVhZELiNVnQVMDFfa8p5XDmet7TD4\n"
},
{
"alpha_fraction": 0.6527777910232544,
"alphanum_fraction": 0.75,
"avg_line_length": 35,
"blob_id": "a50a0f57116a5d3b8c3548dcc51990825c143071",
"content_id": "191e4210ecbf83c0e8c236be3852b9ac5e595684",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 4,
"path": "/ghost/setenv.sh",
"repo_name": "sarlindo-zz/rancher",
"src_encoding": "UTF-8",
"text": "# Set the url that Rancher is on\nexport RANCHER_URL=http://192.168.8.100:8383/\nexport RANCHER_ACCESS_KEY=B299EECCC76F2CC30BD3 \nexport RANCHER_SECRET_KEY=j9K3t5jFkoXkqqFwHWM97bn8Fa5WSBdciuWTdu12\n"
}
] | 8 |
doraycweng/SI507_HW13-flask | https://github.com/doraycweng/SI507_HW13-flask | 183bce3588175c40c36ca4a6a5dcf13456cc9d8b | d52577570f7fbb1e10601570f7a7f7d335321bb2 | 8b1938e1a1c1aef75441d71ed132a39d5de7305e | refs/heads/master | 2022-12-09T23:56:51.433575 | 2018-04-27T13:35:04 | 2018-04-27T13:35:04 | 131,300,815 | 0 | 0 | null | 2018-04-27T13:34:35 | 2018-04-27T13:35:24 | 2022-12-08T01:00:40 | Python | [
{
"alpha_fraction": 0.6368600726127625,
"alphanum_fraction": 0.6464163661003113,
"avg_line_length": 32.29545593261719,
"blob_id": "fd2aba7538e1f10f40061d837bde057750a2908f",
"content_id": "1ccb7c645e0b9a4c69f1c67e5e859dc9f973ee79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1465,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 44,
"path": "/top_headlines.py",
"repo_name": "doraycweng/SI507_HW13-flask",
"src_encoding": "UTF-8",
"text": "\nfrom flask import Flask, render_template, url_for\nfrom secrets_example import *\nimport requests\nimport json\nimport datetime\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('welcome.html')\n\[email protected]('/user/<user>')\[email protected]('/user/<user>/<topic>')\ndef show_user(user,topic=None):\n if not topic:\n topic='technology'\n base_url = \"https://api.nytimes.com/svc/topstories/v2/\"+topic+\".json\"\n params={'api-key': api_key}\n results = json.loads(requests.get(base_url, params).text)['results']\n return_value=[ {'title':i['title'],'url':i['url']} for i in results][:5]\n return render_template('user.html', name=user, newsdata=return_value, topic=topic)\n\[email protected]('/user/<user>/time')\ndef greeter(user):\n base_url = \"https://api.nytimes.com/svc/topstories/v2/technology.json\"\n params={'api-key': api_key}\n results = json.loads(requests.get(base_url, params).text)['results']\n return_value=[ {'title':i['title'],'url':i['url']} for i in results][:5]\n currentTime = datetime.datetime.now()\n if currentTime.hour < 12: \n greeting = 'Good morning,'\n elif 12 <= currentTime.hour < 16:\n greeting = 'Good afternoon,'\n elif 16 <= currentTime.hour < 20:\n greeting = 'Good evening,'\n else:\n greeting = 'Good night,'\n return render_template('greeter.html', name=user, newsdata=return_value, greeting=greeting)\n\n\nif __name__ == '__main__':\n app.run(debug=True)"
},
{
"alpha_fraction": 0.7237569093704224,
"alphanum_fraction": 0.8176795840263367,
"avg_line_length": 44.25,
"blob_id": "e13c41f2bf50d857c5f569104606f16df4c3351f",
"content_id": "ea8b5bd3a12388b8c7b543e283fd8b364a1c8473",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 181,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 4,
"path": "/README.md",
"repo_name": "doraycweng/SI507_HW13-flask",
"src_encoding": "UTF-8",
"text": "# 507-206-hw13-flask\nInstructions for homework 13 can be found here: https://docs.google.com/document/d/1a19oh_OSYaPwK7JzlM4iSuKvuAGoRjC1lTt_V6iNFSM/edit?usp=sharing\n\nHappy coding!\n"
}
] | 2 |
ctwise/alfredworkflows | https://github.com/ctwise/alfredworkflows | 72804dc4dd77a5bba32ae193ffcf732d7960924b | dd32aa3101301f9f53cfda440a2ee6c86d1c20d4 | 456ed6cfda1518d55d0b969aeb41b3f19b87f997 | refs/heads/master | 2021-01-16T21:02:35.305117 | 2013-05-01T14:46:58 | 2013-05-01T14:46:58 | 9,793,157 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5532599687576294,
"alphanum_fraction": 0.5564797520637512,
"avg_line_length": 29.801652908325195,
"blob_id": "64b15dc80fef8fb5c36257cc0bbbd24be8fd8b9b",
"content_id": "175b0c418bce6f53fa8a043664068cefc6aac8a9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3727,
"license_type": "permissive",
"max_line_length": 98,
"num_lines": 121,
"path": "/net.isometry.alfred.pipe/pipe.py",
"repo_name": "ctwise/alfredworkflows",
"src_encoding": "UTF-8",
"text": "#-*- coding: utf-8 -*-\n# pipe.alfredworkflow, v1.0\n# Robin Breathe, 2013\n\nimport alfred\nimport json\n\nfrom fnmatch import fnmatch\nfrom os import path\nfrom time import strftime\n\n_MAX_RESULTS=9\n_ALIASES_FILE=u'aliases.json'\n_BUILTINS_FILE=u'builtins.json'\n_TIMESTAMP=u'%Y-%m-%d @ %H:%M'\n\ndef fetch_aliases(_path=_ALIASES_FILE):\n file = path.join(alfred.work(volatile=False), _path)\n if not path.isfile(file):\n return {}\n return json.load(open(file, 'r'))\n\ndef write_aliases(_dict, _path=_ALIASES_FILE):\n file = path.join(alfred.work(volatile=False), _path)\n json.dump(_dict, open(file, 'w'), indent=4, separators=(',', ': '))\n\ndef define_alias(_dict, definition):\n if u'=' in definition:\n (alias, pipe) = definition.split(u'=', 1)\n else:\n (alias, pipe) = (definition, u'')\n\n if not alias:\n return alfred.xml([alfred.Item(\n attributes = {'uid': u'pipe:help', 'valid': u'no'},\n title = u\"alias NAME=VALUE\",\n subtitle = u'Terminate VALUE with @@ to save',\n icon = u'icon.png'\n )])\n\n if pipe and pipe.endswith('@@'):\n pipe = pipe[:-2]\n _dict[alias] = pipe\n write_aliases(_dict)\n return alfred.xml([alfred.Item(\n attributes = {'uid': u'pipe:{}'.format(pipe) , 'valid': u'no', 'autocomplete': alias},\n title = u\"alias {}={}\".format(alias, pipe),\n subtitle = u'Alias saved! TAB to continue',\n icon = u'icon.png'\n )])\n \n return alfred.xml([alfred.Item(\n attributes = {'uid': u'pipe:{}'.format(pipe) , 'valid': u'no'},\n title = u\"alias {}={}\".format(alias, pipe or 'VALUE'),\n subtitle = u'Terminate with @@ to save',\n icon = u'icon.png'\n )])\n\ndef exact_alias(_dict, query):\n pipe = _dict[query]\n return alfred.xml([alfred.Item(\n attributes = {'uid': u'pipe:{}'.format(pipe), 'arg': pipe},\n title = pipe,\n subtitle = u'(expanded alias)',\n icon = u'icon.png'\n )])\n\ndef match_aliases(_dict, query):\n results = []\n for (alias, pipe) in _dict.iteritems():\n if (pipe != query) and fnmatch(alias, u'{}*'.format(query)):\n results.append(alfred.Item(\n attributes = {'uid': u'pipe:{}'.format(pipe) , 'arg': pipe, 'autocomplete': pipe},\n subtitle = pipe,\n title = u'{} (alias)'.format(alias),\n icon = u'icon.png'\n ))\n return results\n\ndef fetch_builtins(_path=_BUILTINS_FILE):\n return json.load(open(_path, 'r'))\n\ndef match_builtins(_dict, query):\n results = []\n for (pipe, desc) in _dict.iteritems():\n lower_pipe = pipe.lower()\n lower_query = query.lower()\n lower_desc = desc.lower()\n if (query == '') or (lower_query in lower_pipe) or (lower_query in lower_desc):\n results.append(alfred.Item(\n attributes = {'uid': u'pipe:{}'.format(pipe) , 'arg': pipe, 'autocomplete': pipe},\n subtitle = pipe,\n title = u'{} (builtin)'.format(desc),\n icon = u'icon.png'\n ))\n return results\n\ndef verbatim(query):\n return alfred.Item(\n attributes = {'uid': u'pipe:{}'.format(query), 'arg': query},\n title = query,\n subtitle = None,\n icon = u'icon.png'\n )\n\ndef complete(query, maxresults=_MAX_RESULTS):\n aliases = fetch_aliases()\n builtins = fetch_builtins()\n\n if query.startswith('alias '):\n return define_alias(aliases, query[6:])\n\n results = []\n\n for matches in (\n match_aliases(aliases, query),\n match_builtins(builtins, query)\n ):\n results.extend(matches)\n\n return alfred.xml(results, maxresults=maxresults)\n"
}
] | 1 |
LialinMaxim/Alnicko | https://github.com/LialinMaxim/Alnicko | 17b15ff3ac3794a8f5e6681aae81c29b28f53341 | 65878e390e7f94d45998e771526db03a5f9c25e3 | 2e4e0e238811ef640c2e51e942bdba3e348bb18b | refs/heads/master | 2023-07-30T21:00:04.385245 | 2019-10-18T09:08:42 | 2019-10-18T09:08:42 | 214,381,169 | 0 | 0 | null | 2019-10-11T08:15:14 | 2019-10-18T09:09:05 | 2023-05-22T22:30:52 | Python | [
{
"alpha_fraction": 0.5575647950172424,
"alphanum_fraction": 0.5877034068107605,
"avg_line_length": 28.625,
"blob_id": "85d11d584d799cae2f5c69f406a5d1591e95573d",
"content_id": "529be1b8a3961ba5ae423721cc6028a71b8b6f95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1659,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 56,
"path": "/test_sender.py",
"repo_name": "LialinMaxim/Alnicko",
"src_encoding": "UTF-8",
"text": "import multiprocessing\nimport time\nfrom unittest import TestCase, mock\n\nfrom files_sender import Uploader\n\n\ndef mock_file_sender(path_to_file, ):\n \"\"\"\n Simulator of the loading process\n\n :param path_to_file: string\n :return: (path_to_file, status code, status name)\n \"\"\"\n\n time.sleep(.6)\n if '7' in path_to_file:\n result = path_to_file, 403, 'Forbidden'\n elif '9' in path_to_file:\n result = path_to_file, 404, 'Not Found'\n else:\n result = path_to_file, 200, 'OK'\n print(f'{multiprocessing.current_process().name} -> {result}')\n return result\n\n\nclass TestUploader(TestCase):\n\n def test_file_not_found(self):\n uploader = Uploader([], 2, None)\n result = uploader.file_sender('3.txt')\n expect = ('3.txt', 404, 'File not found')\n self.assertEqual(result, expect)\n\n def test_send_file(self):\n uploader = Uploader([], 2, None)\n with mock.patch('requests.post'):\n file, status_code, reason = uploader.file_sender('requirements.txt')\n self.assertEqual(file, 'requirements.txt')\n\n def test_start(self):\n test_files_list = [f'{i}.txt' for i in range(7)]\n m = multiprocessing.Manager()\n q = m.Queue()\n\n uploader = Uploader(test_files_list, 2, q)\n uploader.start()\n\n expect_errors = {'0.txt': 404, '1.txt': 404, '2.txt': 404, '3.txt': 404,\n '4.txt': 404, '5.txt': 404, '6.txt': 404}\n self.assertEqual(uploader.errors, expect_errors)\n\n q_result = []\n while uploader.is_active():\n q_result.append(q.get())\n self.assertEqual(len(q_result), 7)\n"
},
{
"alpha_fraction": 0.553735077381134,
"alphanum_fraction": 0.557900607585907,
"avg_line_length": 29.2605037689209,
"blob_id": "9aad10c98959f04abaf2615a6fe62a17c80996ec",
"content_id": "ef7294b83510c25623fa89c2f939244dae0c03a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3601,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 119,
"path": "/files_sender.py",
"repo_name": "LialinMaxim/Alnicko",
"src_encoding": "UTF-8",
"text": "\"\"\"\nWrite module to upload some files to the remote server.\nUploading should be done in parallel.\nPython multiprocessing module should be used.\nReal uploading is not part of this task, use some dummy function for emulate upload.\n\nInput data:\n- List of files to upload\n- Maximum number of parallel uploading process\n- Queue for passing progress to the caller\n\nOutput data:\n- Uploading progress\n- Final uploading report (uploaded files and not uploaded)\n\"\"\"\n\nimport multiprocessing\nimport ntpath\nimport os\nimport time\nimport datetime\n\nimport requests\n\n\nclass Uploader:\n server_url = 'http://my_site.com/test'\n\n def __init__(self, files, num_process, queue, worker=None):\n self.files_list = files\n self.num_process = num_process\n self.queue = queue\n self.worker = worker or self.file_sender\n self.errors = {}\n self.done = 0\n self._complete = 0\n self._star_time = None\n self._end_time = None\n self._is_terminate = None\n\n def __str__(self):\n return f'< Uploader obj - ' \\\n f'files: {len(self.files_list)}, ' \\\n f'done: {self.done}, ' \\\n f'errors: {len(self.errors)}, ' \\\n f'loading time: {self.get_loading_time()} sec >'\n\n def start(self):\n self._star_time = datetime.datetime.utcnow()\n self._is_terminate = False\n with multiprocessing.Pool(processes=self.num_process) as pool:\n for result in pool.imap_unordered(self.worker, self.files_list):\n f, status_code, status_name = result\n\n if status_code != 200:\n self.errors[f] = status_code\n self.queue.put({\n 'file': f,\n 'error': status_name\n })\n else:\n self.done += 1\n self.queue.put({\n 'file': f,\n 'done': status_name\n })\n self._complete += 1\n\n # interrupted all process\n if self._is_terminate:\n pool.terminate()\n self._end_time = datetime.datetime.utcnow()\n return 'The pool was terminated'\n\n self._end_time = datetime.datetime.utcnow()\n\n def stop(self):\n self._is_terminate = True\n\n def file_sender(self, path_to_file):\n time.sleep(.3)\n if os.path.isfile(path_to_file):\n head_path, file_name = ntpath.split(path_to_file)\n with open(path_to_file, 'rb') as f:\n r = requests.post(self.server_url, files={file_name: f})\n return path_to_file, r.status_code, r.reason\n else:\n return path_to_file, 404, 'File not found'\n\n def is_active(self):\n return self.queue.empty() is False\n\n def get_loading_time(self):\n loading_time = 0\n if self._end_time:\n loading_time = (self._end_time - self._star_time).total_seconds()\n return loading_time\n\n\nif __name__ == '__main__':\n \"\"\" Shows how it works. \"\"\"\n\n from test_sender import mock_file_sender\n\n test_files_list = [f'{i}.txt' for i in range(14)]\n\n m = multiprocessing.Manager()\n q = m.Queue()\n\n uploader = Uploader(test_files_list, 3, q, worker=mock_file_sender)\n uploader.start()\n\n print('\\n---Lading complete---')\n while uploader.is_active():\n print('Result:', q.get())\n\n print('\\n', uploader)\n print('loading errors:', uploader.errors)\n print('loading time:', uploader.get_loading_time(), 'seconds')\n"
}
] | 2 |
mathieuhayek/oeis | https://github.com/mathieuhayek/oeis | a869c11902937d1e24793d5c52274f2e00bab727 | d94e8f9b7cc835fdbbe0b6b2e3276c312a3967c9 | 13961ea2a04eacbee25b12b1dbb26f61952d354a | refs/heads/master | 2020-09-12T14:26:23.203367 | 2019-11-18T14:30:43 | 2019-11-18T14:30:43 | 222,452,768 | 0 | 0 | null | 2019-11-18T13:13:47 | 2019-11-18T12:53:37 | 2019-11-18T12:53:35 | null | [
{
"alpha_fraction": 0.68544602394104,
"alphanum_fraction": 0.7699530720710754,
"avg_line_length": 25.625,
"blob_id": "a4efaa01ec287df8051f3dbf9caa3af5dd822d2b",
"content_id": "044c46a4a0cb6f0cb24221be92efc180626ad14b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 213,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 8,
"path": "/tests/test_A181391.py",
"repo_name": "mathieuhayek/oeis",
"src_encoding": "UTF-8",
"text": "from oeis import A181391\nfrom hypothesis import given\nfrom hypothesis.strategies import integers\n\n\n@given(integers(min_value=1, max_value=20000))\ndef test_sequence_length(x):\n assert len(A181391(limit=x)) == x\n"
}
] | 1 |
jvoisin/pyste | https://github.com/jvoisin/pyste | 504dea5ed1fe36f056d359d6630017edbf36bebf | 335ee1246d543722ecec9eb8894abf52509c598d | 44efc2a64a8224fb99b045d0954e4b2cd5346eaa | refs/heads/master | 2016-09-05T16:19:26.820518 | 2012-11-23T13:14:16 | 2012-11-23T13:14:16 | 6,104,367 | 7 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 14.25,
"blob_id": "3f43f9b431e0cd0a01f785245273af33a101b94f",
"content_id": "187a33636b271b2dcc8e00c506117bfed60ee53a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 60,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 4,
"path": "/README.md",
"repo_name": "jvoisin/pyste",
"src_encoding": "UTF-8",
"text": "pyste\n=====\n\nA simple pastebin powered by flask and pygments"
},
{
"alpha_fraction": 0.5940623879432678,
"alphanum_fraction": 0.6001212000846863,
"avg_line_length": 29.850467681884766,
"blob_id": "08531f47a2bcc021e83275fedba0734b183416ed",
"content_id": "88eb9f56d39834f3c490e180a9695825f6f36659",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3301,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 107,
"path": "/flaskr.py",
"repo_name": "jvoisin/pyste",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n'''\nPyste\n\nA dead-simple pastebin, using Flask and Pygments\n\n:copyright: (c) 2012 by Julien (jvoisin) Voisin.\n:license: GPL.\n'''\n\nimport sqlite3\nimport hashlib\nimport datetime\nimport time\n\nfrom contextlib import closing\nfrom flask import Flask, render_template, request, g, flash, redirect, url_for\n\nfrom pygments import highlight\nfrom pygments.lexers import guess_lexer\nfrom pygments.formatters import HtmlFormatter\n\nDATABASE = '/tmp/pyste.db'\nSECRET_KEY = 'zibzap'\nDEBUG = True\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\ndef connect_db():\n return sqlite3.connect(app.config['DATABASE'],\n detect_types=sqlite3.PARSE_DECLTYPES)\n\ndef init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\[email protected]_request\ndef before_request():\n g.db = connect_db()\n\[email protected]_request\ndef teardown_request(exception):\n if hasattr(g, 'db'):\n g.db.close()\n\[email protected]('/', methods=['GET', 'POST'])\ndef index():\n ''' Main page : please enter your paste '''\n if request.method == 'POST':\n if not request.form['input']:\n flash('Please type a content to paste')\n return render_template('index.html')\n\n delta = datetime.timedelta(seconds=int(request.form['expiration']))\n expiration = datetime.datetime.now() + delta\n if request.form['expiration'] == '0':\n expiration = datetime.datetime(1, 1, 1)\n\n identifier = hashlib.sha1(request.form['input'] + time.ctime()).hexdigest()[:8]\n paste = highlight(\n request.form['input'],\n guess_lexer(request.form['input']),\n HtmlFormatter(linenos='table')\n )\n\n g.db.execute('INSERT INTO PASTE (id, title, expiration, content) VALUES (?, ?, ?, ?)',\n (\n identifier,\n request.form['title'],\n expiration,\n paste\n )\n )\n g.db.commit()\n return render_template('index.html', identifier=identifier, url=request.url)\n return render_template('index.html')\n\[email protected]('/<identifier>')\ndef show_paste(identifier):\n ''' Show the <id> paste if it exists, index instead '''\n cursor = g.db.execute('SELECT * FROM PASTE WHERE id = ?', [identifier])\n result = [dict((cursor.description[idx][0], value)\n for idx, value in enumerate(row)) for row in cursor.fetchall()]\n paste = result[0] if result else None\n\n try:\n if paste['expiration'] - datetime.datetime.now() < datetime.timedelta(seconds=1):\n g.db.execute('DELETE FROM PASTE WHERE id = ?', [identifier])\n g.db.commit()\n\n if paste['expiration'] == datetime.datetime(1, 1, 1): # burn after reading\n flash('This paste will be burned when you close it')\n paste.pop('id')\n return render_template('paste.html', paste=paste)\n raise TypeError\n except TypeError:\n flash('No paste for id ' + identifier + '.')\n return redirect(url_for('index'))\n\n return render_template('paste.html', paste=paste, url=request.url)\n\nif __name__ == '__main__':\n init_db()\n app.run()\n"
},
{
"alpha_fraction": 0.7696969509124756,
"alphanum_fraction": 0.7696969509124756,
"avg_line_length": 19.75,
"blob_id": "6d595e27b59d77f64e0caa59feb7cd9f27bde5b9",
"content_id": "9840ce92a87d9061c6d9517d7decfd6a5ba1b425",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 165,
"license_type": "no_license",
"max_line_length": 32,
"num_lines": 8,
"path": "/schema.sql",
"repo_name": "jvoisin/pyste",
"src_encoding": "UTF-8",
"text": "drop table if exists PASTE;\ncreate table PASTE (\n\tid string not null primary key,\n\ttitle string,\n\texpiration timestamp,\n\tcontent string not null,\n\tpassword string\n);"
}
] | 3 |
jeannas/jet_tracking | https://github.com/jeannas/jet_tracking | 3f00ae77c01e863eb23fef4fffa54feb9dbeb07c | 5de06febcc56f0f46b9e01ae2b6c1afff8f45583 | d3e1d9fa1c3a2eda534f4044071b70faec37d5e4 | refs/heads/master | 2020-03-28T07:04:07.925046 | 2019-09-14T00:59:32 | 2019-09-14T00:59:32 | 147,878,985 | 0 | 0 | null | 2018-09-07T22:03:11 | 2018-09-07T22:02:13 | 2018-09-07T22:02:12 | null | [
{
"alpha_fraction": 0.6971830725669861,
"alphanum_fraction": 0.7464788556098938,
"avg_line_length": 34.5,
"blob_id": "add27fc6c3667acd345da3a12dbdb3a2218e4a13",
"content_id": "92c9844601128ae0ff9319f8215d3ecdcbaa94d9",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 142,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 4,
"path": "/.flake8",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "[flake8]\nexclude = .git,__pycache__,build,dist,versioneer.py,jet_tracking/_version.py,docs/source/conf.py\nignore = W504\nmax-line-length = 100\n"
},
{
"alpha_fraction": 0.5705236196517944,
"alphanum_fraction": 0.6017736196517944,
"avg_line_length": 28.600000381469727,
"blob_id": "43506da01c19150a51892c112d89b4796e7edf3d",
"content_id": "0a6c06af017770c811035f74538d27cc959082b2",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2368,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 80,
"path": "/jet_tracking/tests/test_cam_utils.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "import pytest\nimport numpy as np\nfrom .. import cam_utils\nfrom . import conftest\n\n\[email protected]()\ndef onaxis_image():\n # TODO: an actual image would be nice...\n return np.random.random((100, 100))\n\n\ndef test_smoke_jet_detect(onaxis_image):\n print(cam_utils.jet_detect(onaxis_image))\n\n\ndef test_smoke_get_jet_z():\n cam_utils.get_jet_z(rho=0.0, theta=0.0, roi_y=1, roi_z=1, pxsize=0.001,\n cam_y=1, cam_z=1, beam_y=1, beam_z=1, cam_pitch=1)\n\n\ndef test_smoke_get_jet_x():\n cam_utils.get_jet_x(rho=0.0, theta=0.0, roi_x=1, roi_y=1, pxsize=0.001,\n cam_x=1, cam_y=1, beam_x=1, beam_y=1, cam_roll=1)\n\n\ndef test_smoke_get_jet_pitch():\n cam_utils.get_jet_pitch(theta=0.0, cam_pitch=1)\n\n\ndef test_smoke_get_jet_roll():\n cam_utils.get_jet_roll(theta=0.0, cam_roll=1)\n\n\ndef test_smoke_get_jet_width(onaxis_image):\n cam_utils.get_jet_width(im=onaxis_image, rho=0.0, theta=1.0)\n\n\ndef test_smoke_get_offaxis_coords():\n cam_utils.get_offaxis_coords(cam_beam_y=0.0, cam_beam_z=0.0,\n cam_pitch=1, pxsize=0.001)\n\n\ndef test_smoke_get_cam_coords():\n cam_utils.get_cam_coords(cam_beam_x=0.0, cam_beam_y=0.0,\n cam_roll=1, pxsize=0.001)\n\n\ndef test_smoke_get_cam_pitch(onaxis_image):\n cam_utils.get_cam_pitch([onaxis_image,\n np.random.random(onaxis_image.shape)])\n\n\ndef test_smoke_get_cam_roll(onaxis_image):\n cam_utils.get_cam_roll([onaxis_image,\n np.random.random(onaxis_image.shape)])\n\n\ndef test_smoke_get_cam_pitch_pxsize(onaxis_image):\n cam_utils.get_cam_pitch_pxsize([onaxis_image,\n np.random.random(onaxis_image.shape)],\n positions=[0, 1])\n\n\ndef test_smoke_get_cam_roll_pxsize(onaxis_image):\n cam_utils.get_cam_roll_pxsize([onaxis_image,\n np.random.random(onaxis_image.shape)],\n positions=[0, 1])\n\n\ndef test_smoke_get_nozzle_shift(onaxis_image):\n cam_utils.get_nozzle_shift(\n onaxis_image, np.random.random(onaxis_image.shape),\n cam_roll=1, pxsize=0.001)\n\n\ndef test_smoke_get_burst_avg(jet_control):\n roi_image = jet_control.camera.ROI_image\n conftest.set_random_image(roi_image)\n cam_utils.get_burst_avg(2, roi_image)\n"
},
{
"alpha_fraction": 0.6540284156799316,
"alphanum_fraction": 0.6581753492355347,
"avg_line_length": 32.97986602783203,
"blob_id": "c16f40ecdf1ccd2a2c4fefe6cd41d060fe246ef6",
"content_id": "3cc3ab105a7f3286c857c95967b1526643267c0a",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5064,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 149,
"path": "/jet_tracking/jettracking.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "'''\nCalls the GUI for jet tracking. Ultimately only this file should need to be run, and the GUI will\ncontrol when the jet tracking methods e.g. calibrate(), jet_detect(), etc should be run\n'''\n\nfrom qtpy.QtCore import QThread\nfrom pydm import Display\n\nimport jt_utils\nimport jet_control\nfrom time import sleep\n\n\nclass TrackThread(QThread):\n\n def __init__(self):\n # def __init__(self, injector, camera, cspad, stopper, pulse_picker, wave8, params):\n super().__init__()\n\n '''\n self.stopper = stopper\n self.pulse_picker = pulse_picker\n self.wave8 = wave8\n self.cspad = cspad\n self.camera = camera\n self.injector = injector\n self.params = params\n '''\n\n def run(self):\n while not self.isInterruptionRequested():\n \n '''\n # check devices first\n # check if stopper is in\n if (jt_utils.get_stopper(self.stopper) == 1):\n # if stopper is in, stop jet tracking\n print('Stopper in - TRACKING STOPPED')\n self.requestInterruption()\n continue\n\n # check if pulse picker is closed\n if (jt_utils.get_pulse_picker(self.pulse_picker) == 1):\n # if pulse picker is closed, stop jet tracking\n print('Pulse picker closed - TRACKING STOPPED')\n self.requestInterruption()\n continue\n\n # check wave8\n if (jt_utils.get_wave8(self.wave8) < self.params.thresh_w8):\n # if wave8 is below threshold, continue running jet tracking but do not move\n print('Wave8 below threshold - NOT TRACKING')\n continue\n\n # check CSPAD\n # get azimuthal average from CSPAD & Wave8 data\n if (jt_utils.get_cspad(azav, params.radius.get(), gas_det) <\n self.params.intensity.get() * self.params.thresh_lo.get()):\n # if CSPAD is below lower threshold, move jet\n if (not self.params.bypass_camera()):\n # if camera is not bypassed, check if there is a jet and location of jet\n try:\n jet_control.jet_calculate_inline(self.camera, self.params)\n # if jet is more than 10 microns away from x-rays, move jet using camera feedback\n # threshold for this can be changed if needed\n if (self.params.jet_x.get() > 0.01):\n jet_control.jet_move_inline(self.injector, self.camera, self.params)\n continue\n except Exception:\n # if jet is not detected, continue running jet tracking but do not move\n print('Cannot find jet - NOT TRACKING')\n continue\n\n # if camera is bypassed or if jet is less than 10 microns away from x-rays, scan jet across x-rays to find new maximum\n jet_control.scan(self.injector, self.cspad)\n # get azimuthal average from CSPAD & Wave8 data\n intensity = jt_utils.get_cspad(azav, self.params.radius.get(), gas_det)\n self.params.intensity.put(intensity)\n\n # if CSPAD is still below upper threshold, stop jet tracking\n if (jt_utils.get_cspad(azav, self.params.radius.get(), gas_det) <\n self.params.intensity.get() * self.params.thresh_hi.get()):\n print('CSPAD below threshold - TRACKING STOPPED')\n self.requestInterruption()\n '''\n\n\nclass JetTrack(Display):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # TrackThread to run jet tracking in\n self.track_thread = TrackThread()\n # self.track_thread = TrackThread(injector, camera, cspad, stopper, pulse_picker, wave8, params)\n\n # connect GUI buttons to appropriate methods\n self.ui.calibrate_btn.clicked.connect(self.calibrate_clicked)\n self.ui.start_btn.clicked.connect(self.start_clicked)\n self.ui.stop_btn.clicked.connect(self.stop_clicked)\n\n # set initial availability of buttons\n self.ui.calibrate_btn.setEnabled(True)\n self.ui.start_btn.setEnabled(False)\n self.ui.stop_btn.setEnabled(False)\n\n def 
ui_filename(self):\n '''\n Load ui file for GUI\n '''\n\n return 'jettracking.ui'\n\n def calibrate_clicked(self):\n '''\n Runs calibration method when calibrate button is clicked\n '''\n\n self.ui.logger.write('Calibrating')\n self.ui.calibrate_btn.setEnabled(False)\n #jet_control.calibrate(injector, camera, cspad, params)\n self.ui.logger.write('Calibration complete - can now run jet tracking')\n self.ui.calibrate_btn.setEnabled(True)\n # activate start button\n self.ui.start_btn.setEnabled(True)\n return\n\n def start_clicked(self):\n '''\n Starts new thread to run jet tracking in when start button is clicked\n '''\n\n self.ui.logger.write('Running jet tracking')\n self.ui.start_btn.setEnabled(False)\n self.ui.stop_btn.setEnabled(True)\n self.ui.calibrate_btn.setEnabled(False)\n # start TrackThread\n self.track_thread.start()\n\n def stop_clicked(self):\n '''\n Stops jet tracking when stop button is clicked\n '''\n\n self.track_thread.requestInterruption()\n self.ui.logger.write('Jet tracking stopped')\n self.ui.stop_btn.setEnabled(False)\n self.ui.start_btn.setEnabled(True)\n self.ui.calibrate_btn.setEnabled(True)\n\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 11.5,
"blob_id": "a429ed3f6767e97aa6a70b74d73c5d596b4431ec",
"content_id": "2881c8b6c48061262e46445fda873a27a3203d4b",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "INI",
"length_bytes": 75,
"license_type": "permissive",
"max_line_length": 28,
"num_lines": 6,
"path": "/.coveragerc",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "[run]\nsource = jet_tracking\n\n[report]\nomit = \n jet_tracking/_version.py\n"
},
{
"alpha_fraction": 0.6338120102882385,
"alphanum_fraction": 0.6517624258995056,
"avg_line_length": 22.374046325683594,
"blob_id": "23be3289a2537779c09796714b528d5a7f8dd92e",
"content_id": "5c3ddb1b3efeec54f93e3613dfa0e65fc466ab7c",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3064,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 131,
"path": "/jet_tracking/jt_utils.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "# methods for jet tracking that do not involve the camera\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\n\ndef gaussianslope(x, a, mean, std, m, b):\n '''\n Define the function for a Gaussian on a slope (Gaussian + linear)\n\n Parameters\n ----------\n x : float\n x-coordinate\n a : float\n amplitude of Gaussian\n mean : float\n mean of Gaussian\n std : float\n standard deviation of Gaussian\n m : float\n slope of linear baseline\n b : float\n y-intercept of linear baseline\n\n Returns\n -------\n y : float\n the y-coordinate for the given x-coordinate as defined by the\n parameters given for the Gaussian on a slope\n '''\n\n return (a * np.exp(-((x-mean) / 2 / std) ** 2)) + (m * x + b)\n\n\ndef fit_cspad(azav, norm, gas_det):\n '''\n Fit the Azimuthal average of the CSPAD to a Gaussian on a slope\n\n Parameters\n ----------\n azav : ndarray\n Azimuthal average for CSPAD\n norm : ndarray\n number of pixels in each qbin\n gas_det : float\n gas detector \n\n Returns\n -------\n center : int\n radius of the diffraction ring\n intensity : float\n sum of qbins 5 above and below the center, normalized by gas detector\n '''\n\n # determine number of pixels in each qbin, only use qbins where pixels > 150\n # **can change 150 to different value if needed\n start = 0\n end = len(norm)\n begin = end / 2\n for i in range(begin):\n a = begin - i\n b = begin + i\n if (norm[a] < 150) and (a > start):\n start = a\n if (norm[b] < 150) and (b < end):\n end = b\n\n x = np.arange(len(azav))\n\n # estimate mean and standard deviation for Gaussian\n n = len(x)\n mean = sum(x*azav) / sum(azav)\n std = np.sqrt(sum((x-mean)**2)/n)\n\n # estimate slope and y-intercept for linear baseline by taking first & last\n # 50 points and fitting a line between them\n # **can change 50 to different value if needed\n x0 = 50/2\n l = len(azav)\n x1 = l - (50/2)\n y0 = np.mean(azav[0:50])\n y1 = np.mean(azav[l-50:])\n m, b = np.polyfit((x0, x1), (y0, y1), 1)\n\n # fit Gaussian + linear to Azimuthal average; provide initial parameters\n popt, pcov = curve_fit(gaussianslope, x, azav, p0=[max(azav), mean, std, m, b])\n\n # calculate radius of ring and intensity of center 10 qbins\n center = int(round(popt[1]))\n intensity = sum(azav[center-5:center+5]) / gas_det\n\n return center, intensity\n\n\ndef get_cspad(azav, r, gas_det):\n '''\n Get the intensity of the diffraction ring on the CSPAD\n\n Parameters\n ----------\n azav : ndarray\n Azimuthal average calculated from CSPAD\n r : int\n radius of diffraction ring\n gas_det : float\n gas detector\n\n Returns\n -------\n intensity : float\n sum of qbins 5 above and below the center, normalized by gas detector\n '''\n intensity = sum(azav[r-5:r+5]) / gas_det\n return intensity\n\n\n# unfinished methods for checking stopper, pulse picker, and Wave8\n# can make Ophyd devices or load specific PV needed directly into beamline.py\n\ndef get_stopper(stopper):\n return stopper\n\n\ndef get_pulse_picker(pulse_picker):\n return pulse_picker\n\n\ndef get_wave8(wave8):\n return wave8\n\n\n"
},
{
"alpha_fraction": 0.6217798590660095,
"alphanum_fraction": 0.6299765706062317,
"avg_line_length": 26.54838752746582,
"blob_id": "be505a2afa28e001022a8e231f75130aef0e62bb",
"content_id": "49100975f89eafef887eb6879d80099e6ef2a023",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 854,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 31,
"path": "/jet_tracking/tests/test_sim.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "import os.path\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom jet_tracking.sim import generate_simulation\n\n\[email protected](scope='session')\ndef simulated_data():\n return pd.read_csv(os.path.join(os.path.dirname(__file__), 'sim.csv'))\n\n\ndef test_generate_simulation(simulated_data):\n ns = generate_simulation('x', 'y', simulated_data,\n motor_precision=0,\n random_state=np.random.RandomState(0))\n assert ns.motor.precision == 0\n # Set our motor\n ns.motor.set(4)\n\n # Grab ten readings\n values = list()\n for i in range(10):\n ns.signal.trigger()\n values.append(ns.signal.get())\n\n possible_values = ns.data[ns.data['x'] == 4]['y'].unique()\n assert len(set(values)) == len(possible_values)\n assert all(val in possible_values for val in values)\n"
},
{
"alpha_fraction": 0.6110210418701172,
"alphanum_fraction": 0.6208702325820923,
"avg_line_length": 31.21285057067871,
"blob_id": "f58ae48e51edd80624c1644768b369d08ceee0ff",
"content_id": "98c95ba82d5fd39798b10df7404fc0c80a956b3a",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8021,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 249,
"path": "/jet_tracking/tests/conftest.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pytest\nimport types\nimport inspect\nfrom ..devices import (Injector, Selector, CoolerShaker, HPLC,\n PressureController, FlowIntegrator, Offaxis, Questar,\n Parameters, OffaxisParams, Control, Diffract,\n SDS)\nfrom ophyd.areadetector.plugins import PluginBase\n\nall_devices = (Injector, Selector, CoolerShaker, HPLC, PressureController,\n FlowIntegrator, Offaxis, Questar, Parameters, OffaxisParams,\n Control, Diffract, SDS)\n\n\[email protected](scope='function')\ndef devices(monkeypatch):\n '''A namespace containing faked versions of all devices\n\n Separately, this monkeypatches jet_tracking.devices so that all access\n to those devices returns the faked versions.\n '''\n from .. import devices as _devices\n from ophyd.areadetector import EpicsSignalWithRBV\n import ophyd.sim\n\n ns = types.SimpleNamespace()\n ophyd.sim.fake_device_cache[EpicsSignalWithRBV] = ophyd.sim.FakeEpicsSignal\n\n for cls in all_devices:\n name = cls.__name__\n if cls is not SDS:\n cls = ophyd.sim.make_fake_device(cls)\n\n setattr(ns, name, cls)\n monkeypatch.setattr(_devices, name, cls)\n\n # Short-circuit all plugin type checks, array data\n\n for dev in (ns.Questar, ns.Offaxis):\n components = [\n cpt.cls\n for name, cpt in dev._sig_attrs.items()\n if hasattr(cpt, 'cls') and issubclass(cpt.cls, PluginBase)\n ]\n\n for component_cls in components:\n if hasattr(component_cls, '_plugin_type'):\n monkeypatch.setattr(component_cls, '_plugin_type', None)\n return ns\n\n\[email protected](scope='function')\ndef injector(devices):\n injector = _instantiate_fake_device(\n devices.Injector,\n name='fake_PI1_injector',\n coarseX='fake_CXI:PI1:MMS:01',\n coarseY='fake_CXI:PI1:MMS:02',\n coarseZ='fake_CXI:PI1:MMS:03',\n fineX='fake_CXI:USR:MMS:01',\n fineY='fake_CXI:USR:MMS:02',\n fineZ='fake_CXI:USR:MMS:03'\n )\n\n for i, attr in enumerate(['coarseX', 'coarseY', 'coarseZ',\n 'fineX', 'fineY', 'fineZ']):\n motor = getattr(injector, attr)\n motor.user_readback.sim_put(0.1 * i)\n motor.user_setpoint.sim_put(0.0)\n motor.motor_spg.sim_put('Go')\n _patch_user_setpoint(motor)\n return injector\n\n\ndef _patch_array_data(plugin_inst):\n def get_array_data(*args, count=None, **kwargs):\n # eat the count argument, unsupported by fakeepicssignal.get()\n return orig_get(*args, **kwargs)\n\n array_data = plugin_inst.array_data\n orig_get = array_data.get\n array_data.get = get_array_data\n\n\ndef _patch_user_setpoint(motor):\n def putter(pos, *args, **kwargs):\n motor.user_setpoint.sim_put(pos, *args, **kwargs)\n motor.user_readback.sim_put(pos)\n motor._done_moving(success=True)\n\n motor.user_setpoint.sim_set_putter(putter)\n\n\[email protected](scope='function')\ndef questar(devices):\n questar = _instantiate_fake_device(\n devices.Questar,\n prefix='fake_CXI:SC1:INLINE',\n name='fake_SC1_questar',\n ROI_port='ROI1',\n ROI_stats_port='Stats1',\n ROI_image_port='IMAGE1',\n )\n\n _patch_array_data(questar.image)\n _patch_array_data(questar.ROI_image)\n return questar\n\n\[email protected](scope='function')\ndef offaxis_camera(devices):\n offaxis = _instantiate_fake_device(\n devices.Offaxis,\n prefix='fake_CXI:SC1:OFFAXIS',\n name='fake_SC1_offaxis',\n ROI_port='ROI1',\n ROI_stats_port='Stats1',\n ROI_image_port='IMAGE1',\n )\n\n _patch_array_data(offaxis.image)\n _patch_array_data(offaxis.ROI_image)\n return offaxis\n\n\[email protected](scope='function')\ndef offaxis_parameters(devices):\n params = _instantiate_fake_device(\n devices.OffaxisParams,\n prefix='fake_CXI:SC1:INLINE',\n 
name='fake_SC1_params'\n )\n params.beam_y.put(1.0)\n params.beam_z.put(1.0)\n params.beam_y_px.put(1)\n params.beam_z_px.put(1)\n params.cam_y.put(1.0)\n params.cam_z.put(1.0)\n params.pxsize.put(0.001)\n params.cam_pitch.put(1.0)\n return params\n\n\[email protected](scope='function')\ndef parameters(devices):\n params = _instantiate_fake_device(\n devices.Parameters,\n prefix='fake_CXI:SC1:INLINE',\n name='fake_SC1_params'\n )\n params.beam_x.put(1.0)\n params.beam_y.put(1.0)\n params.beam_x_px.put(1)\n params.beam_y_px.put(1)\n params.cam_x.put(1.0)\n params.cam_y.put(1.0)\n params.pxsize.put(0.001)\n params.cam_roll.put(1.0)\n return params\n\n\[email protected](scope='function')\ndef diffract(devices):\n return _instantiate_fake_device(devices.Diffract,\n prefix='fake_CXI:SC1:DIFFRACT',\n name='fake_SC1_diffract')\n\n\[email protected](scope='function')\ndef jet_control(injector, questar, parameters, diffract):\n from ..jet_control import JetControl\n return JetControl(name='test_control',\n injector=injector,\n camera=questar,\n params=parameters,\n diffract=diffract)\n\n\ndef _instantiate_fake_device(dev_cls, name=None, prefix='_prefix',\n **specified_kw):\n '''Instantiate a FakeDevice, optionally specifying some initializer kwargs\n\n If unspecified, all initializer keyword arguments will default to\n the string f\"_{argument_name}_\".\n\n All signals on the device (and its subdevices) are initialized to either 0\n or ''.\n '''\n sig = inspect.signature(dev_cls)\n ignore_kw = {'kind', 'read_attrs', 'configuration_attrs', 'parent',\n 'args', 'name', 'prefix'}\n kwargs = {name: specified_kw.get(name, f'_{param.name}_')\n for name, param in sig.parameters.items()\n if param.kind != param.VAR_KEYWORD and\n name not in ignore_kw\n }\n kwargs['name'] = (name if name is not None else dev_cls.__name__)\n kwargs['prefix'] = prefix\n dev = dev_cls(**kwargs)\n\n devs = [dev]\n while devs:\n sub_dev = devs.pop(0)\n devs.extend([getattr(sub_dev, name)\n for name in sub_dev._sub_devices])\n for name, cpt in sub_dev._sig_attrs.items():\n sig = getattr(sub_dev, name)\n try:\n if cpt.kwargs.get('string', False):\n sig.sim_put('')\n else:\n sig.sim_put(0)\n except Exception:\n ...\n\n return dev\n\n\[email protected](scope='function')\ndef device_instances(injector, questar, offaxis_camera, parameters,\n offaxis_parameters, diffract, devices):\n ns = types.SimpleNamespace()\n ns.Control = _instantiate_fake_device(devices.Control)\n ns.CoolerShaker = _instantiate_fake_device(devices.CoolerShaker)\n ns.Diffract = _instantiate_fake_device(devices.Diffract)\n ns.Diffract = diffract\n ns.FlowIntegrator = _instantiate_fake_device(devices.FlowIntegrator)\n ns.HPLC = _instantiate_fake_device(devices.HPLC)\n ns.Injector = injector\n ns.Offaxis = offaxis_camera\n ns.OffaxisParams = offaxis_parameters\n ns.Parameters = parameters\n ns.PressureController = _instantiate_fake_device(devices.PressureController)\n ns.Questar = questar\n ns.Selector = _instantiate_fake_device(devices.Selector)\n ns.SDS = SDS({})\n ns.SDS.SDS_devices.extend([ns.Selector, ns.CoolerShaker, ns.HPLC,\n ns.PressureController, ns.FlowIntegrator])\n return ns\n\n\ndef set_random_image(plugin, dimx=100, dimy=100):\n 'Set up a random image of dimensions (dimx, dimy) on the given image plugin'\n plugin.array_data.put(np.random.random((dimx, dimy)))\n plugin.array_size.width.sim_put(dimx)\n plugin.array_size.height.sim_put(dimy)\n plugin.array_size.depth.sim_put(0)\n plugin.ndimensions.sim_put(2)\n"
},
{
"alpha_fraction": 0.563896656036377,
"alphanum_fraction": 0.5766341090202332,
"avg_line_length": 33.15308380126953,
"blob_id": "44d351a7b45d8ad88b85978953b51dc838003fbd",
"content_id": "c7a7ac4c82f9aac79d062370506d9f8b790df7f8",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31011,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 908,
"path": "/jet_tracking/devices.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport epics\n\nfrom ophyd.device import Device, FormattedComponent as FCpt, Component as Cpt\nfrom ophyd.signal import EpicsSignal\nfrom ophyd.areadetector.plugins import ROIPlugin, StatsPlugin, ImagePlugin\n\nfrom pcdsdevices.areadetector.detectors import PCDSDetector\nfrom pcdsdevices.epics_motor import IMS\n\n\nclass _TableMixin:\n _table_attrs = ('value', 'units', 'desc')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._descriptions = None\n\n def _update_descriptions(self):\n adesc = {}\n for name, signal in self._signals.items():\n pvname = getattr(signal, 'pvname', None)\n adesc[name] = (epics.caget(pvname + '.DESC')\n if pvname else '')\n self._descriptions = adesc\n\n @property\n def table(self):\n \"\"\"\n Return table of Device settings\n \"\"\"\n if self._descriptions is None:\n self._update_descriptions()\n\n atable = {}\n for name, signal in sorted(self._signals.items()):\n try:\n value = signal.read()[signal.name]['value']\n except Exception:\n value = None\n\n try:\n units = signal.describe()[signal.name].get('units', '')\n except Exception:\n units = None\n\n atable[name] = {\n 'value': value,\n 'units': units,\n 'desc': self._descriptions.get(name),\n }\n\n return pd.DataFrame(atable).T.loc[:, self._table_attrs]\n\n\nclass Injector(Device, _TableMixin):\n '''An Injector which consists of 3 coarse control motors and 3 fine control motors\n\n Parameters\n ----------\n pvs : str dict\n A dictionary containing the name of the device and\n the PVs of all the injector components\n\n Attributes\n ----------\n coarseX : EpicsSignal\n The coarse control motor in the X direction\n coarseY : EpicsSignal\n The coarse control motor in the Y direction\n coarseZ : EpicsSignal\n The coarse control motor in the Z direction\n fineX : EpicsSignal\n The fine control motor in the X direction\n fineY : EpicsSignal\n The fine control motor in the Y direction\n fineZ : EpicsSignal\n The fine control motor in the Z direction\n '''\n coarseX = FCpt(IMS, '{self._coarseX}')\n coarseY = FCpt(IMS, '{self._coarseY}')\n coarseZ = FCpt(IMS, '{self._coarseZ}')\n\n fineX = FCpt(IMS, '{self._fineX}')\n fineY = FCpt(IMS, '{self._fineY}')\n fineZ = FCpt(IMS, '{self._fineZ}')\n\n def __init__(self, name,\n coarseX, coarseY, coarseZ,\n fineX, fineY, fineZ, **kwargs):\n\n self._coarseX = coarseX\n self._coarseY = coarseY\n self._coarseZ = coarseZ\n\n self._fineX = fineX\n self._fineY = fineY\n self._fineZ = fineZ\n\n super().__init__(name=name, **kwargs)\n\n\nclass Selector(Device, _TableMixin):\n '''A Selector for the sample delivery system\n\n Parameters\n ----------\n pvs : str dict\n A dictionary containing the name of the device and\n the PVs of all the selector components\n\n Attributes\n ----------\n remote_control : EpicsSignal\n Remote control enabled\n status : EpicsSignal\n Connection status for selector\n flow : EpicsSignal\n Flow\n flowstate : EpicsSignal\n State of the flow\n flowtype : EpicsSignal\n Type of the flow\n FM_rb : EpicsSignal\n\n FM_reset : EpicsSignal\n\n FM : EpicsSignal\n\n names_button : EpicsSignal\n\n couple_button : EpicsSignal\n\n names1 : EpicsSignal\n\n names2 : EpicsSignal\n\n shaker1 : EpicsSignal\n Shaker 1\n shaker2 : EpicsSignal\n Shaker 2\n shaker3 : EpicsSignal\n Shaker 3\n shaker4 : EpicsSignal\n Shaker 4\n '''\n\n # also appears on pressure controller screen?\n remote_control = FCpt(EpicsSignal, '{self._remote_control}')\n status = FCpt(EpicsSignal, '{self._status}')\n\n flow = FCpt(EpicsSignal, 
'{self._flow}')\n flowstate = FCpt(EpicsSignal, '{self._flowstate}')\n flowtype = FCpt(EpicsSignal, '{self._flowtype}')\n\n FM_rb = FCpt(EpicsSignal, '{self._FM_rb}')\n FM_reset = FCpt(EpicsSignal, '{self._FM_reset}')\n FM = FCpt(EpicsSignal, '{self._FM}')\n\n names_button = FCpt(EpicsSignal, '{self._names_button}')\n couple_button = FCpt(EpicsSignal, '{self._couple_button}')\n names1 = FCpt(EpicsSignal, '{self._names1}')\n names2 = FCpt(EpicsSignal, '{self._names2}')\n\n shaker1 = FCpt(EpicsSignal, '{self._shaker1}')\n shaker2 = FCpt(EpicsSignal, '{self._shaker2}')\n shaker3 = FCpt(EpicsSignal, '{self._shaker3}')\n shaker4 = FCpt(EpicsSignal, '{self._shaker4}')\n\n def __init__(self, name,\n remote_control, status,\n flow, flowstate, flowtype,\n FM_rb, FM_reset, FM,\n names_button, couple_button, names1, names2,\n shaker1, shaker2, shaker3, shaker4, **kwargs):\n\n self._status = status\n self._remote_control = remote_control\n\n self._flow = flow\n self._flowstate = flowstate\n self._flowtype = flowtype\n\n self._FM_rb = FM_rb\n self._FM_reset = FM_reset\n self._FM = FM\n\n self._names_button = names_button\n self._couple_button = couple_button\n self._names1 = names1\n self._names2 = names2\n\n self._shaker1 = shaker1\n self._shaker2 = shaker2\n self._shaker3 = shaker3\n self._shaker4 = shaker4\n\n super().__init__(name=name, **kwargs)\n\n\nclass CoolerShaker(Device, _TableMixin):\n '''A Cooler/Shaker for the sample delivery system\n\n Parameters\n ----------\n pvs : str dict\n A dictionary containing the PVs of all the cooler/shaker components\n name : str\n The device name\n\n Attributes\n ----------\n temperature1 : EpicsSignal\n Temperature of 1\n SP1 : EpicsSignal\n Set point of 1\n set_SP1 : EpicsSignal\n Set the set point for 1\n current1 : EpicsSignal\n Current for 1\n temperature2 : EpicsSignal\n Temperature of 2\n SP2 : EpicsSignal\n Set point of 2\n set_SP2 : EpicsSignal\n Set the set point of 2\n current2 : EpicsSignal\n Current of 2\n reboot : EpicsSignal\n Reboot the cooler/shaker\n '''\n\n temperature1 = FCpt(EpicsSignal, '{self._temperature1}')\n SP1 = FCpt(EpicsSignal, '{self._SP1}')\n set_SP1 = FCpt(EpicsSignal, '{self._set_SP1}')\n current1 = FCpt(EpicsSignal, '{self._current1}')\n\n temperature2 = FCpt(EpicsSignal, '{self._temperature2}')\n SP2 = FCpt(EpicsSignal, '{self._SP2}')\n set_SP2 = FCpt(EpicsSignal, '{self._set_SP2}')\n current2 = FCpt(EpicsSignal, '{self._current2}')\n\n reboot = FCpt(EpicsSignal, '{self._reboot}')\n\n def __init__(self, name,\n temperature1, SP1, set_SP1, current1,\n temperature2, SP2, set_SP2, current2,\n reboot, **kwargs):\n\n self._temperature1 = temperature1\n self._SP1 = SP1\n self._set_SP1 = set_SP1\n self._current1 = current1\n\n self._temperature2 = temperature2\n self._SP2 = SP2\n self._set_SP2 = set_SP2\n self._current2 = current2\n\n self._reboot = reboot\n\n super().__init__(name=name, **kwargs)\n\n\nclass HPLC(Device, _TableMixin):\n '''An HPLC for the sample delivery system\n\n Parameters\n ----------\n pvs : str dict\n A dictionary containing the PVs of all the HPLC components\n name : str\n The device name\n\n Attributes\n ----------\n status : EpicsSignal\n Status of the HPLC\n run : EpicsSignal\n Run the HPLC\n flowrate : EpicsSignal\n Flow rate of the HPLC\n set_flowrate : EpicsSignal\n Set the flow rate of the HPLC\n flowrate_SP : EpicsSignal\n Set point for the flow rate\n pressure : EpicsSignal\n Pressure in the HPLC\n pressure_units : EpicsSignal\n Units for the pressure\n set_max_pressure : EpicsSignal\n Set the 
maximum pressure\n max_pressure : EpicsSignal\n Maximum pressure\n clear_error : EpicsSignal\n Clear errors\n '''\n\n status = FCpt(EpicsSignal, '{self._status}')\n run = FCpt(EpicsSignal, '{self._run}')\n\n flowrate = FCpt(EpicsSignal, '{self._flowrate}')\n set_flowrate = FCpt(EpicsSignal, '{self._set_flowrate}')\n flowrate_SP = FCpt(EpicsSignal, '{self._flowrate_SP}')\n\n pressure = FCpt(EpicsSignal, '{self._pressure}')\n pressure_units = FCpt(EpicsSignal, '{self._pressure_units}')\n set_max_pressure = FCpt(EpicsSignal, '{self._set_max_pressure}')\n max_pressure = FCpt(EpicsSignal, '{self._max_pressure}')\n\n clear_error = FCpt(EpicsSignal, '{self._clear_error}')\n\n def __init__(self, name,\n status, run,\n flowrate, set_flowrate, flowrate_SP,\n pressure, pressure_units, set_max_pressure, max_pressure,\n clear_error, **kwargs):\n\n self._status = status\n self._run = run\n\n self._flowrate = flowrate\n self._set_flowrate = set_flowrate\n self._flowrate_SP = flowrate_SP\n\n self._pressure = pressure\n self._pressure_units = pressure_units\n self._set_max_pressure = set_max_pressure\n self._max_pressure = max_pressure\n\n self._clear_error = clear_error\n\n super().__init__(name=name, **kwargs)\n\n\nclass PressureController(Device, _TableMixin):\n '''An Pressure Controller for the sample delivery system\n\n Parameters\n ----------\n pvs : str dict\n A dictionary containing the PVs of all the pressure controller components\n name : str\n The device name\n\n Attributes\n ----------\n status : EpicsSignal\n Connection status of pressure controller\n pressure1 : EpicsSignal\n Pressure of 1\n enabled1 : EpicsSignal\n Is 1 enabled\n limit1 : EpicsSignal\n High pressure limit of 1\n SP1 : EpicsSignal\n Pressure set point of 1\n pressure2 : EpicsSignal\n Pressure of 2\n enabled2 : EpicsSignal\n Is 2 enabled\n limit2 : EpicsSignal\n High pressure limit of 2\n SP2 : EpicsSignal\n Pressure set point of 2\n '''\n\n status = FCpt(EpicsSignal, '{self._status}')\n\n pressure1 = FCpt(EpicsSignal, '{self._pressure1}')\n enabled1 = FCpt(EpicsSignal, '{self._enabled1}')\n limit1 = FCpt(EpicsSignal, '{self._limit1}')\n SP1 = FCpt(EpicsSignal, '{self._SP1}')\n\n pressure2 = FCpt(EpicsSignal, '{self._pressure2}')\n enabled2 = FCpt(EpicsSignal, '{self._enabled2}')\n limit2 = FCpt(EpicsSignal, '{self._limit2}')\n SP2 = FCpt(EpicsSignal, '{self._SP2}')\n\n def __init__(self, name,\n status,\n pressure1, enabled1, limit1, SP1,\n pressure2, enabled2, limit2, SP2, **kwargs):\n\n self._status = status\n\n self._pressure1 = pressure1\n self._enabled1 = enabled1\n self._limit1 = limit1\n self._SP1 = SP1\n\n self._pressure2 = pressure2\n self._enabled2 = enabled2\n self._limit2 = limit2\n self._SP2 = SP2\n\n super().__init__(name=name, **kwargs)\n\n\nclass FlowIntegrator(Device, _TableMixin):\n '''An FlowIntegrator for the sample delivery system\n\n Parameters\n ----------\n pvs : str dict\n A dictionary containing the PVs of all the flow integrator components\n name : str\n The device name\n\n Attributes\n ----------\n integrator_source: EpicsSignal\n\n flow_source : EpicsSignal\n\n names : EpicsSignal\n Names of\n start1 : EpicsSignal\n Starting volume of 1\n used1 : EpicsSignal\n Flow of 1\n time1 : EpicsSignal\n Estimated depletion time of 1\n start2 : EpicsSignal\n Starting volume of 2\n used2 : EpicsSignal\n Flow of 2\n time2 : EpicsSignal\n Estimated depletion time of 2\n start3 : EpicsSignal\n Starting volume of 3\n used3 : EpicsSignal\n Flow of 3\n time3 : EpicsSignal\n Estimated depletion time of 3\n start4 : 
EpicsSignal\n Starting volume of 4\n used4 : EpicsSignal\n Flow of 4\n time4 : EpicsSignal\n Estimated depletion time of 4\n start5 : EpicsSignal\n Starting volume of 5\n used5 : EpicsSignal\n Flow of 5\n time5 : EpicsSignal\n Estimated depletion time of 5\n start6 : EpicsSignal\n Starting volume of 6\n used6 : EpicsSignal\n Flow of 6\n time6 : EpicsSignal\n Estimated depletion time of 6\n start7 : EpicsSignal\n Starting volume of 7\n used7 : EpicsSignal\n Flow of 7\n time7 : EpicsSignal\n Estimated depletion time of 7\n start8 : EpicsSignal\n Starting volume of 8\n used8 : EpicsSignal\n Flow of 8\n time8 : EpicsSignal\n Estimated depletion time of 8\n start9 : EpicsSignal\n Starting volume of 9\n used9 : EpicsSignal\n Flow of 9\n time9 : EpicsSignal\n Estimated depletion time of 9\n start10 : EpicsSignal\n Starting volume of 10\n used10 : EpicsSignal\n Flow of 10\n time10 : EpicsSignal\n Estimated depletion time of 10\n '''\n\n integrator_source = FCpt(EpicsSignal, '{self._integrator_source}')\n flow_source = FCpt(EpicsSignal, '{self._flow_source}')\n names = FCpt(EpicsSignal, '{self._names}')\n\n start1 = FCpt(EpicsSignal, '{self._start1}')\n used1 = FCpt(EpicsSignal, '{self._used1}')\n time1 = FCpt(EpicsSignal, '{self._time1}')\n\n start2 = FCpt(EpicsSignal, '{self._start2}')\n used2 = FCpt(EpicsSignal, '{self._used2}')\n time2 = FCpt(EpicsSignal, '{self._time2}')\n\n start3 = FCpt(EpicsSignal, '{self._start3}')\n used3 = FCpt(EpicsSignal, '{self._used3}')\n time3 = FCpt(EpicsSignal, '{self._time3}')\n\n start4 = FCpt(EpicsSignal, '{self._start4}')\n used4 = FCpt(EpicsSignal, '{self._used4}')\n time4 = FCpt(EpicsSignal, '{self._time4}')\n\n start5 = FCpt(EpicsSignal, '{self._start5}')\n used5 = FCpt(EpicsSignal, '{self._used5}')\n time5 = FCpt(EpicsSignal, '{self._time5}')\n\n start6 = FCpt(EpicsSignal, '{self._start6}')\n used6 = FCpt(EpicsSignal, '{self._used6}')\n time6 = FCpt(EpicsSignal, '{self._time6}')\n\n start7 = FCpt(EpicsSignal, '{self._start7}')\n used7 = FCpt(EpicsSignal, '{self._used7}')\n time7 = FCpt(EpicsSignal, '{self._time7}')\n\n start8 = FCpt(EpicsSignal, '{self._start8}')\n used8 = FCpt(EpicsSignal, '{self._used8}')\n time8 = FCpt(EpicsSignal, '{self._time8}')\n\n start9 = FCpt(EpicsSignal, '{self._start9}')\n used9 = FCpt(EpicsSignal, '{self._used9}')\n time9 = FCpt(EpicsSignal, '{self._time9}')\n\n start10 = FCpt(EpicsSignal, '{self._start10}')\n used10 = FCpt(EpicsSignal, '{self._used10}')\n time10 = FCpt(EpicsSignal, '{self._time10}')\n\n def __init__(self, name,\n integrator_source, flow_source, names,\n start1, used1, time1,\n start2, used2, time2,\n start3, used3, time3,\n start4, used4, time4,\n start5, used5, time5,\n start6, used6, time6,\n start7, used7, time7,\n start8, used8, time8,\n start9, used9, time9,\n start10, used10, time10, **kwargs):\n\n self._integrator_source = integrator_source\n self._flow_source = flow_source\n self._names = names\n\n self._start1 = start1\n self._used1 = used1\n self._time1 = time1\n\n self._start2 = start2\n self._used2 = used2\n self._time2 = time2\n\n self._start3 = start3\n self._used3 = used3\n self._time3 = time3\n\n self._start4 = start4\n self._used4 = used4\n self._time4 = time4\n\n self._start5 = start5\n self._used5 = used5\n self._time5 = time5\n\n self._start6 = start6\n self._used6 = used6\n self._time6 = time6\n\n self._start7 = start7\n self._used7 = used7\n self._time7 = time7\n\n self._start8 = start8\n self._used8 = used8\n self._time8 = time8\n\n self._start9 = start9\n self._used9 = used9\n 
self._time9 = time9\n\n self._start10 = start10\n self._used10 = used10\n self._time10 = time10\n\n super().__init__(name=name, **kwargs)\n\n\nclass SDS:\n '''\n Sample delivery system\n\n Parameters\n ----------\n devices : dict\n A dictionary of dictionaries containing the devices to be made and\n their PV names. The dictionary key is a string, one of the following:\n {'selector', 'cooler_shaker', 'hplc', 'pressure_controller',\n 'flow_integrator'}\n The values of the dictionary, are also dictionaries. These are passed\n to the new device, allowing parameters such as PV names to be\n specified.\n\n Attributes\n ----------\n SDS_devices : list\n List containing all the devices that are in the sample delivery system\n '''\n\n device_types = {\n 'selector': Selector,\n 'cooler_shaker': CoolerShaker,\n 'hplc': HPLC,\n 'pressure_controller': PressureController,\n 'flow_integrator': FlowIntegrator,\n }\n\n def __init__(self, devices):\n self.SDS_devices = [\n self.device_types[dev](**kwargs)\n for dev, kwargs in devices.items()\n if dev in self.device_types\n ]\n\n invalid_devices = [dev for dev in devices\n if dev not in self.device_types]\n for device in invalid_devices:\n print(f'WARNING: {device} is not a valid device type')\n\n\nclass Offaxis(PCDSDetector):\n '''Area detector for Offaxis camera in CXI\n\n Parameters\n ----------\n port_names : str dict\n A dictionary containing the access port names for the plugins\n prefix : str\n Prefix for the PV name of the camera\n name : str\n Name of the camera\n\n Attributes\n ----------\n ROI : ROIPlugin\n ROI on original rate image\n ROI_stats : StatsPlugin\n Stats on ROI of original rate image\n '''\n\n ROI = FCpt(ROIPlugin, '{self.prefix}:{self._ROI_port}:')\n ROI_stats = FCpt(StatsPlugin, '{self.prefix}:{self._ROI_stats_port}:')\n ROI_image = FCpt(ImagePlugin, '{self.prefix}:{self._ROI_image_port}:')\n\n def __init__(self, ROI_port,\n ROI_stats_port,\n ROI_image_port,\n prefix, *args, **kwargs):\n self._ROI_port = ROI_port\n self._ROI_stats_port = ROI_stats_port\n self._ROI_image_port = ROI_image_port\n\n super().__init__(prefix, *args, **kwargs)\n\n self.ROI_stats.nd_array_port.put(ROI_port)\n self.ROI_image.nd_array_port.put(ROI_port)\n self.ROI.enable.put('Enabled')\n self.ROI_stats.enable.put('Enabled')\n self.ROI_image.enable.put('Enabled')\n\n\nclass Questar(PCDSDetector):\n '''\n Area detector for Inline Questar Camera in CXI\n\n Parameters\n ----------\n port_names : str dict\n A dictionary containing the access port names for the plugins\n prefix : str\n Prefix for the PV name of the camera\n name : str\n Name of the camera\n\n Attributes\n ----------\n ROI : ROIPlugin\n ROI on original rate image\n ROI_stats : StatsPlugin\n Stats on ROI of original rate image\n '''\n\n ROI = FCpt(ROIPlugin, '{self.prefix}:{self._ROI_port}:')\n ROI_stats = FCpt(StatsPlugin, '{self.prefix}:{self._ROI_stats_port}:')\n ROI_image = FCpt(ImagePlugin, '{self.prefix}:{self._ROI_image_port}:')\n\n def __init__(self, ROI_port,\n ROI_stats_port,\n ROI_image_port,\n prefix, *args, **kwargs):\n self._ROI_port = ROI_port\n self._ROI_stats_port = ROI_stats_port\n self._ROI_image_port = ROI_image_port\n\n super().__init__(prefix, *args, **kwargs)\n\n self.ROI_stats.nd_array_port.put(ROI_port)\n self.ROI_image.nd_array_port.put(ROI_port)\n self.ROI.enable.put('Enabled')\n self.ROI_stats.enable.put('Enabled')\n self.ROI_image.enable.put('Enabled')\n\n\nclass Parameters(Device, _TableMixin):\n '''\n Contains EPICS PVs used for jet tracking\n '''\n cam_x = 
Cpt(EpicsSignal, ':CAM_X',\n                doc='x-coordinate of camera position in mm')\n    cam_y = Cpt(EpicsSignal, ':CAM_Y',\n                doc='y-coordinate of camera position in mm')\n    pxsize = Cpt(EpicsSignal, ':PXSIZE',\n                 doc='size of pixel in mm')\n    cam_roll = Cpt(EpicsSignal, ':CAM_ROLL',\n                   doc='rotation of camera about z axis in radians')\n    beam_x = Cpt(EpicsSignal, ':BEAM_X',\n                 doc='x-coordinate of x-ray beam in mm (usually 0)')\n    beam_y = Cpt(EpicsSignal, ':BEAM_Y',\n                 doc='y-coordinate of x-ray beam in mm (usually 0)')\n    beam_x_px = Cpt(EpicsSignal, ':BEAM_X_PX',\n                    doc='x-coordinate of x-ray beam in camera image in pixels')\n    beam_y_px = Cpt(EpicsSignal, ':BEAM_Y_PX',\n                    doc='y-coordinate of x-ray beam in camera image in pixels')\n    nozzle_x = Cpt(EpicsSignal, ':NOZZLE_X',\n                   doc='x-coordinate of nozzle in mm')\n    nozzle_y = Cpt(EpicsSignal, ':NOZZLE_Y',\n                   doc='y-coordinate of nozzle in mm')\n    nozzle_xwidth = Cpt(EpicsSignal, ':NOZZLE_XWIDTH',\n                        doc='width of nozzle in mm')\n    jet_x = Cpt(EpicsSignal, ':JET_X',\n                doc='distance from sample jet to x-ray beam in mm')\n    jet_roll = Cpt(EpicsSignal, ':JET_ROLL',\n                   doc='rotation of sample jet about z axis in radians')\n    state = Cpt(EpicsSignal, ':STATE',\n                doc='dictionary of strings')\n    jet_counter = Cpt(EpicsSignal, ':JET_Counter',\n                      doc='Jet counter')\n    jet_reprate = Cpt(EpicsSignal, ':JET_RepRate',\n                      doc='Jet repetition rate')\n    nozzle_counter = Cpt(EpicsSignal, ':NOZZLE_Counter',\n                         doc='Nozzle counter')\n    nozzle_reprate = Cpt(EpicsSignal, ':NOZZLE_RepRate',\n                         doc='Nozzle repetition rate')\n    mean = Cpt(EpicsSignal, ':ROI_mean',\n               doc='mean of calibration ROI image with jet')\n    std = Cpt(EpicsSignal, ':ROI_std',\n              doc='standard deviation of calibration ROI image with jet')\n    radius = Cpt(EpicsSignal, ':RADIUS',\n                 doc='radius of calibration diffraction ring')\n    intensity = Cpt(EpicsSignal, ':INTENSITY',\n                    doc='intensity of calibration diffraction ring')\n    thresh_hi = Cpt(EpicsSignal, ':THRESH_hi',\n                    doc='upper threshold for CSPAD ring intensity')\n    thresh_lo = Cpt(EpicsSignal, ':THRESH_lo',\n                    doc='lower threshold for CSPAD ring intensity')\n    thresh_w8 = Cpt(EpicsSignal, ':THRESH_w8',\n                    doc='threshold for wave8')\n    bypass_cam = Cpt(EpicsSignal, ':BYPASS_cam',\n                     doc='bypass camera during jet tracking')\n    frames_cam = Cpt(EpicsSignal, ':FRAMES_cam',\n                     doc='number of frames for integration for camera')\n    frames_cspad = Cpt(EpicsSignal, ':FRAMES_cspad',\n                       doc='number of frames for integration for cspad')\n\n\nclass OffaxisParams(Device, _TableMixin):\n    '''\n    Contains EPICS PVs used with Offaxis camera for jet tracking\n    '''\n    cam_z = Cpt(EpicsSignal, ':CAM_Z',\n                doc='z-coordinate of camera position in mm')\n    cam_y = Cpt(EpicsSignal, ':CAM_Y',\n                doc='y-coordinate of camera position in mm')\n    pxsize = Cpt(EpicsSignal, ':PXSIZE',\n                 doc='size of pixel in mm')\n    cam_pitch = Cpt(EpicsSignal, ':CAM_PITCH',\n                    doc='rotation of camera about x axis in radians')\n    beam_z = Cpt(EpicsSignal, ':BEAM_Z',\n                 doc='z-coordinate of x-ray beam in mm (usually 0)')\n    beam_y = Cpt(EpicsSignal, ':BEAM_Y',\n                 doc='y-coordinate of x-ray beam in mm (usually 0)')\n    beam_z_px = Cpt(EpicsSignal, ':BEAM_Z_PX',\n                    doc='z-coordinate of x-ray beam in camera image in pixels')\n    beam_y_px = Cpt(EpicsSignal, ':BEAM_Y_PX',\n                    doc='y-coordinate of x-ray beam in camera image in pixels')\n    nozzle_z = Cpt(EpicsSignal, ':NOZZLE_Z',\n                   doc='z-coordinate of nozzle in mm')\n    nozzle_y = Cpt(EpicsSignal, ':NOZZLE_Y',\n                   doc='y-coordinate of nozzle in mm')\n    nozzle_zwidth = Cpt(EpicsSignal, ':NOZZLE_ZWIDTH',\n                        doc='width of 
nozzle in mm')\n    jet_z = Cpt(EpicsSignal, ':JET_Z',\n                doc='distance from sample jet to x-ray beam in mm')\n    jet_pitch = Cpt(EpicsSignal, ':JET_PITCH',\n                    doc='rotation of sample jet about x axis in radians')\n    state = Cpt(EpicsSignal, ':STATE',\n                doc='dictionary of strings')\n    jet_counter = Cpt(EpicsSignal, ':JET_Counter',\n                      doc='Jet counter')\n    jet_reprate = Cpt(EpicsSignal, ':JET_RepRate',\n                      doc='Jet repetition rate')\n    nozzle_counter = Cpt(EpicsSignal, ':NOZZLE_Counter',\n                         doc='Nozzle counter')\n    nozzle_reprate = Cpt(EpicsSignal, ':NOZZLE_RepRate',\n                         doc='Nozzle repetition rate')\n    mean = Cpt(EpicsSignal, ':ROI_mean',\n               doc='mean of calibration ROI image with jet')\n    std = Cpt(EpicsSignal, ':ROI_std',\n              doc='standard deviation of calibration ROI image with jet')\n    radius = Cpt(EpicsSignal, ':RADIUS',\n                 doc='radius of calibration diffraction ring')\n    intensity = Cpt(EpicsSignal, ':INTENSITY',\n                    doc='intensity of calibration diffraction ring')\n    thresh_hi = Cpt(EpicsSignal, ':THRESH_hi',\n                    doc='upper threshold for CSPAD ring intensity')\n    thresh_lo = Cpt(EpicsSignal, ':THRESH_lo',\n                    doc='lower threshold for CSPAD ring intensity')\n    thresh_w8 = Cpt(EpicsSignal, ':THRESH_w8',\n                    doc='threshold for wave8')\n    bypass_cam = Cpt(EpicsSignal, ':BYPASS_cam',\n                     doc='bypass camera during jet tracking')\n    frames_cam = Cpt(EpicsSignal, ':FRAMES_cam',\n                     doc='number of frames for integration for camera')\n    frames_cspad = Cpt(EpicsSignal, ':FRAMES_cspad',\n                       doc='number of frames for integration for cspad')\n\n\nclass Control(Device, _TableMixin):\n    '''\n    Contains EPICS PVs used for jet tracking control\n    '''\n\n    re_state = Cpt(EpicsSignal, ':RE:STATE')\n    beam_state = Cpt(EpicsSignal, ':BEAM:STATE')\n    injector_state = Cpt(EpicsSignal, ':INJECTOR:STATE')\n    beam_trans = Cpt(EpicsSignal, ':BEAM:TRANS')\n    beam_pulse_energy = Cpt(EpicsSignal, ':BEAM:PULSE_ENERGY')\n    beam_e_thresh = Cpt(EpicsSignal, ':BEAM:E_THRESH')\n    xstep_size = Cpt(EpicsSignal, ':INJECTOR:XSTEP_SIZE')\n    xscan_min = Cpt(EpicsSignal, ':INJECTOR:XSCAN_MIN')\n    xscan_max = Cpt(EpicsSignal, ':INJECTOR:XSCAN_MAX')\n    bounce_width = Cpt(EpicsSignal, ':INJECTOR:BOUNCE_WIDTH')\n    xmin = Cpt(EpicsSignal, ':INJECTOR:XMIN')\n    xmax = Cpt(EpicsSignal, ':INJECTOR:XMAX')\n\n\nclass Diffract(Device, _TableMixin):\n    '''\n    Contains EPICS PVs used for shared memory X-ray Diffraction detector\n    used in jet tracking.\n    '''\n    total_counter = Cpt(EpicsSignal, ':TOTAL_Counter',\n                        doc='Total counter')\n    total_reprate = Cpt(EpicsSignal, ':TOTAL_RepRate',\n                        doc='Diffraction total intensity calc rate')\n    ring_counter = Cpt(EpicsSignal, ':RING_Counter',\n                       doc='Diffraction ring intensity event counter')\n    ring_reprate = Cpt(EpicsSignal, ':RING_RepRate',\n                       doc='Diffraction ring intensity event counter')\n    psd_counter = Cpt(EpicsSignal, ':PSD_Counter',\n                      doc='Diffraction periodogram event counter')\n    psd_reprate = Cpt(EpicsSignal, ':PSD_RepRate',\n                      doc='Diffraction periodogram event counter')\n    stats_counter = Cpt(EpicsSignal, ':STATS_Counter',\n                        doc='Diffraction stats event counter')\n    stats_reprate = Cpt(EpicsSignal, ':STATS_RepRate',\n                        doc='Diffraction stats event counter')\n    streak_counter = Cpt(EpicsSignal, ':STREAK_Counter',\n                         doc='Diffraction streak event counter')\n    streak_reprate = Cpt(EpicsSignal, ':STREAK_RepRate',\n                         doc='Diffraction streak event counter')\n    cspad_sum = Cpt(EpicsSignal, ':TOTAL_ADU',\n                    doc='Total detector ADU')\n    streak_fraction = Cpt(EpicsSignal, ':STREAK_FRACTION',\n                          doc='Fraction of events with diffraction streak')\n    stats_mean = Cpt(EpicsSignal, 
':STATS_MEAN',\n                      doc='Mean Diffraction Statistic')\n    stats_std = Cpt(EpicsSignal, ':STATS_STD',\n                    doc='Std Diffraction Statistic')\n    stats_min = Cpt(EpicsSignal, ':STATS_MIN',\n                    doc='Min Diffraction Statistic')\n    stats_max = Cpt(EpicsSignal, ':STATS_MAX',\n                    doc='Max Diffraction Statistic')\n    psd_frequency = Cpt(EpicsSignal, ':PSD_FREQUENCY',\n                        doc='Diffraction periodogram fundamental frequency')\n    psd_amplitude = Cpt(EpicsSignal, ':PSD_AMPLITUDE',\n                        doc='Diffraction periodogram Frequency analysis amplitude')\n    psd_rate = Cpt(EpicsSignal, ':PSD_RATE',\n                   doc='Event frequency for periodogram')\n    psd_events = Cpt(EpicsSignal, ':PSD_EVENTS',\n                     doc='Diffraction periodogram')\n    psd_resolution = Cpt(EpicsSignal, ':PSD_RESOLUTION',\n                         doc='Resolution to smooth over for periodogram')\n    psd_freq_min = Cpt(EpicsSignal, ':PSD_FREQ_MIN',\n                       doc='Minimum frequency for periodogram calcs')\n    psd_amp_wf = Cpt(EpicsSignal, ':PSD_AMP_WF',\n                     doc='Diffraction periodogram Frequency analysis waveform array')\n    psd_freq_wf = Cpt(EpicsSignal, ':PSD_FREQ_WF',\n                      doc='Diffraction periodogram frequency waveform')\n    psd_amp_array = Cpt(EpicsSignal, ':PSD_AMP_ARRAY',\n                        doc='Diffraction periodogram Frequency analysis amplitude array')\n    state = Cpt(EpicsSignal, ':STATE',\n                doc='State of diffraction analysis')\n"
},
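Nearly every device in the record above is built from FormattedComponent (FCpt) attributes whose PV strings are filled in from instance attributes. A minimal sketch of that pattern follows; the PV name 'CXI:SDS:STATUS' is hypothetical and stands in for a real EPICS PV.

# Minimal sketch of the FCpt pattern used in the record above.
from ophyd import Device, EpicsSignal
from ophyd import FormattedComponent as FCpt

class MiniDevice(Device):
    # '{self._status}' is formatted against the instance, so the full PV name
    # can be handed to __init__ instead of being hard-coded on the class.
    status = FCpt(EpicsSignal, '{self._status}')

    def __init__(self, name, status, **kwargs):
        self._status = status  # must exist before super().__init__ formats it
        super().__init__(name=name, **kwargs)

dev = MiniDevice(name='mini', status='CXI:SDS:STATUS')

Because FCpt formats its PV template while Device.__init__ runs, the private attribute has to be assigned first, which is why every __init__ in the record stores its PV names before calling super().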
{
"alpha_fraction": 0.6942735314369202,
"alphanum_fraction": 0.6952200531959534,
"avg_line_length": 36.07017517089844,
"blob_id": "750c3c1295d7647ada60e176dd6f80d3231bc518",
"content_id": "934de7b001ca10dac4b4aeae899a20f6f9819fe6",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2113,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 57,
"path": "/jet_tracking/sim.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "import types\n\nimport numpy as np\nfrom ophyd.sim import SynAxis, SynSignal\n\n\ndef generate_simulation(motor_column, signal_column, dataframe,\n motor_precision=3, random_state=None):\n \"\"\"\n Generate a simulation based on a provided DataFrame\n\n Use collected data to simulate the relationship between a single\n input and output variable. A ``SynAxis`` object will be returned that can\n be set to a specified precision. The value of the dependent variable is\n then determined by finding the closest position of the motor we have\n recorded and returning the corresponding value. If multiple readings were\n taken at this position one is randomly chosen.\n\n Parameters\n ----------\n motor_column: str\n The column of data that will be used as the independent variable. Will\n also be the name of the created motor\n\n signal_column: str\n The name of the column to be the dependent variable. Will also be the\n name of the created signal\n\n dataframe: pandas.DataFrame\n Data to use in simulation\n\n motor_precision: int, optional\n Limit the accuracy of the simulated motor\n\n random_state: np.random.RandomState, optional\n Seed the simulation\n\n Returns\n -------\n namespace: types.SimpleNamespace\n A namespace with attributes ``motor``, ``signal``, and ``data``.\n \"\"\"\n # Create our motor that will serve as the independent variable\n motor = SynAxis(name=motor_column, precision=motor_precision)\n ns = types.SimpleNamespace(data=dataframe, motor=motor)\n random_state = random_state or np.random.RandomState(0)\n\n # Create a function to return a random value from closest motor position\n def func():\n motor_positions = ns.data[motor_column].unique()\n sim_data = dict(iter(ns.data.groupby(motor_column)))\n pos = ns.motor.position\n closest_position = motor_positions[np.abs(motor_positions - pos).argmin()]\n return random_state.choice(sim_data[closest_position][signal_column])\n\n ns.signal = SynSignal(name=signal_column, func=func)\n return ns\n"
},
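The heart of generate_simulation above is a nearest-recorded-position lookup: snap the requested motor position to the closest one in the data, then sample one of the readings taken there. A sketch of just that lookup with the ophyd wrappers stripped away; the column names and toy data are illustrative only.

# Sketch of the nearest-position lookup, without SynAxis/SynSignal.
import numpy as np
import pandas as pd

df = pd.DataFrame({'motor': [0.0, 0.0, 0.5, 1.0],
                   'signal': [1.0, 1.2, 2.0, 3.1]})
rng = np.random.RandomState(0)

def read_signal(pos):
    positions = df['motor'].unique()
    # snap the requested position to the closest recorded one
    closest = positions[np.abs(positions - pos).argmin()]
    # several readings may exist at that position; pick one at random
    return rng.choice(df.loc[df['motor'] == closest, 'signal'].to_numpy())

print(read_signal(0.1))  # samples one of the readings taken at motor=0.0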
{
"alpha_fraction": 0.5681697726249695,
"alphanum_fraction": 0.5794763565063477,
"avg_line_length": 29.928424835205078,
"blob_id": "da0f2ef5314fcca30a646e14a86097e4289db983",
"content_id": "6ba1d63147a59ed070bbd6ad8943035928be016a",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15124,
"license_type": "permissive",
"max_line_length": 99,
"num_lines": 489,
"path": "/jet_tracking/cam_utils.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\n\nfrom scipy.signal import peak_widths\nfrom skimage.feature import register_translation\nfrom skimage.feature import register_translation\nfrom skimage.feature import canny, peak_local_max\nfrom skimage.transform import hough_line, hough_line_peaks, rotate\n\n\ndef image_stats(img):\n '''\n Parameters\n ----------\n img : ndarray\n image\n\n Returns\n -------\n mean : float\n mean of given image\n std : float\n standard deviation of image\n '''\n return img.mean(), img.std()\n\n\ndef jet_detect(img, calibratemean, calibratestd):\n '''Finds the jet from the online camera roi using Canny edge detection and Hough line transform\n\n This method first compares the mean of the ROI image to the mean of the calibrated ROI.\n Then Canny edge detection is used to detect the edges of the jet in the ROI and convert\n the original image to a binary image.\n Hough line transform is performed on the binary image to determine the approximate position\n of the jet.\n Peak-finding is performed on several horizontal slices of the image, and a line is fitted\n to these points to determine the actual position of the jet.\n If a peak is found that is not in the approximate position of the jet determined by the\n Hough line transform, that point is not considered in the line fitting.\n\n Parameters\n ----------\n img : ndarray\n ROI of the on-axis image\n mean : float\n mean of calibration ROI image with jet (see calibrate())\n calibratestd : float\n standard deviation calibration ROI image with jet (see calibrate())\n\n Returns\n -------\n rho : float\n Distance from (0,0) to the line in pixels\n theta : float\n Angle of the shortest vector from (0,0) to the line in radians\n '''\n\n # compare mean & std of current image to mean & std of calibrate image\n mean, std = image_stats(img)\n if (mean < calibratemean * 0.8) or (mean > calibratemean * 1.2):\n raise ValueError('ERROR mean: no jet')\n\n try:\n # use canny edge detection to convert image to binary\n binary = canny(img, sigma=2, use_quantiles=True, low_threshold=0.9, high_threshold=0.99)\n\n # perform Hough Line Transform on binary image\n h, angles, d = hough_line(binary)\n res = hough_line_peaks(h, angles, d, min_distance=1, threshold=int(img.shape[0]/3))\n\n # keep only valid lines\n valid = []\n for _, theta, dist in zip(*res):\n jetValid = True\n # jet must be within 45 degrees of vertical\n if (theta < np.radians(-45)) or (theta > np.radians(45)):\n jetValid = False\n # jet must start from top edge of imagei\n yint = dist / np.sin(theta)\n xint = np.tan(theta) * yint\n if (dist < 0) or (xint > binary.shape[1]):\n jetValid = False\n # jet must be within [x] pixels width\n #if (cam_utils.get_jet_width(img, rho, theta) * pxsize > 0.01):\n # jetValid = false\n # print('ERROR width: not a jet')\n if (jetValid):\n valid.append([theta, dist])\n except Exception:\n raise ValueError('ERROR hough: no jet')\n\n # use local maxes to determine exact jet position\n # line-fitting cannot be performed on vertical line (which is highly likely due to\n # nature of jet) so rotate image first\n imgr = rotate(img, 90, resize=True, preserve_range=True)\n\n jet_xcoords = []\n jet_ycoords = []\n\n for x in range(10):\n # try to find local maxes (corresponds to jet) in 10 rows along height of image)\n col = int(imgr.shape[1] / 10 * x)\n ymax = peak_local_max(imgr[:,col], threshold_rel=0.9, num_peaks=1)[0][0]\n\n # check if point found for max is close to jet lines found with Hough transform\n miny = imgr.shape[0]\n maxy = 0\n for 
theta, dist in valid:\n xint = dist / np.sin(theta)\n y = imgr.shape[0] - ((xint - col) * np.tan(theta))\n \n if (y < miny):\n miny = y\n if (y > maxy):\n maxy = y\n \n # if x found using local max is close to lines found with Hough transform, keep it \n if (ymax >= (miny - 5)) and (ymax <= (maxy + 5)):\n jet_xcoords.append(col)\n jet_ycoords.append(ymax)\n\n try:\n # fit a line to the points found using local max\n m, b = np.polyfit(jet_xcoords, jet_ycoords, 1)\n theta = -np.arctan(m)\n rho = np.cos(theta) * (imgr.shape[0] - b)\n except Exception:\n raise ValueError('ERROR polyfit: no jet')\n return rho, theta\n\n\ndef get_jet_z(rho, theta, roi_y, roi_z, *, pxsize, cam_y, cam_z, beam_y,\n beam_z, cam_pitch):\n '''\n Calculates the jet position at beam height in the main coordinate system\n in offaxis camera (z and pitch replace x and roll given camera orientation)\n\n Parameters\n ----------\n rho : float\n Distance from (0,0) to the line in pixels\n theta : float\n Angle of the shortest vector from (0,0) to the line in radians\n y_roi : int\n Y-coordinate of the origin of the ROI on the camera image in pixels\n z_roi : int\n Z-coordinate of the origin of the ROI on the camera image in pixels\n pxsize : float\n size of pixel in mm\n cam_y : float\n y-coordinate of camera position in mm\n cam_z : float\n z-coordinate of camera position in mm\n beam_y : float\n y-coordinate of x-ray beam in mm (usually 0)\n beam_z : float\n z-coordinate of x-ray beam in mm (usually 0)\n cam_pitch : float\n rotation of camera about x axis in radians\n\n Returns\n -------\n zj : float\n Jet position at the beam height in millimeters\n '''\n yb_roi = (1.0 / pxsize) * ((cam_y - beam_y) * np.cos(-cam_pitch) +\n (cam_z - beam_z) * np.sin(-cam_pitch)) - roi_y\n # print('yb_roi: {}'.format(yb_roi))\n zj_roi = (rho - yb_roi * np.sin(theta)) / np.cos(theta)\n # print('zj_roi: {}'.format(zj_roi))\n z0_roi = (1.0 / pxsize) * (cam_z * np.cos(cam_pitch) -\n cam_y * np.sin(-cam_pitch)) - roi_z\n zj = pxsize * (z0_roi - zj_roi)\n return zj\n\n\ndef get_jet_x(rho, theta, roi_x, roi_y, *, pxsize, cam_x, cam_y, beam_x,\n beam_y, cam_roll):\n '''Calculates the jet position at beam height in the main coordinate system\n\n Parameters\n ----------\n rho : float\n Distance from (0,0) to the line in pixels\n theta : float\n Angle of the shortest vector from (0,0) to the line in radians\n x_roi : int\n X-coordinate of the origin of the ROI on the camera image in pixels\n y_roi : int\n Y-coordinate of the origin of the ROI on the camera image in pixels\n pxsize : float\n size of pixel in mm\n cam_x : float\n x-coordinate of camera position in mm\n cam_y : float\n y-coordinate of camera position in mm\n beam_x : float\n x-coordinate of x-ray beam in mm (usually 0)\n beam_y : float\n y-coordinate of x-ray beam in mm (usually 0)\n cam_roll : float\n rotation of camera about z axis in radians\n\n Returns\n -------\n xj : float\n Jet position at the beam height in millimeters\n '''\n yb_roi = (1.0 / pxsize) * ((cam_y - beam_y) * np.cos(cam_roll) +\n (cam_x - beam_x) * np.sin(cam_roll)) - roi_y\n # print('yb_roi: {}'.format(yb_roi))\n xj_roi = (rho - yb_roi * np.sin(theta)) / np.cos(theta)\n # print('xj_roi: {}'.format(xj_roi))\n x0_roi = (1.0 / pxsize) * (cam_x * np.cos(cam_roll) -\n cam_y * np.sin(cam_roll)) - roi_x\n xj = pxsize * (x0_roi - xj_roi)\n return xj\n\n\ndef get_jet_pitch(theta, cam_pitch):\n '''Calculates jet angle in the main coordinate system (in radians, from -pi/2 to pi/2)\n\n Parameters\n ----------\n theta : float\n 
Angle of the shortest vector from (0,0) to the line in radians\n cam_pitch : float\n rotation of camera about x axis in radians\n\n Returns\n -------\n jet_pitch : float\n Jet angle in radians\n '''\n return (theta - np.pi / 2 - cam_pitch) % np.pi - np.pi / 2\n\n\ndef get_jet_roll(theta, cam_roll):\n '''Calculates jet angle in the main coordinate system (in radians, from -pi/2 to pi/2)\n\n Parameters\n ----------\n theta : float\n Angle of the shortest vector from (0,0) to the line in radians\n cam_roll : float\n rotation of camera about z axis in radians\n\n Returns\n -------\n jet_roll : float\n Jet angle in radians\n '''\n return (theta - np.pi / 2 - cam_roll) % np.pi - np.pi / 2\n\n\ndef get_jet_width(im, rho, theta):\n '''Calculates the jet width\n\n Parameters\n ----------\n img : ndarray\n ROI of the on-axis image\n rho : float\n Distance from (0,0) to the line in pixels\n theta : float\n Angle of the shortest vector from (0,0) to the line in radians\n\n Returns\n -------\n w : float\n Jet width in pixels\n '''\n rows, column_indices = np.ogrid[:im.shape[0], :im.shape[1]]\n r = np.asarray([int((rho + y * np.sin(theta)) / np.cos(theta))\n for y in range(im.shape[0])])\n r = r % im.shape[1]\n column_indices = column_indices - r[:, np.newaxis]\n\n s = im[rows, column_indices].sum(axis=0)\n\n return peak_widths(s, [s.argmax()])[0]\n\n\ndef get_offaxis_coords(cam_beam_y, cam_beam_z, *, cam_pitch, pxsize):\n '''Finds cam_y and cam_z using the pixel coordinates of the origin\n\n Parameters\n ----------\n cam_beam_y : float\n y coordinate for the beam (= main coordinate origin) on the camera in pixels\n cam_beam_z : float\n z coordinate for the beam (= main coordinate origin) on the camera in pixels\n cam_pitch : float\n rotation of camera about x axis in radians\n pxsize : float\n size of pixel in mm\n\n Returns\n -------\n cam_y : float\n Y-coordinate of the origin of the camera in the main coordinate system in millimeters\n cam_z : float\n Z-coordinate of the origin of the camera in the main coordinate system in millimeters\n\n '''\n cam_y = pxsize * (cam_beam_z * np.sin(cam_pitch) +\n cam_beam_y * np.cos(cam_pitch))\n cam_z = pxsize * (cam_beam_z * np.cos(cam_pitch) -\n cam_beam_y * np.sin(cam_pitch))\n return cam_y, cam_z\n\n\ndef get_cam_coords(cam_beam_x, cam_beam_y, *, cam_roll, pxsize):\n '''Finds cam_x and cam_y using the pixel coordinates of the origin\n\n Parameters\n ----------\n cam_beam_x : float\n x coordinate for the beam (= main coordinate origin) on the camera in pixels\n cam_beam_y : float\n y coordinate for the beam (= main coordinate origin) on the camera in pixels\n cam_roll : float\n rotation of camera about z axis in radians\n pxsize : float\n size of pixel in mm\n\n Returns\n -------\n cam_x : float\n X-coordinate of the origin of the camera in the main coordinate system in millimeters\n cam_y : float\n Y-coordinate of the origin of the camera in the main coordinate system in millimeters\n\n '''\n cam_x = pxsize * (cam_beam_y * np.sin(cam_roll) +\n cam_beam_x * np.cos(cam_roll))\n cam_y = pxsize * (cam_beam_y * np.cos(cam_roll) -\n cam_beam_x * np.sin(cam_roll))\n return cam_x, cam_y\n\n\ndef get_cam_pitch(imgs):\n '''Finds the camera angle\n\n Parameters\n ----------\n imgs : list(ndarray)\n List of images where nozzle has been moved in x-direction\n\n Returns\n -------\n cam_pitch : float\n Offaxis camera pitch angle in radians\n '''\n ytot = 0\n ztot = 0\n for i in range(len(imgs) - 1):\n im1, im2 = imgs[i], imgs[i + 1]\n (dy, dz), error, diffphase = 
register_translation(im1, im2, 100)\n if dy < 0:\n dy *= -1\n dz *= -1\n ytot += dy\n ztot += dz\n return np.arctan(ytot / ztot)\n\n\ndef get_cam_roll(imgs):\n '''Finds the camera angle\n\n Parameters\n ----------\n imgs : list(ndarray)\n List of images where nozzle has been moved in x-direction\n\n Returns\n -------\n cam_roll : float\n Camera angle in radians\n '''\n ytot = 0\n xtot = 0\n for i in range(len(imgs) - 1):\n im1, im2 = imgs[i], imgs[i + 1]\n (dy, dx), error, diffphase = register_translation(im1, im2, 100)\n if dy < 0:\n dy *= -1\n dx *= -1\n ytot += dy\n xtot += dx\n return -np.arctan(ytot / xtot)\n\n\ndef get_cam_pitch_pxsize(imgs, positions):\n '''Finds offaxis camera pitch and pixel size\n\n Parameters\n ----------\n imgs : list(ndarray)\n List of images where nozzle has been moved in x-direction\n positions : list(float)\n List of motor positions in millimeters\n\n Returns\n -------\n cam_pitch : float\n Camera angle in radians\n pxsize : float\n Pixel size in millimeters\n '''\n ytot = 0\n ztot = 0\n changetot = 0\n for i in range(len(positions) - 1):\n im1, im2 = imgs[i], imgs[i + 1]\n (dy, dz), error, diffphase = register_translation(im1, im2, 100)\n if dy < 0:\n dy *= -1\n dz *= -1\n ytot += dy\n ztot += dz\n changetot += abs(positions[i + 1] - positions[i])\n\n cam_pitch = np.arctan(ytot / ztot)\n pxsize = changetot / np.sqrt(ytot**2 + ztot**2)\n return cam_pitch, pxsize\n\n\ndef get_cam_roll_pxsize(imgs, positions):\n '''Finds camera angle and pixel size\n\n Parameters\n ----------\n imgs : list(ndarray)\n List of images where nozzle has been moved in x-direction\n positions : list(float)\n List of motor positions in millimeters\n\n Returns\n -------\n cam_roll : float\n Camera angle in radians\n pxsize : float\n Pixel size in millimeters\n '''\n ytot = 0\n xtot = 0\n changetot = 0\n for i in range(len(positions) - 1):\n im1, im2 = imgs[i], imgs[i + 1]\n (dy, dx), error, diffphase = register_translation(im1, im2, 100)\n if dy < 0:\n dy *= -1\n dx *= -1\n ytot += dy\n xtot += dx\n changetot += abs(positions[i + 1] - positions[i])\n\n cam_roll = -np.arctan(ytot / xtot)\n pxsize = changetot / np.sqrt(ytot**2 + xtot**2)\n return cam_roll, pxsize\n\n\ndef get_nozzle_shift(im1, im2, *, cam_roll, pxsize):\n '''Finds the distance the nozzle has shifted between two images\n\n Parameters\n ----------\n im1 : ndarray\n On-axis camera image 1\n im2 : ndarray\n On-axis camera image 2\n cam_roll : float\n rotation of camera about z axis in radians\n pxsize : float\n size of pixel in mm\n\n Returns\n -------\n dy : float\n Distance in y\n dx : float\n Distance in x\n '''\n\n (sy, sx), error, diffphase = register_translation(im1, im2, 100)\n dx = (sx * np.cos(cam_roll) - sy * np.sin(cam_roll)) * pxsize\n dy = (sy * np.cos(cam_roll) + sx * np.sin(cam_roll)) * pxsize\n return dy, dx\n"
},
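Several functions above (get_jet_x, get_jet_z) share one piece of geometry: a Hough line in normal form satisfies x*cos(theta) + y*sin(theta) = rho, so the column the jet crosses at a given row is x = (rho - y*sin(theta)) / cos(theta). A toy check of that identity, with invented rho/theta values:

# Toy check of the normal-form line math used by get_jet_x / get_jet_z.
import numpy as np

rho, theta = 120.0, np.radians(5.0)  # a near-vertical Hough line
for y in (0, 50, 100):
    x = (rho - y * np.sin(theta)) / np.cos(theta)
    # verify the point really lies on the line x*cos(t) + y*sin(t) = rho
    assert np.isclose(x * np.cos(theta) + y * np.sin(theta), rho)
    print('row %3d -> column %.1f' % (y, x))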
{
"alpha_fraction": 0.6006882190704346,
"alphanum_fraction": 0.6036275029182434,
"avg_line_length": 31.891510009765625,
"blob_id": "c1aadb50e76f31c1378672751751528cc8e9a635",
"content_id": "72717dbdbafd0884877ed8cf0f6266c996ba3a9c",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13949,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 424,
"path": "/jet_tracking/jet_control.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "from time import sleep\n\nfrom . import cam_utils\nfrom . import jt_utils\nfrom .move_motor import movex\n\n\nclass JetControl:\n '''\n Jet tracking control class using jet_tracking methods\n '''\n\n def __init__(self, name, injector, camera, params, diffract, *,\n offaxis=False, **kwargs):\n self.name = name\n self.injector = injector\n self.camera = camera\n self.params = params\n self.diffract = diffract\n self.offaxis = offaxis\n\n def set_beam(self, beam_x_px, beam_y_px):\n '''\n Set the coordinates for the x-ray beam\n\n Parameters\n ----------\n beam_x_px : int\n x-coordinate of x-ray beam in the camera image in pixels\n beam_y_px : int\n y-coordinate of x-ray beam in the camera image in pixels\n '''\n set_beam(beam_x_px, beam_y_px, self.params)\n\n def calibrate(self, **kwargs):\n '''\n Calibrate the onaxis camera\n '''\n if self.offaxis:\n return calibrate_off_axis(self.injector, self.camera, self.params,\n **kwargs)\n else:\n return calibrate_inline(self.injector, self.camera, self.params,\n **kwargs)\n\n def jet_calculate(self):\n '''\n Track the sample jet and calculate the distance to the x-ray beam\n '''\n if self.offaxis:\n return jet_calculate_off_axis(self.camera, self.params)\n else:\n return jet_calculate_inline(self.camera, self.params)\n\n def jet_move(self):\n '''\n Move the sample jet to the x-ray beam\n '''\n if self.offaxis:\n raise NotImplementedError()\n else:\n jet_move_inline(self.injector, self.camera, self.params)\n\n\ndef get_burst_avg(n, image_plugin):\n '''\n Get the average of n consecutive images from a camera\n\n Parameters\n ----------\n n : int\n number of consecutive images to be averaged\n image_plugin : ImagePlugin\n camera ImagePlugin from which the images will be taken\n\n Returns\n -------\n burst_avg : ndarray\n average image\n '''\n imageX, imageY = image_plugin.image.shape\n burst_imgs = np.empty((n, imageX, imageY))\n for x in range(n):\n burst_imgs[x] = image_plugin.image\n burst_avg = burst_imgs.mean(axis=0)\n\n return burst_avg\n\n\ndef set_beam(beam_x_px, beam_y_px, params):\n '''\n Set the coordinates for the x-ray beam\n\n Parameters\n ----------\n beam_x_px : int\n x-coordinate of x-ray beam in the camera image in pixels\n beam_y_px : int\n y-coordinate of x-ray beam in the camera image in pixels\n params : Parameters\n EPICS PVs used for recording jet tracking data\n '''\n params.beam_x_px.put(beam_x_px)\n params.beam_y_px.put(beam_y_px)\n\n\ndef get_calibration_images(axis, camera, *, settle_time=1.0,\n burst_images=20):\n # collect images and motor positions to calculate pxsize and cam_roll\n imgs = []\n positions = []\n start_pos = axis.user_readback.get()\n for i in range(2):\n image = cam_utils.get_burst_avg(burst_images, camera.image)\n imgs.append(image)\n positions.append(axis.user_readback.get())\n next_position = axis.user_setpoint.get() - 0.1\n axis.set(next_position, wait=True)\n sleep(settle_time)\n\n axis.set(start_pos, wait=True)\n sleep(settle_time)\n return positions, imgs\n\n\ndef calibrate_off_axis(injector, camera, params, *, settle_time=1.0,\n burst_images=20):\n '''\n Calibrate the off-axis camera\n\n Parameters\n ----------\n injector : Injector\n sample injector\n camera : Questar\n camera looking at sample jet and x-rays\n params : Parameters\n EPICS PVs used for recording jet tracking data\n settle_time : float, optional\n Additional settle time after moving the motor\n burst_imagess : int, optional\n Number of burst images to average from the camera\n '''\n # TODO (koglin) check sign for off-axis 
calculations\n    injector_axis = injector.coarseX\n    positions, imgs = get_calibration_images(injector_axis, camera,\n                                             settle_time=settle_time)\n\n    cam_pitch, pxsize = cam_utils.get_cam_pitch_pxsize(imgs, positions)\n    params.pxsize.put(pxsize)\n    params.cam_pitch.put(cam_pitch)\n\n    beam_y_px = params.beam_y_px.get()\n    beam_z_px = params.beam_z_px.get()\n    cam_y, cam_z = cam_utils.get_offaxis_coords(beam_y_px, beam_z_px,\n                                                cam_pitch=cam_pitch,\n                                                pxsize=pxsize)\n    params.cam_y.put(cam_y)\n    params.cam_z.put(cam_z)\n\n    jet_pitch = cam_utils.get_jet_pitch(params.theta.get(), cam_pitch=cam_pitch)\n    params.jet_pitch.put(jet_pitch)\n    return dict(jet_pitch=jet_pitch, pxsize=pxsize,\n                cam_pitch=cam_pitch)\n\n\ndef calibrate_inline(injector, camera, params, *, settle_time=1.0,\n                     burst_images=20):\n    '''\n    Calibrate the inline camera\n\n    Parameters\n    ----------\n    injector : Injector\n        sample injector\n    camera : Questar\n        camera looking at sample jet and x-rays\n    params : Parameters\n        EPICS PVs used for recording jet tracking data\n    settle_time : float, optional\n        Additional settle time after moving the motor\n    burst_images : int, optional\n        Number of burst images to average from the camera\n    '''\n\n    injector_axis = injector.coarseZ\n    # collect images and motor positions needed for calibration\n    positions, imgs = get_calibration_images(injector_axis, camera,\n                                             settle_time=settle_time,\n                                             burst_images=burst_images)\n\n    # cam_roll: rotation of camera about z axis in radians\n    # pxsize: size of pixel in mm\n    cam_roll, pxsize = cam_utils.get_cam_roll_pxsize(imgs, positions)\n    params.pxsize.put(pxsize)\n    params.cam_roll.put(cam_roll)\n\n    # beam_x_px: x-coordinate of x-ray beam in camera image in pixels\n    beam_x_px = params.beam_x_px.get()\n    # beam_y_px: y-coordinate of x-ray beam in camera image in pixels\n    beam_y_px = params.beam_y_px.get()\n    # cam_x: x-coordinate of camera position in mm\n    # cam_y: y-coordinate of camera position in mm\n    cam_x, cam_y = cam_utils.get_cam_coords(beam_x_px, beam_y_px,\n                                            cam_roll=cam_roll, pxsize=pxsize)\n    params.cam_x.put(cam_x)\n    params.cam_y.put(cam_y)\n\n    # jet_roll: rotation of sample jet about z axis in radians\n    jet_roll = cam_utils.get_jet_roll(params.theta.get(), cam_roll=cam_roll)\n    params.jet_roll.put(jet_roll)\n    return dict(jet_roll=jet_roll, pxsize=pxsize,\n                cam_roll=cam_roll)\n\n\ndef calibrate(injector, camera, cspad, wave8, params, *, offaxis=False, settle_time=0.1):\n    '''\n    Calibrate the camera and CSPAD and determine parameters needed for\n    jet tracking\n    NEED TO CHECK offaxis calculation sign\n\n    First set the ROI of the camera to show the proper jet and illumination.\n\n    Determines the mean, standard deviation, radius, intensity, jet position and\n    tilt, pixel size, camera position and tilt\n\n    Params determined if onaxis camera used: mean, std, radius, intensity, pxsize,\n    camX, camY, cam_roll, jet_roll\n\n    Params determined if offaxis camera used: mean, std, radius, intensity, pxsize,\n    camY, camZ, cam_pitch, jet_pitch\n\n    Parameters\n    ----------\n    injector : Injector\n        sample injector\n    camera : Questar\n        camera looking at sample jet and x-rays\n    cspad : CSPAD\n        CSPAD for data\n    wave8 : Wave8\n        Wave8 to normalize data from CSPAD\n    params : Parameters\n        EPICS PVs used for recording jet tracking data\n    settle_time : float, optional\n        Additional settle time after moving the motor\n    offaxis : bool, optional\n        Camera is off-axis in y-z plane\n    '''\n\n    # find jet in camera ROI\n    ROI_image = get_burst_avg(params.frames_cam.get(), camera.ROI_image)\n    mean, std = 
cam_utils.image_stats(ROI_image)\n rho, theta = cam_utils.jet_detect(ROI_image, mean, std)\n params.mean.put(mean)\n params.std.put(std)\n\n # take calibration CSPAD data\n # get CSPAD and wave8 data\n azav, norm = get_azav(cspad) # call azimuthal average function\n r, i = jt_utils.fit_CSPAD(azav, norm, gas_det) \n params.radius.put(r)\n params.intensity.put(i)\n\n # call appropriate camera calibration method\n if offaxis:\n return calibrate_off_axis(injector, camera, params,\n settle_time=settle_time)\n else:\n return calibrate_inline(injector, camera, params,\n settle_time=settle_time)\n\n\ndef jet_calculate_off_axis(camera, params):\n '''\n Detect the sample jet and calculate the distance to the x-ray beam using\n the off-axis camera\n\n Parameters\n ----------\n camera : Questar\n camera looking at the sample jet and x-ray beam\n params : Parameters\n EPICS PVs used for recording jet tracking data\n offaxis : bool\n Camera is off-axis in y-z plane\n '''\n # detect the jet in the camera ROI\n ROI_image = cam_utils.get_burst_avg(20, camera.ROI_image)\n rho, theta = cam_utils.jet_detect(ROI_image)\n\n # check x-ray beam position\n beam_y_px = params.beam_y_px.get()\n beam_z_px = params.beam_z_px.get()\n cam_y, cam_z = cam_utils.get_offaxis_coords(\n beam_y_px, beam_z_px, cam_pitch=params.cam_pitch.get(),\n pxsize=params.pxsize.get())\n\n params.cam_y.put(cam_y)\n params.cam_z.put(cam_z)\n\n # find distance from jet to x-rays\n roi_z = camera.ROI.min_xyz.min_x.get()\n roi_y = camera.ROI.min_xyz.min_y.get()\n jet_z = cam_utils.get_jet_z(rho, theta, roi_y=roi_y, roi_z=roi_z,\n pxsize=params.pxsize.get(), cam_y=cam_y,\n cam_z=cam_z, beam_y=params.beam_y.get(),\n beam_z=params.beam_z.get(),\n cam_pitch=params.cam_pitch.get())\n params.jet_z.put(jet_z)\n return dict(rho=rho, theta=theta, cam_y=cam_y, cam_z=cam_z, jet_z=jet_z)\n\n\ndef jet_calculate_inline(camera, params):\n '''\n Detect the sample jet and calculate the distance to the x-ray beam using\n the inline camera\n\n Parameters\n ----------\n camera : Questar\n camera looking at the sample jet and x-ray beam\n params : Parameters\n EPICS PVs used for recording jet tracking data\n offaxis : bool\n Camera is off-axis in y-z plane\n '''\n \n # detect the jet in the camera ROI\n ROI_image = cam_utils.get_burst_avg(20, camera.ROI_image)\n rho, theta = cam_utils.jet_detect(ROI_image)\n\n # check x-ray beam position\n beam_x_px = params.beam_x_px.get()\n beam_y_px = params.beam_y_px.get()\n cam_x, cam_y = cam_utils.get_cam_coords(\n beam_x_px, beam_y_px, cam_roll=params.cam_roll.get(),\n pxsize=params.pxsize.get())\n\n params.cam_x.put(cam_x)\n params.cam_y.put(cam_y)\n\n # find distance from jet to x-rays\n ROIx = camera.ROI.min_xyz.min_x.get()\n roi_y = camera.ROI.min_xyz.min_y.get()\n jet_x = cam_utils.get_jet_x(rho, theta, ROIx, roi_y,\n pxsize=params.pxsize.get(), cam_x=cam_x,\n cam_y=cam_y, beam_x=params.beam_x.get(),\n beam_y=params.beam_y.get(),\n cam_roll=params.cam_roll.get())\n params.jet_x.put(jet_x)\n return dict(rho=rho, theta=theta, cam_y=cam_y, cam_x=cam_x, jet_x=jet_x)\n\n\ndef jet_move_inline(injector, camera, params):\n 'A single step of the infinite-loop jet_move'\n ROIx = camera.ROI.min_xyz.min_x.get()\n # roi_y = camera.ROI.min_xyz.min_y.get()\n\n if abs(params.jet_x.get()) > 0.01:\n # move jet to x-rays using injector motor\n print(f'Moving {params.jet_x.get()} mm')\n movex(injector.coarseX, -params.jet_x.get())\n # move the ROI to keep looking at the jet\n min_x = ROIx + (params.jet_x.get() / params.pxsize.get())\n 
camera.ROI.min_xyz.min_x.put(min_x)\n # if params.state == [some state]\n # [use [x] for jet tracking]\n # else if params.state == [some other state]:\n # [use [y] for jet tracking]\n # else if params.state == [some other state]:\n # [revert to manual injector controls]\n # etc...\n\n # if jet is clear in image:\n # if jet_x != beam_x:\n # move injector.coarseX\n # walk_to_pixel(detector, motor, target) ??\n # else if nozzle is clear in image:\n # if nozzleX != beam_x:\n # move injector.coarseX\n # else:\n # if injector.coarseX.get() != beam_x:\n # move injector.coarseX\n\n\ndef jet_scan(injector, cspad):\n '''\n Scans jet across x-rays twice to determine highest intensity, then moves jet\n to that position\n\n Parameters\n ----------\n injector : Injector\n sample injector\n cspad : CSPAD\n CSPAD for data\n '''\n\n # step number & sizes from Mark's code\n x_min = 0.0012\n steps = 50\n \n x_step = (-1) * steps * x_min / 2\n\n hi_intensities = []\n best_pos = []\n\n for i in range(2):\n # move motor to first position \n injector.coarseX.mv(x_step, wait=True)\n intensities = []\n positions = []\n for j in range(steps):\n positions.append(injector.coarseX.user_readback.get())\n # get azav from CSPAD\n # get CSPAD and wave8\n azav, norm = get_azav(CSPAD) #call azimuthal average function\n intensities.append(jt_utils.get_cspad(azav, params.radius.get(), gas_det))\n hi_intensities.append(max(intensities))\n best_pos.append(positions[intensities.index(max(intensities))])\n\n # move motor to average of best positions from two sweeps\n injector.coarseX.mv(np.average(best_pos)) \n\n\n"
},
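get_burst_avg above reduces camera noise by averaging n consecutive frames pixel-wise before any line fitting. A self-contained sketch of the same idea; grab_frame is a hypothetical stand-in for reading an ophyd ImagePlugin:

# Sketch of burst averaging; grab_frame is a hypothetical frame source.
import numpy as np

def burst_average(n, grab_frame):
    frames = np.stack([grab_frame() for _ in range(n)])
    return frames.mean(axis=0)  # pixel-wise mean over the burst

grab_frame = lambda: np.ones((4, 4)) + 0.1 * np.random.randn(4, 4)
print(burst_average(20, grab_frame).round(2))  # close to an all-ones image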
{
"alpha_fraction": 0.6567450761795044,
"alphanum_fraction": 0.6625059843063354,
"avg_line_length": 33.71666717529297,
"blob_id": "bfe9e79dd95490515c2d1efb377227a5b199a07e",
"content_id": "03a0415c096122ff59d6ce6c966b96054893179f",
"detected_licenses": [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2083,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 60,
"path": "/jet_tracking/tests/test_control.py",
"repo_name": "jeannas/jet_tracking",
"src_encoding": "UTF-8",
"text": "import pytest\nfrom . import conftest\n\n\ndef test_smoke_set_beam(jet_control):\n from ..jet_control import set_beam\n set_beam(1, 2, jet_control.params)\n assert jet_control.params.beam_x_px.get() == 1\n assert jet_control.params.beam_y_px.get() == 2\n\n\[email protected](\"use_offaxis\", [False, True])\ndef test_smoke_calibrate(jet_control, injector, questar, parameters,\n offaxis_parameters, use_offaxis):\n from ..jet_control import calibrate\n params = (offaxis_parameters\n if use_offaxis\n else parameters)\n\n conftest.set_random_image(questar.image)\n conftest.set_random_image(questar.ROI_image)\n calibrate(injector=injector, camera=questar, params=params,\n offaxis=use_offaxis)\n\n\[email protected](\"use_offaxis\", [False, True])\ndef test_smoke_jet_calculate(questar, parameters,\n offaxis_parameters, use_offaxis):\n from ..jet_control import jet_calculate_off_axis, jet_calculate_inline\n conftest.set_random_image(questar.image)\n conftest.set_random_image(questar.ROI_image)\n questar.ROI.min_xyz.min_x.sim_put(1)\n questar.ROI.min_xyz.min_y.sim_put(1)\n questar.ROI.min_xyz.min_z.sim_put(1)\n if use_offaxis:\n jet_calculate_off_axis(camera=questar, params=offaxis_parameters)\n else:\n jet_calculate_inline(camera=questar, params=parameters)\n\n\[email protected](\"jet_x\", [0.0, 0.1])\ndef test_smoke_jet_move(injector, questar, parameters,\n jet_x):\n from ..jet_control import jet_move_inline\n questar.ROI.min_xyz.min_x.put(1)\n parameters.jet_x.sim_put(jet_x)\n jet_move_inline(injector=injector, camera=questar, params=parameters)\n\n\ndevices_without_table = {'Questar', 'Offaxis', 'SDS'}\n\n\ndef test_table(device_instances):\n for dev_name in dir(device_instances):\n if dev_name.startswith('_') or dev_name in devices_without_table:\n continue\n dev = getattr(device_instances, dev_name)\n print()\n print(f'-- {dev_name} --')\n print(dev.table)\n"
}
] | 12 |
chinajizhipeng/scrapy_test | https://github.com/chinajizhipeng/scrapy_test | 026537db9a1e28fa5bc1c383d339877a374498a4 | 924f98b81c41d1b45bc2d3379ec6180eab0f917e | 14cc5e5fc0a02a3fc526323c8d43cfe3a980a8f4 | refs/heads/master | 2020-03-25T08:25:54.531428 | 2018-08-06T13:19:50 | 2018-08-06T13:24:12 | 143,611,980 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5939130187034607,
"alphanum_fraction": 0.6095651984214783,
"avg_line_length": 32.85293960571289,
"blob_id": "cbd6758fdea7dfca55dfeb023aec48e6799429dc",
"content_id": "b6b2a373a543d5ac1346e0b1b7c2bc59a904a2f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1256,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 34,
"path": "/tieba.py",
"repo_name": "chinajizhipeng/scrapy_test",
"src_encoding": "UTF-8",
"text": "import requests\nfrom urllib import request\ndef tiebaSpider(url,beginPage,endPage):\n for page in range(beginPage,endPage):\n pn = (page-1) * 50\n filename = \"第\"+str(page)+\"页.html\"\n fullurl = url + \"&pn=\" + str(pn)\n html = loadPage(fullurl, filename)\n writeFile(html, filename)\n print('已完成第%d页'%page)\n\ndef loadPage(url, filename):\n print(\"正在下载\" + filename)\n headers = {\"User-Agent\": \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;\"}\n request_name = request.Request(url = url,headers=headers)\n response = request.urlopen(request_name)\n return response.read().decode('utf-8')\n\ndef writeFile(html, filename):\n print(\"正在存储\" + filename)\n with open(\"C:\\\\Users\\\\123\\\\test\\\\%s\"%filename, 'w',encoding='utf-8') as f:\n f.write(html)\n print(\"-\" * 20)\n\nif __name__ == '__main__':\n kw = input(\"请输入需要爬取的贴吧:\")\n # 输入起始页和终止页,str转成int类型\n beginPage = int(input(\"请输入起始页:\"))\n endPage = int(input(\"请输入终止页:\"))\n url_f = \"http://tieba.baidu.com/f?\"\n key = {\"kw\": kw}\n r = requests.get(url_f, params=key)\n url = r.url\n tiebaSpider(url, beginPage, endPage)"
},
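tieba.py pages through a Tieba forum by advancing the pn query parameter in steps of 50, so user-facing page k maps to pn = (k - 1) * 50. A quick illustration of that mapping; kw=python is an invented example keyword:

# Illustration of the pn paging scheme used by tieba.py.
for page in range(1, 4):
    pn = (page - 1) * 50
    print(page, '->', 'http://tieba.baidu.com/f?kw=python&pn=%d' % pn)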
{
"alpha_fraction": 0.5376154780387878,
"alphanum_fraction": 0.5706115365028381,
"avg_line_length": 40.345455169677734,
"blob_id": "613d1c21b2e5595b42d70462444796dfcd99d384",
"content_id": "cfec4bc6322c2892d046770e08c51f740fb59525",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2419,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 55,
"path": "/xueweipinggu.py",
"repo_name": "chinajizhipeng/scrapy_test",
"src_encoding": "UTF-8",
"text": "from lxml import etree\nimport pandas as pd\ndef first_class():\n html = etree.parse('http://www.chinadegrees.cn/webrms/pages/Ranking/xkpmGXZJ2016.jsp?yjxkdm=0201&xkdm=01,02,03,04,05,06',etree.HTMLParser())\n ps = html.xpath('//table//td[@width=\"13%\"]/p')\n for p in ps:\n url = p.xpath('./a/@href')[0]\n fir_name = p.xpath('./a/text()')[0]\n print(fir_name[0])\n main_url = 'http://www.chinadegrees.cn/webrms/pages/Ranking/%s'%url\n sec_class(main_url,fir_name)\ndef sec_class(main_url,fir_name):\n html = etree.parse(main_url,etree.HTMLParser())\n p2s = html.xpath('//td[@style=\"line-height:25px;\"]/p')\n for ps in p2s:\n #第二级学科网页的属性\n sec_url = ps.xpath('./a/@href')[0]\n #代码 学科\n num_class = ps.xpath('./a/text()')[0]\n #第二级学科的代码\n sec_num = num_class.split(' ')[0] \n #第二级学科的名字\n sec_class = num_class.split(' ')[1]\n #最终爬取的网页\n fin_url = 'http://www.chinadegrees.cn/webrms/pages/Ranking/%s'%sec_url\n scrapy_rating(fin_url,fir_name,sec_class,sec_num)\ndef scrapy_rating(url,fir_name,sec_class,sec_num):\n html = etree.parse(url,etree.HTMLParser())\n trs = html.xpath('//div[@style=\"height:510px; overflow:scroll;\"]//tr') \n for tr in trs:\n fir_names.append(fir_name)\n sec_nums.append(sec_num)\n sec_names.append(sec_class)\n if tr.xpath('./td[@align=\"center\"]/text()'):\n r = tr.xpath('./td[@align=\"center\"]/text()')\n rate.append(r[0])\n num_schs= tr.xpath('./td[@width=\"70%\"]/div/text()')\n num.append(num_schs[0].split('\\xa0\\xa0\\xa0\\xa0\\xa0\\xa0')[0])\n schs.append(num_schs[0].split('\\xa0\\xa0\\xa0\\xa0\\xa0\\xa0')[1])\n else:\n rate.append(r[0])\n num_schs= tr.xpath('./td[@width=\"70%\"]/div/text()')\n num.append(num_schs[0].split('\\xa0\\xa0\\xa0\\xa0\\xa0\\xa0')[0])\n schs.append(num_schs[0].split('\\xa0\\xa0\\xa0\\xa0\\xa0\\xa0')[1])\n data = {'第一大类学科':fir_names,'第二大类学科代码':sec_nums,'第二大类学科名称':sec_names,'等级':rate,'学校代码':num,'学校名称':schs}\n df_data = pd.DataFrame(data)\n df_data.to_excel('学校等级1.xlsx',header=True)\nif __name__ == '__main__':\n\trate = []\n\tnum = []\n\tschs = []\n\tfir_names = []\n\tsec_nums = []\n\tsec_names = []\n\tfirst_class()"
},
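scrapy_rating above splits each school cell on a run of six non-breaking spaces (\xa0) to separate the school code from the school name. A toy demonstration of that parsing; the cell contents are invented:

# Toy demonstration of the \xa0-run split used in scrapy_rating.
cell = u'10001' + u'\xa0' * 6 + u'Peking University'
code, name = cell.split(u'\xa0' * 6)
print(code, name)  # 10001 Peking University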
{
"alpha_fraction": 0.807692289352417,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 25,
"blob_id": "26f615bedaca206b4d437d91c0120b0f3324fda9",
"content_id": "720fe0fc0d3c1cbfabd98e94fdc651d6ff124818",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 2,
"path": "/README.md",
"repo_name": "chinajizhipeng/scrapy_test",
"src_encoding": "UTF-8",
"text": "# scrapy_test\nThe example scrapy project for Python\n"
}
] | 3 |
yongduek/conv-ensemble-str | https://github.com/yongduek/conv-ensemble-str | b96d16aca8db9534f5162d17c03f80a78d2b6d0f | 86029cb059a904ad3a6182e2f0855f0263e34d0b | 5c2813ae5214da99f329efd40d7d97b9f9635d30 | refs/heads/master | 2020-05-25T09:21:57.285964 | 2018-11-29T11:57:11 | 2018-11-29T11:57:11 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6328988671302795,
"alphanum_fraction": 0.6431674361228943,
"avg_line_length": 33.924137115478516,
"blob_id": "6629f466eb64a0a07b89f144990c79f55c5845e9",
"content_id": "fd384cdb56698198983a49389584ebd7257999cb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5066,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 145,
"path": "/utils/utils.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport functools\nimport re\nimport tensorflow as tf\nimport inception_preprocessing\n\nclass CharsetMapper(object):\n \"\"\"A simple class to map tensor ids into strings.\n\n It works only when the character set is 1:1 mapping between individual\n characters and individual ids.\n\n Make sure you call tf.tables_initializer().run() as part of the init op.\n \"\"\"\n\n def __init__(self,\n filename,\n max_sequence_length=30,\n default_character='?',\n null_character=u'\\u2591'):\n \"\"\"Creates a lookup table.\n\n Args:\n charset: a dictionary with id-to-character mapping.\n \"\"\"\n self.null_character = null_character\n self.charset = self._read_charset(filename)\n self.max_sequence_length = max_sequence_length\n\n charset_array = self._dict_to_array(self.charset, default_character)\n mapping_strings = tf.constant(charset_array)\n self.table = tf.contrib.lookup.index_to_string_table_from_tensor(\n mapping=mapping_strings, default_value=default_character)\n self.invert_table = tf.contrib.lookup.index_table_from_tensor(\n mapping=mapping_strings)\n\n @property\n def num_charset(self):\n return len(self.charset)\n\n def get_text(self, ids):\n \"\"\" Returns a string corresponding to a sequence of character ids.\n\n Args:\n ids: a tensor with shape [batch_size, max_sequence_length]\n \"\"\"\n return tf.reduce_join(\n self.table.lookup(tf.to_int64(ids)), reduction_indices=1)\n\n def get_label(self, text, null_character=u'\\u2591'):\n \"\"\" Returns the ids of the corresponding text,\n\n Args:\n text: a tensor with shape [batch_size, lexicon_size]\n and type string\n null_character: a unicode character used to replace '<null>'\n character. the default value is a light shade block '░'.\n \"\"\"\n batch_size = text.shape[0].value\n lexicon_size = text.shape[1].value\n text = tf.reshape(text, [-1])\n sp_text = tf.string_split(text, delimiter='')\n sp_text = tf.sparse_reset_shape(sp_text, [batch_size*lexicon_size,\n self.max_sequence_length])\n sp_text = tf.sparse_tensor_to_dense(sp_text, default_value=null_character)\n ids = self.invert_table.lookup(sp_text)\n ids = tf.reshape(ids, [batch_size, lexicon_size, self.max_sequence_length])\n return tf.to_int32(ids)\n\n def _dict_to_array(self, id_to_char, default_character):\n num_char_classes = max(id_to_char.keys()) + 1\n array = [default_character] * num_char_classes\n for k, v in id_to_char.iteritems():\n array[k] = v\n return array\n\n def _read_charset(self, filename):\n \"\"\"Reads a charset definition from a tab separated text file.\n\n charset file has to have format compatible with the FSNS dataset.\n\n Args:\n filename: a path to the charset file.\n\n Returns:\n a dictionary with keys equal to character codes and values - unicode\n characters.\n \"\"\"\n pattern = re.compile(r'(\\d+)\\t(.+)')\n charset = {}\n with tf.gfile.GFile(filename) as f:\n for i, line in enumerate(f):\n m = pattern.match(line)\n if m is None:\n tf.logging.warning('incorrect charset file. 
line #%d: %s', i, line)\n continue\n code = int(m.group(1))\n char = m.group(2).decode('utf-8')\n if char == '<nul>':\n char = self.null_character\n charset[code] = char\n return charset\n\ndef augment_image(image):\n \"\"\"Augmentation the image with a random modification.\n\n Args:\n image: input Tensor image of rank 3, with the last dimension\n of size 3.\n\n Returns:\n Distorted Tensor image of the same shape.\n \"\"\"\n with tf.variable_scope('AugmentImage'):\n height = image.get_shape().dims[0].value\n width = image.get_shape().dims[1].value\n\n # Random crop cut from the street sign image, resized to the same size.\n # Assures that the crop is covers at least 0.8 area of the input image.\n bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(\n tf.shape(image),\n bounding_boxes=tf.zeros([0, 0, 4]),\n min_object_covered=0.8,\n aspect_ratio_range=[0.8, 1.2],\n area_range=[0.8, 1.0],\n use_image_if_no_bounding_boxes=True)\n distorted_image = tf.slice(image, bbox_begin, bbox_size)\n\n # Randomly chooses one of the 4 interpolation methods\n distorted_image = inception_preprocessing.apply_with_random_selector(\n distorted_image,\n lambda x, method: tf.image.resize_images(x, [height, width], method),\n num_cases=4)\n distorted_image.set_shape([height, width, 3])\n\n # Color distortion\n # TODO:incompatible with clip value in inception_preprocessing.distort_color\n distorted_image = inception_preprocessing.apply_with_random_selector(\n distorted_image,\n functools.partial(\n inception_preprocessing.distort_color, fast_mode=False),\n num_cases=4)\n distorted_image = tf.clip_by_value(distorted_image, -1.5, 1.5)\n\n return distorted_image\n"
},
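CharsetMapper above wraps a TF1.x string lookup table: character ids go through an index-to-string table and are joined into one string per row, exactly as get_text does. A minimal TF1-style sketch with a made-up three-character charset:

# TF1.x-style sketch of the id -> string mapping; the charset is invented.
import tensorflow as tf

mapping_strings = tf.constant([u'\u2591', u'a', u'b'])
table = tf.contrib.lookup.index_to_string_table_from_tensor(
    mapping=mapping_strings, default_value='?')
ids = tf.constant([[1, 2, 0]], dtype=tf.int64)
text = tf.reduce_join(table.lookup(ids), reduction_indices=1)

with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(text))  # one joined string per row, padded with the null char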
{
"alpha_fraction": 0.6529656648635864,
"alphanum_fraction": 0.6584287285804749,
"avg_line_length": 33.94545364379883,
"blob_id": "dde79f6428092245bf90d92ac1753c91c85e6de9",
"content_id": "b34ab6aadfce3c74f47530fbc851746344152475",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3844,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 110,
"path": "/utils/hooks.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "\"\"\" Hooks used for learn.Experiment\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport abc\nimport os\nimport six\nimport tensorflow as tf\n\[email protected]_metaclass(abc.ABCMeta)\nclass Prediction(tf.train.SessionRunHook):\n \"\"\" Write predictions to file.\n \"\"\"\n def __init__(self, params, output_dir):\n self.params = params\n self.output_dir = output_dir\n\n def begin(self):\n # pylint: disable=attribute-defined-outside-init\n # fetch tensors\n self.predicted_text = tf.get_collection('prediction')[0][\"predicted_text\"]\n self.image_names = tf.get_collection('prediction')[0][\"image_names\"]\n\n # file handle\n file_name = \"{}.log\".format(self.params['dataset']['dataset_name'])\n file_name = os.path.join(self.output_dir, file_name)\n # log to file\n self.file = open(file_name, 'w')\n\n def before_run(self, _run_context):\n fetches = {}\n fetches[\"predicted_text\"] = self.predicted_text\n fetches[\"image_names\"] = self.image_names\n return tf.train.SessionRunArgs(fetches)\n\n def after_run(self, _run_context, run_values):\n predicted_text_batch = run_values.results[\"predicted_text\"]\n image_name_batch = run_values.results[\"image_names\"]\n assert len(predicted_text_batch) == len(image_name_batch)\n for i in range(len(predicted_text_batch)):\n image_name = image_name_batch[i]\n text = predicted_text_batch[i].decode('utf-8').replace(u'\\u2591', '')\n text = text.encode(\"ascii\")\n line = '{}, \"{}\"'.format(image_name, text)\n tf.logging.info(line)\n self.file.write(line + '\\r\\n')\n self.file.flush()\n\n def end(self, _session):\n # disable log to file\n self.file.close()\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass FalsePrediction(tf.train.SessionRunHook):\n \"\"\" Write false predictions to file.\n \"\"\"\n def __init__(self, params, output_dir):\n self.params = params\n self.output_dir = output_dir\n\n def begin(self):\n # pylint: disable=attribute-defined-outside-init\n # fetch tensors\n self.predicted_text = tf.get_collection('prediction')[0][\"predicted_text\"]\n self.gt_text = tf.get_collection('prediction')[0][\"gt_text\"]\n self.image_names = tf.get_collection('prediction')[0][\"image_names\"]\n self.predicted_mask = tf.get_collection('predicted_mask')[0]\n\n # file handle\n file_name = \"{}-false.log\".format(self.params['dataset']['dataset_name'])\n file_name = os.path.join(self.output_dir, file_name)\n # log to file\n self.file = open(file_name, 'w')\n self.file.write(\"image name, ground-truth text, predicted text\\r\\n\")\n\n def before_run(self, _run_context):\n fetches = {}\n fetches[\"predicted_text\"] = self.predicted_text\n fetches[\"gt_text\"] = self.gt_text\n fetches[\"image_names\"] = self.image_names\n fetches[\"predicted_mask\"] = self.predicted_mask\n return tf.train.SessionRunArgs(fetches)\n\n def after_run(self, _run_context, run_values):\n predicted_text = run_values.results[\"predicted_text\"]\n true_text = run_values.results[\"gt_text\"]\n image_names = run_values.results[\"image_names\"]\n predicted_mask = run_values.results[\"predicted_mask\"]\n assert len(predicted_text) == len(image_names)\n assert len(true_text) == len(predicted_mask)\n for i in range(len(predicted_text)):\n if predicted_mask[i]:\n # true prediction\n continue\n image_name = image_names[i]\n pt_text = predicted_text[i].decode('utf-8').replace(u'\\u2591', '')\n pt_text = pt_text.encode(\"ascii\")\n gt_text = 
true_text[i].decode('utf-8').replace(u'\\u2591', '')\n gt_text = gt_text.encode(\"ascii\")\n line = '{}, {}, {}\\r\\n'.format(image_name, gt_text, pt_text)\n self.file.write(line)\n self.file.flush()\n\n def end(self, _session):\n # disable log to file\n self.file.close()\n"
},
{
"alpha_fraction": 0.6291035413742065,
"alphanum_fraction": 0.6329444050788879,
"avg_line_length": 37.87042236328125,
"blob_id": "1be2efb9a20417a19effd93fc26f95e70bbc4f8a",
"content_id": "d59b1676fb323465b0697ef57775db2a0b0b82ea",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13799,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 355,
"path": "/model/model.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "\"\"\" Top view model interface.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport logging\nimport yaml\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.learn import ModeKeys\nfrom tensorflow.python.framework import ops\nfrom tensorflow.contrib.framework.python.framework import init_from_checkpoint\nfrom tensorflow.contrib.framework.python.framework import list_variables\nfrom tensorflow.contrib.framework.python.ops import get_variables_to_restore\nfrom tensorflow.contrib.seq2seq.python.ops.decoder import _transpose_batch_time\nfrom model.encoder_resnet import EncoderResnet\nfrom model.decoder_conv import DecoderConv\nfrom utils.utils import augment_image\nfrom utils.utils import CharsetMapper\n\nclass Model(object):\n \"\"\" Top view model.\n \"\"\"\n def __init__(self, params, mode):\n # params\n self.params = params\n self.mode = mode\n\n # charset\n charset_file = os.path.join(self.params['dataset']['dataset_dir'],\n self.params['dataset']['charset_filename'])\n self.charset = CharsetMapper(charset_file,\n self.params['dataset']['max_sequence_length'])\n\n # endcoder and decoder\n self.encoder = EncoderResnet(params, mode)\n self.decoder = DecoderConv(params, mode, self.charset.num_charset)\n\n tf.logging.info(\"Model params in mode=%s\", self.mode)\n tf.logging.info(\"\\n%s\", yaml.dump({\"Model\": self.params}))\n\n def __call__(self, features, labels):\n with tf.variable_scope('model'):\n # Pre-process features and labels\n tf.logging.info('Preprocess data.')\n features, labels = self._preprocess(features, labels)\n tf.logging.info('Create encoder.')\n encoder_output = self.encoder(features)\n tf.logging.info('Create decoder.')\n decoder_output = self.decoder(encoder_output, labels)\n\n if self.params['checkpoint']:\n self._restore_variables(self.params['checkpoint'])\n\n # loss is zero during eval\n loss = tf.zeros([])\n train_op = None\n if self.mode == ModeKeys.TRAIN:\n tf.logging.info('Compute loss.')\n loss = self._compute_loss(decoder_output, labels)\n # TODO(Shancheng): gradient multipliers\n train_op = self._build_train_op(loss)\n tf.logging.info('Compute Statistics.')\n self._compute_statistics()\n\n if self.params['summary']:\n tf.logging.info('Create summaries.')\n self._create_summaries(decoder_output, features, labels)\n\n tf.logging.info('Create predictions.')\n predictions = self._create_predictions(decoder_output, features, labels)\n tf.logging.info('Model done.')\n return predictions, loss, train_op\n\n def _restore_variables(self, checkpoint):\n \"\"\" restore variables from checkpoint as much as possible\n \"\"\"\n checkpoint_variables_map = list_variables(checkpoint)\n valid_variable = lambda name: name.startswith('model/encoder') or \\\n name.startswith('model/decoder')\n checkpoint_variable_names = [name for (name, _) in checkpoint_variables_map\n if valid_variable(name)]\n\n variables = get_variables_to_restore()\n variable_names = [v.name.split(':')[0] for v in variables]\n assignment_map = {}\n for var in checkpoint_variable_names:\n if var in variable_names:\n assignment_map[var] = var\n\n init_from_checkpoint(checkpoint, assignment_map)\n\n def _preprocess(self, features, labels):\n \"\"\" Data augmentation and label process.\n \"\"\"\n with tf.variable_scope('preprocess'):\n with tf.variable_scope('image'):\n features['image_orig'] = features['image']\n image = 
tf.image.convert_image_dtype(features['image_orig'],\n dtype=tf.float32)\n if self.mode == ModeKeys.TRAIN:\n images = tf.unstack(image)\n images = [augment_image(img) for img in images]\n image = tf.stack(images)\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n features['image'] = image\n\n if labels is None:\n return features, None\n\n with tf.variable_scope('label'):\n # TODO(Shancheng): use start token and end token rather constant 0\n # labels for decoder input\n labels['label_input'] = tf.concat([labels['label'][:, -1:],\n labels['label'][:, 0:-1]], axis=1)\n # from text length to training label length\n labels['length'] = tf.reshape(labels['length'], [-1])\n labels['length'] = labels['length'] + 1\n\n return features, labels\n\n def _build_train_op(self, loss, gradient_multipliers=None):\n \"\"\"Creates the training operation\"\"\"\n # Creates the optimizer\n name = self.params[\"optimizer\"]\n optimizer = tf.contrib.layers.OPTIMIZER_CLS_NAMES[name](\n learning_rate=self.params[\"learning_rate\"],\n **self.params[\"optimizer_params\"])\n\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=tf.contrib.framework.get_global_step(),\n learning_rate=self.params[\"learning_rate\"],\n learning_rate_decay_fn=None,\n clip_gradients=self.params['clip_gradients'],\n optimizer=optimizer,\n gradient_multipliers=gradient_multipliers,\n summaries=[\"learning_rate\", \"loss\", \"gradients\", \"gradient_norm\"])\n\n return train_op\n\n def _create_predictions(self, decoder_output, features, labels=None):\n \"\"\"Creates the dictionary of predictions that is returned by the model.\n \"\"\"\n with tf.name_scope(\"create_predictions\"):\n predicted_ids = _transpose_batch_time(decoder_output.predicted_ids)\n predicted_text = self.charset.get_text(predicted_ids)\n attention_scores = decoder_output.attention_scores\n original_images = features[\"image_orig\"]\n prediction = {\"predicted_ids\": predicted_ids,\n \"predicted_text\": predicted_text,\n \"images\": original_images,\n \"attention_scores\": attention_scores}\n if \"name\" in features:\n prediction[\"image_names\"] = features['name']\n if labels:\n gt_text = self.charset.get_text(labels[\"label\"])\n prediction[\"gt_text\"] = gt_text\n tf.add_to_collection(\"prediction\", prediction)\n return prediction\n\n def _create_summaries(self, decoder_output, features, labels=None):\n \"\"\"Create summaries for tensorboard.\n \"\"\"\n with tf.name_scope(\"create_summaries\"):\n max_outputs = self.params['max_outputs']\n\n # input images\n image = features['image']\n tf.summary.image(self._sname('image'), image, max_outputs)\n if self.mode == ModeKeys.TRAIN:\n image_orig = features['image_orig']\n tf.summary.image(self._sname('image_orig'), image_orig, max_outputs)\n\n # ground-truth text\n if self.mode != ModeKeys.INFER:\n gt_text = self.charset.get_text(labels[\"label\"][:max_outputs, :])\n tf.summary.text(self._sname('text/gt'), gt_text)\n\n # predicted text\n predicted_ids = _transpose_batch_time(decoder_output.predicted_ids)\n predicted_ids = tf.to_int64(predicted_ids[:max_outputs, :])\n predicted_text = self.charset.get_text(predicted_ids)\n tf.summary.text(self._sname('text/pt'), predicted_text)\n\n def add_attention_summary(att_scores, family='attention'):\n for att_score in att_scores:\n name = att_score.name.replace(\":\", \"_\")\n shape = tf.shape(att_score)\n # pylint: disable=invalid-name\n N, M, H, W = shape[0], shape[1], shape[2], shape[3]\n score = tf.reshape(att_score, [N, M * H, W])\n score = 
tf.expand_dims(score, 3)\n tf.summary.image(name, score, max_outputs=max_outputs, family=family)\n\n def add_std_max_summary(tensors, family):\n for tensor in tensors:\n name = tensor.name.replace(\":\", \"_\")\n _, var = tf.nn.moments(tf.reshape(tensor, [-1]), [0])\n tf.summary.scalar(name, tf.sqrt(var), family=family + \"_std\")\n max_value = tf.reduce_max(tensor)\n tf.summary.scalar(name, max_value, family=family + \"_max\")\n\n # attention scores [N, L, M, H, W]\n attention_scores = decoder_output.attention_scores\n # unstack layer\n attention_scores = tf.unstack(attention_scores, axis=1)\n add_attention_summary(attention_scores, 'attention')\n\n # weight\n weigths = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n add_std_max_summary(weigths, 'weights')\n\n # conv1 and encoder output activation\n encoder_outputs = tf.get_collection('model/encoder/')\n add_std_max_summary(encoder_outputs, 'activation')\n\n # encoder activation\n encoder_outputs = tf.get_collection('model/encoder/resnet/_end_points')\n add_std_max_summary(encoder_outputs, 'activation')\n\n # decoder activation\n decoder_outputs = tf.get_collection('model/decoder')\n add_std_max_summary(decoder_outputs, 'activation')\n\n # TODO(Shancheng): use tensorflow loss interface\n def _compute_loss(self, decoder_output, labels):\n \"\"\"Computes the loss for this model.\n \"\"\"\n with tf.name_scope(\"compute_loss\"):\n language_logit = decoder_output.logits[0]\n attention_logit = decoder_output.logits[1]\n batch_size = self.params['dataset']['batch_size']\n\n language_losses = self._cross_entropy_sequence_loss(\n logits=language_logit,\n targets=tf.transpose(labels[\"label\"], [1, 0]),\n sequence_length=labels[\"length\"])\n attention_losses = self._cross_entropy_sequence_loss(\n logits=attention_logit,\n targets=tf.transpose(labels[\"label\"], [1, 0]),\n sequence_length=labels[\"length\"])\n\n language_loss = tf.reduce_sum(language_losses) / batch_size\n attention_loss = tf.reduce_sum(attention_losses) / batch_size\n loss = language_loss + attention_loss\n\n return loss\n\n def _cross_entropy_sequence_loss(self, logits, targets, sequence_length):\n \"\"\"Calculates the per-example cross-entropy loss for a sequence of logits\n and masks out all losses passed the sequence length.\n\n Args:\n logits: Logits of shape `[T, B, vocab_size]`\n targets: Target classes of shape `[T, B]`\n sequence_length: An int32 tensor of shape `[B]` corresponding\n to the length of each input\n\n Returns:\n A tensor of shape [T, B] that contains the loss per example,\n per time step.\n \"\"\"\n with tf.name_scope(\"cross_entropy_sequence_loss\"):\n losses = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=targets)\n\n # Mask out the losses we don't care about\n loss_mask = tf.sequence_mask(\n tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))\n losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])\n\n return losses\n\n def _compute_statistics(self):\n \"\"\" Compute parameter number and flops.\n \"\"\"\n # log to file\n output_dir = self.params['output_dir']\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n output_dir = os.path.join(output_dir, 'statistics.log')\n log = logging.getLogger('tensorflow')\n handle = logging.FileHandler(output_dir)\n log.addHandler(handle)\n\n # FLOPS\n encoder_flops, decoder_flops = 0, 0\n encoder_count, decoder_count = 0, 0\n graph = tf.get_default_graph()\n for operation in graph.get_operations():\n flops = ops.get_stats_for_node_def(graph, 
operation.node_def,\n 'flops').value\n if flops is None:\n continue\n if operation.name.startswith('model/encoder'):\n # encoder\n encoder_flops += flops\n encoder_count += 1\n tf.logging.info('encoder operation %s : %d', operation.name, flops)\n elif operation.name.startswith('model/decoder'):\n # decoder\n decoder_flops += flops\n decoder_count += 1\n tf.logging.info('decoder operation %s : %d', operation.name, flops)\n else:\n # gradient\n pass\n tf.logging.info('flops of %d encoder tensor: %d',\n encoder_count, encoder_flops)\n tf.logging.info('flops of %d decoder tensor: %d',\n decoder_count, decoder_flops)\n tf.logging.info('flops of total %d tensor: %d',\n encoder_count + decoder_count,\n encoder_flops + decoder_flops)\n # parameters\n encoder_parameters, decoder_parameters = 0, 0\n encoder_count, decoder_count = 0, 0\n for var in tf.trainable_variables():\n parameters = np.prod(var.get_shape().as_list())\n if var.name.startswith('model/encoder'):\n # encoder\n encoder_parameters += parameters\n encoder_count += 1\n tf.logging.info('encoder variable %s : %d', var.name, parameters)\n elif var.name.startswith('model/decoder'):\n # decoder\n decoder_parameters += parameters\n decoder_count += 1\n tf.logging.info('decoder variable %s : %d', var.name, parameters)\n\n tf.logging.info('parameters of %d encoder tensor: %d',\n encoder_count, encoder_parameters)\n tf.logging.info('parameters of %d decoder tensor: %d',\n decoder_count, decoder_parameters)\n tf.logging.info('parameters of total %d tensor: %d',\n encoder_count + decoder_count,\n encoder_parameters + decoder_parameters)\n # disable log to file\n log.removeHandler(handle)\n\n def _sname(self, label):\n \"\"\" Utility.\n \"\"\"\n if self.mode == ModeKeys.TRAIN:\n prefix = 'train'\n elif self.mode == ModeKeys.EVAL:\n prefix = 'eval'\n else:\n prefix = 'infer'\n return '%s/%s' % (prefix, label)\n"
},
{
"alpha_fraction": 0.6635293960571289,
"alphanum_fraction": 0.6650000214576721,
"avg_line_length": 30.924882888793945,
"blob_id": "0e8f9966e7e24ab646fa80a55b43035969e0b01d",
"content_id": "25d3a826eb3151be0570603cf8cd0a5990abe2ef",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6800,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 213,
"path": "/train_eval.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# Copyright 2017 IIE, CAS.\n# Written by Shancheng Fang\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\"Main script to run training and evaluation of models.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport logging\nimport tensorflow as tf\nfrom tensorflow.contrib.learn.python.learn import Experiment\nfrom tensorflow.contrib.learn.python.learn import learn_runner\nfrom tensorflow.contrib.learn.python.learn.estimators import run_config\n\nimport config\nimport datasets\nfrom model.model import Model\nfrom utils.hooks import Prediction, FalsePrediction\nfrom utils.metrics import sequence_accuracy, char_accuracy\n\nFLAGS = tf.flags.FLAGS\nconfig.define()\n\ndef _create_dataset_params():\n \"\"\"Create dataset params\n \"\"\"\n dparams = {\n \"dataset_name\": FLAGS.dataset_name,\n \"dataset_dir\": FLAGS.dataset_dir,\n \"batch_size\": FLAGS.batch_size\n }\n\n if FLAGS.schedule == 'train':\n split_name = FLAGS.split_name or 'train'\n dparams.update({\n 'shuffle': True,\n 'smaller_final_batch': False,\n 'num_epochs': None,\n 'split_name': split_name})\n elif FLAGS.schedule == 'evaluate':\n split_name = FLAGS.split_name or 'test'\n dparams.update({\n 'shuffle': False,\n 'smaller_final_batch': True,\n 'num_epochs': 1,\n 'split_name': split_name})\n else:\n split_name = FLAGS.split_name or 'test'\n dparams.update({\n 'shuffle': False,\n 'smaller_final_batch': False,\n 'num_epochs': None,\n 'split_name': split_name})\n return dparams\n\ndef _create_model_params(dataset):\n \"\"\"Create model params\n \"\"\"\n mparams = {\n \"optimizer\": FLAGS.optimizer,\n \"learning_rate\": FLAGS.learning_rate,\n \"clip_gradients\": FLAGS.clip_gradients,\n \"dataset\": dataset.params,\n \"optimizer_params\": {\n \"momentum\": FLAGS.momentum,\n \"use_nesterov\": FLAGS.use_nesterov\n },\n \"summary\": FLAGS.summary,\n \"max_outputs\": FLAGS.max_outputs,\n \"beam_width\": FLAGS.beam_width,\n \"output_dir\": FLAGS.output_dir,\n \"checkpoint\": FLAGS.checkpoint\n }\n return mparams\n\ndef _create_hooks(mparams, output_dir):\n \"\"\"Create hooks\n \"\"\"\n # Create training hooks\n train_hooks = []\n # Create evaluating hooks and eval config\n eval_hooks = []\n\n # Write prediction to file\n prediction_hook = Prediction(mparams, FLAGS.output_dir)\n eval_hooks.append(prediction_hook)\n\n # Write false prediction to file\n false_prediction_hook = FalsePrediction(mparams, FLAGS.output_dir)\n eval_hooks.append(false_prediction_hook)\n\n if FLAGS.schedule == 'continuous_eval':\n eval_output_dir = os.path.join(output_dir, 'eval_continuous')\n eval_hooks.append(tf.contrib.training.SummaryAtEndHook(eval_output_dir))\n elif FLAGS.schedule == 'evaluate':\n # stop until data are exhausted\n FLAGS.eval_steps = None\n\n if FLAGS.debug:\n from tensorflow.python import debug as 
tf_debug\n debug_hook = tf_debug.LocalCLIDebugHook()\n train_hooks.append(debug_hook)\n eval_hooks.append(debug_hook)\n return train_hooks, eval_hooks\n\ndef _create_experiment(output_dir):\n \"\"\"\n Creates a new Experiment instance.\n\n Args:\n output_dir: Output directory for model checkpoints and summaries.\n \"\"\"\n # Runconfig\n session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(\n per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction,\n allow_growth=FLAGS.gpu_allow_growth))\n estimator_config = run_config.RunConfig(\n session_config=session_config,\n gpu_memory_fraction=FLAGS.gpu_memory_fraction,\n tf_random_seed=FLAGS.tf_random_seed,\n log_step_count_steps=FLAGS.log_step,\n save_checkpoints_secs=FLAGS.save_checkpoints_secs,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours)\n\n # Dataset\n mode = tf.contrib.learn.ModeKeys.TRAIN if FLAGS.schedule == 'train' \\\n else tf.contrib.learn.ModeKeys.EVAL\n dataset = datasets.create_dataset(\n def_dict=_create_dataset_params(),\n mode=mode,\n use_beam_search=FLAGS.beam_width)\n\n # Model function\n def model_fn(features, labels, params, mode):\n \"\"\"Builds the model graph\"\"\"\n model = Model(params, mode)\n predictions, loss, train_op = model(features, labels)\n eval_metrics = {\n 'character': char_accuracy(predictions['predicted_ids'],\n labels['label']),\n 'sequence': sequence_accuracy(predictions['predicted_ids'],\n labels['label'])\n }\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metrics)\n # Model parameters\n mparams = _create_model_params(dataset)\n # Estimator\n estimator = tf.estimator.Estimator(\n model_fn=model_fn,\n model_dir=output_dir,\n config=estimator_config,\n params=mparams)\n\n train_hooks, eval_hooks = _create_hooks(mparams, output_dir)\n\n if FLAGS.schedule != 'train':\n # log to file\n file_name = \"{}-tensorflow.log\".format(mparams['dataset']['dataset_name'])\n file_name = os.path.join(FLAGS.output_dir, file_name)\n log = logging.getLogger('tensorflow')\n handle = logging.FileHandler(file_name)\n log.addHandler(handle)\n\n return Experiment(\n estimator=estimator,\n train_input_fn=dataset.create_input_fn,\n eval_input_fn=dataset.create_input_fn,\n train_steps=FLAGS.train_steps,\n eval_steps=FLAGS.eval_steps,\n train_monitors=train_hooks,\n eval_hooks=eval_hooks,\n eval_delay_secs=0)\n\ndef main(_argv):\n \"\"\"Main function\n \"\"\"\n schedules = ['train', 'evaluate', 'continuous_eval']\n assert FLAGS.schedule in schedules,\\\n \"Only schedules: %s supported!\"%(','.join(schedules))\n\n learn_runner.run(\n experiment_fn=_create_experiment,\n output_dir=FLAGS.output_dir,\n schedule=FLAGS.schedule)\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n"
},
{
"alpha_fraction": 0.6786426901817322,
"alphanum_fraction": 0.6858283281326294,
"avg_line_length": 38.140625,
"blob_id": "acfa54e5ffb9f652d4451756ffbecc43122d0929",
"content_id": "fc7282a0775bb4b497430fc3a16f9ac678befa27",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2505,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 64,
"path": "/utils/metrics.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "\"\"\"Quality metrics for the model.\"\"\"\n\nimport tensorflow as tf\n\ndef char_accuracy(predictions, labels, rej_char=0, streaming=True,\n ignore_case=True):\n \"\"\" Evaluate in character level.\n \"\"\"\n with tf.variable_scope('CharAccuracy'):\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n targets = tf.to_int32(labels)\n if ignore_case:\n predictions = _lower_to_captical_case(predictions)\n targets = _lower_to_captical_case(targets)\n const_rej_char = tf.fill(tf.shape(targets), rej_char)\n weights = tf.to_float(tf.not_equal(targets, const_rej_char))\n correct_chars = tf.to_float(tf.equal(predictions, targets))\n accuracy_per_example = tf.div(\n tf.reduce_sum(tf.multiply(correct_chars, weights), 1),\n tf.reduce_sum(weights, 1))\n if streaming:\n streaming_mean = tf.contrib.metrics.streaming_mean(accuracy_per_example)\n return streaming_mean\n else:\n return tf.reduce_mean(accuracy_per_example)\n\ndef sequence_accuracy(predictions, labels, streaming=True, ignore_case=True):\n \"\"\" Evaluate in word level.\n \"\"\"\n with tf.variable_scope('SequenceAccuracy'):\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n\n targets = tf.to_int32(labels)\n include_predictions = predictions\n\n if ignore_case:\n include_predictions = _lower_to_captical_case(include_predictions)\n targets = _lower_to_captical_case(targets)\n\n correct_chars = tf.to_float(tf.equal(include_predictions, targets))\n correct_chars_counts = tf.cast(\n tf.reduce_sum(correct_chars, reduction_indices=[1]), dtype=tf.int32)\n target_length = targets.get_shape().dims[1].value\n target_chars_counts = tf.fill(tf.shape(correct_chars_counts), target_length)\n accuracy_per_example = tf.to_float(\n tf.equal(correct_chars_counts, target_chars_counts))\n if streaming:\n streaming_mean = tf.contrib.metrics.streaming_mean(accuracy_per_example)\n tf.add_to_collection('predicted_mask', accuracy_per_example)\n return streaming_mean\n else:\n return tf.reduce_mean(accuracy_per_example)\n\ndef _lower_to_captical_case(src):\n # ranks of src can be any\n low, high = 11, 62\n space = (high - low + 1) / 2\n mid_tf = tf.fill(tf.shape(src), high - space + 1)\n high_tf = tf.fill(tf.shape(src), high)\n mid_mask = tf.greater_equal(src, mid_tf)\n high_mask = tf.less_equal(src, high_tf)\n case_mask = tf.logical_and(mid_mask, high_mask)\n return tf.where(case_mask, src - space, src)\n"
},
{
"alpha_fraction": 0.5849207043647766,
"alphanum_fraction": 0.5948225259780884,
"avg_line_length": 37.5040397644043,
"blob_id": "0dad90cc9c80d623409ce87a299c35afd375e1cc",
"content_id": "ffa1f69fd04dbe12005d9f0de230ca0cfae70cbc",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23834,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 619,
"path": "/model/decoder_conv.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "\"\"\"Convoltuional decoder with attention and language ensemble.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport abc\nimport collections\nimport six\nimport tensorflow as tf\nfrom tensorflow.contrib.learn import ModeKeys\nfrom tensorflow.contrib.seq2seq.python.ops.decoder import Decoder\nfrom tensorflow.contrib.seq2seq.python.ops.decoder import _transpose_batch_time\nfrom tensorflow.contrib.seq2seq.python.ops.decoder import dynamic_decode\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tensorflow.contrib.layers import layer_norm\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom utils import beam_search\n\nDecoderOutput = collections.namedtuple(\n \"DecoderOutput\",\n [\"logits\", \"predicted_ids\", \"attention_scores\"]\n)\n\nBeamDecoderOutput = collections.namedtuple(\n \"BeamDecoderOutput\",\n [\"logits\", \"predicted_ids\", \"attention_scores\",\n \"log_probs\", \"scores\", \"beam_parent_ids\"]\n)\n\nDECODER_DEFUALT_PARAM = {\n \"cnn_layers\": 6,\n \"cnn_hiddens\": [512, 512, 512, 512, 512, 512],\n \"cnn_kernel\": 3,\n \"position_embeddings\": True,\n \"nout_embed\": 256,\n \"embedding_dim\": 512\n}\n\nclass DecoderConv(object):\n \"\"\" Main decoder class.\n \"\"\"\n def __init__(self, params, mode, num_charset):\n self.params = params\n self.params.update(DECODER_DEFUALT_PARAM)\n self.mode = mode\n self.num_charset = num_charset\n self.max_sequence_length = self.params['dataset']['max_sequence_length']\n\n def __call__(self, encoder_output, labels):\n if self.mode == ModeKeys.TRAIN:\n with tf.variable_scope(\"decoder\"):\n outputs = self.conv_decoder_train(encoder_output, labels)\n return outputs\n else:\n outputs, _, __ = self.conv_decoder_infer(encoder_output)\n return outputs\n\n def conv_decoder_train(self, encoder_output, labels):\n label_input = labels['label_input']\n length = labels['length']\n conv_block = ConvBlock(self.params,\n self.num_charset,\n is_training=True)\n\n next_layer = self.add_embedding(label_input, length)\n\n language, attention, att_scores = conv_block(encoder_output, next_layer)\n\n language_logit = _transpose_batch_time(language)\n attention_logit = _transpose_batch_time(attention)\n ensemble_logit = language_logit + attention_logit\n\n sample_ids = tf.cast(tf.argmax(ensemble_logit, axis=-1), tf.int32)\n\n return DecoderOutput(logits=(language_logit, attention_logit),\n predicted_ids=sample_ids,\n attention_scores=att_scores)\n\n def conv_decoder_infer(self, encoder_output):\n beam_decoder = BeamDecoder(self.params, self.mode,\n encoder_output, self.num_charset)\n # As tensorflow does not support initializing variable with tensor\n # in a loop or conditional\n beam_decoder.init_params_in_loop()\n tf.get_variable_scope().reuse_variables()\n\n outputs, final_state, final_sequence_lengths = dynamic_decode(\n decoder=beam_decoder,\n output_time_major=True,\n impute_finished=False,\n maximum_iterations=self.max_sequence_length,\n scope='decoder')\n\n return outputs, final_state, final_sequence_lengths\n\n def add_embedding(self, labels, length):\n \"\"\" Add embeddings for labels\n Args:\n labels: The labels with shape [N, T], where N is batch size\n and T s time steps.\n length: The length for time steps with shape [N]\n Returns:\n The embeded labels\n \"\"\"\n with tf.variable_scope(\"embedding\"):\n # label embedding\n label_embedding = 
tf.get_variable(\n name=\"W\",\n shape=[self.num_charset, self.params[\"embedding_dim\"]],\n initializer=tf.random_normal_initializer(mean=0.0, stddev=1))\n next_layer = tf.nn.embedding_lookup(label_embedding, labels)\n\n # position embedding\n if self.params[\"position_embeddings\"]:\n position_embeding = tf.get_variable(\n name=\"W_pos\",\n shape=[self.max_sequence_length,\n self.params[\"embedding_dim\"]],\n initializer=tf.random_normal_initializer(mean=0.0, stddev=1))\n\n # Replicate encodings for each element in the batch\n batch_size = tf.shape(length)[0]\n pe_batch = tf.tile([position_embeding], [batch_size, 1, 1])\n\n # Mask out positions that are padded\n positions_mask = tf.sequence_mask(\n lengths=length, maxlen=self.max_sequence_length, dtype=tf.float32)\n positions_embed = pe_batch * tf.expand_dims(positions_mask, 2)\n\n next_layer = tf.add(next_layer, positions_embed)\n\n return next_layer\n\[email protected]_metaclass(abc.ABCMeta)\nclass BeamDecoder(Decoder):\n \"\"\"Decoder using beam search in eval and infer.\n \"\"\"\n def __init__(self, params, mode, encoder_outputs, num_charset):\n self.params = params\n self.mode = mode\n self.num_charset = num_charset\n self.max_sequence_length = self.params['dataset']['max_sequence_length']\n self.initial_state = encoder_outputs\n self.fm_height = encoder_outputs.get_shape()[1]\n self.fm_width = encoder_outputs.get_shape()[2]\n\n # convolution net\n self.conv_block = ConvBlock(self.params,\n self.num_charset,\n is_training=False)\n\n # TODO(Shancheng): use start token and end token rather constant 0\n self.start_token = 0\n self.end_token = 0\n\n # beam search config\n self.config = beam_search.BeamSearchConfig(\n beam_width=self.params['beam_width'],\n vocab_size=self.num_charset,\n eos_token=self.end_token,\n length_penalty_weight=1.0,\n choose_successors_fn=beam_search.choose_top_k)\n\n @property\n def batch_size(self):\n return self.params['beam_width']\n\n @property\n def output_size(self):\n return BeamDecoderOutput(\n logits=tf.TensorShape([self.num_charset]),\n predicted_ids=tf.TensorShape([]),\n attention_scores=tf.TensorShape([self.params['cnn_layers'],\n self.fm_height,\n self.fm_width]),\n log_probs=tf.TensorShape([]),\n scores=tf.TensorShape([]),\n beam_parent_ids=tf.TensorShape([]))\n\n @property\n def output_dtype(self):\n return BeamDecoderOutput(\n logits=tf.float32,\n predicted_ids=tf.int32,\n attention_scores=tf.float32,\n log_probs=tf.float32,\n scores=tf.float32,\n beam_parent_ids=tf.int32)\n\n def initialize(self, name=None):\n finished = tf.tile([False], [self.params['beam_width']])\n\n start_tokens_batch = tf.fill([self.params['beam_width']], self.start_token)\n first_inputs = self.add_embedding(start_tokens_batch, time=tf.constant(0))\n\n zeros_padding = tf.zeros([self.params['beam_width'],\n self.max_sequence_length - 1,\n first_inputs.get_shape().as_list()[-1]])\n first_inputs = tf.concat([first_inputs, zeros_padding], axis=1)\n\n beam_state = beam_search.create_initial_beam_state(self.config)\n\n encoder_output = tf.tile(self.initial_state,\n [self.params['beam_width'], 1, 1, 1])\n\n return finished, first_inputs, (encoder_output, beam_state)\n\n def step(self, time, inputs, state, name=None):\n encoder_output, beam_state = state\n cur_inputs = inputs[:, 0:time + 1, :]\n zeros_padding = inputs[:, time + 2:, :]\n\n language, attention, scores = self.conv_block(encoder_output,\n cur_inputs)\n # TODO(Shancheng): now it is add operation\n logits = language + attention\n shape = logits.get_shape().as_list()\n 
logits = tf.reshape(logits, [-1, shape[-1]])\n\n bs_output, beam_state = beam_search.beam_search_step(\n time_=time,\n logits=logits,\n beam_state=beam_state,\n config=self.config)\n\n finished, next_inputs = self.next_inputs(bs_output.predicted_ids, (time+1))\n\n next_inputs = tf.reshape(next_inputs,\n [self.params['beam_width'], 1,\n inputs.get_shape().as_list()[-1]])\n next_inputs = tf.concat([cur_inputs, next_inputs], axis=1)\n next_inputs = tf.concat([next_inputs, zeros_padding], axis=1)\n next_inputs.set_shape([self.params['beam_width'],\n self.max_sequence_length,\n inputs.get_shape().as_list()[-1]])\n\n outputs = BeamDecoderOutput(\n logits=logits,\n predicted_ids=bs_output.predicted_ids,\n attention_scores=scores,\n log_probs=beam_state.log_probs,\n scores=bs_output.scores,\n beam_parent_ids=bs_output.beam_parent_ids)\n return outputs, (encoder_output, beam_state), next_inputs, finished\n\n def finalize(self, output, final_state, sequence_lengths):\n # Gather according to beam search result\n # now predicted_ids is [M, N/B]\n predicted_ids = beam_search.gather_tree(output.predicted_ids,\n output.beam_parent_ids)\n # TODO(Shancheng): pay attention\n beam_width = output.beam_parent_ids.get_shape().as_list()\n parent_ids = tf.concat([tf.zeros([1, beam_width[-1]], dtype=tf.int32),\n output.beam_parent_ids[:-1, :]], 0)\n # now logits is [M, N/B, C]\n logits = beam_search.gather_tree(output.logits,\n parent_ids)\n # now attention scores is [M, N/B, L, H, W]\n attention_scores = beam_search.gather_tree(output.attention_scores,\n parent_ids)\n # orginal length is the length of ungathered logits\n sequence_lengths = math_ops.not_equal(predicted_ids, self.end_token)\n sequence_lengths = tf.to_int32(sequence_lengths)\n sequence_lengths = tf.reduce_sum(sequence_lengths, axis=0) + 1\n\n # choose the top score item\n predicted_ids = predicted_ids[:, 0:1]\n logits = logits[:, 0:1]\n attention_scores = attention_scores[:, 0:1]\n # mask out\n length = sequence_lengths[0]\n logits = logits[0:length, :]\n attention_scores = attention_scores[0:length, :]\n\n final_outputs = DecoderOutput(\n logits=self._padding(logits),\n predicted_ids=self._padding(predicted_ids),\n attention_scores=attention_scores)\n\n return final_outputs, final_state\n\n def add_embedding(self, labels, time):\n \"\"\" Add embedding in current time step.\n Args:\n labels: The labels with shape [beam_width,] or [beam_width,1]\n time: The time index\n Rreturn:\n The embeded labels\n \"\"\"\n with tf.variable_scope(\"embedding\"):\n rank = len(labels.shape)\n assert rank == 1 or rank == 2, \"labels must be rank 1 or 2\"\n if rank == 1:\n labels = tf.expand_dims(labels, axis=1)\n # label embedding\n label_embedding = tf.get_variable(\n name=\"W\",\n shape=[self.num_charset, self.params[\"embedding_dim\"]],\n initializer=tf.random_normal_initializer(mean=0.0, stddev=1))\n next_input = tf.nn.embedding_lookup(label_embedding, labels)\n\n # position embedding\n if self.params[\"position_embeddings\"]:\n position_embeding = tf.get_variable(\n name=\"W_pos\",\n shape=[self.max_sequence_length,\n self.params[\"embedding_dim\"]],\n initializer=tf.random_normal_initializer(mean=0.0, stddev=1))\n\n seq_pos_embed = position_embeding[time, :]\n seq_pos_embed = tf.reshape(seq_pos_embed, [1, 1, -1])\n seq_pos_embed_batch = tf.tile(seq_pos_embed,\n [self.params['beam_width'], 1, 1])\n next_input = tf.add(next_input, seq_pos_embed_batch)\n\n return next_input\n\n def next_inputs(self, sample_ids, time):\n def true_fn():\n # If we're in the last time 
step\n finished = tf.fill(sample_ids.get_shape().as_list(), True)\n next_inputs = tf.zeros([self.params['beam_width'], 1,\n self.params[\"embedding_dim\"]])\n return finished, next_inputs\n\n def false_fn():\n finished = math_ops.equal(sample_ids, self.end_token)\n all_finished = math_ops.reduce_all(finished)\n end_tokens = tf.tile([self.end_token], [self.params['beam_width']])\n next_inputs = control_flow_ops.cond(\n all_finished,\n # If we're finished, the next_inputs value doesn't matter\n lambda: self.add_embedding(end_tokens, time),\n lambda: self.add_embedding(sample_ids, time))\n return finished, next_inputs\n\n finished = (time >= self.max_sequence_length)\n return control_flow_ops.cond(finished, true_fn, false_fn)\n\n def init_params_in_loop(self):\n with tf.variable_scope(\"decoder\"):\n _, initial_inputs, initial_state = self.initialize()\n enc_output, _ = initial_state\n # pylint: disable=attribute-defined-outside-init\n self.conv_block.is_init = True\n self.conv_block(enc_output, initial_inputs)\n self.conv_block.is_init = False\n\n def _padding(self, tensor):\n \"\"\" Pad output to max_sequence_length,\n for example, paddings = [[0, pad_time],[0,0]]\n \"\"\"\n shape = tf.shape(tensor)\n pad_time = tf.expand_dims(self.max_sequence_length - shape[0], 0)\n zeros = tf.zeros_like(shape, dtype=shape.dtype)\n paddings = tf.concat([pad_time, zeros[1:]], 0)\n paddings = tf.stack([zeros, paddings], 1)\n return tf.pad(tensor, paddings)\n\nclass ConvBlock(object):\n \"\"\"Basic convolutional block of the decoder.\n \"\"\"\n def __init__(self, params, num_charset, is_training=True):\n self.num_charset = num_charset\n self.is_training = is_training\n self.params = params\n self.max_sequence_length = self.params['dataset']['max_sequence_length']\n\n def __call__(self, encoder_output, input_embed):\n output_collection = tf.get_variable_scope().name\n next_layer = input_embed\n att_scores = []\n\n # 1D convolution\n for layer_idx in range(self.params['cnn_layers']):\n with tf.variable_scope(\"conv\" + str(layer_idx)):\n nout = self.params['cnn_hiddens'][layer_idx]\n\n # language module\n # special process here, first pad then conv,\n # because tf does not support padding other than SAME and VALID\n kernel_width = self.params['cnn_kernel']\n paddings = [[0, 0], [kernel_width - 1, kernel_width - 1], [0, 0]]\n language_layer = tf.pad(next_layer, paddings, \"CONSTANT\")\n # 1D convolution\n language_layer = self.conv1d_weightnorm(\n inputs=language_layer,\n out_dim=nout * 2,\n kernel_size=kernel_width,\n padding=\"VALID\",\n output_collection=output_collection)\n # to avoid using future information\n language_layer = language_layer[:, 0:-kernel_width + 1, :]\n\n # add GLU\n language_layer = self.gated_linear_units(language_layer,\n output_collection)\n\n # shortcut and layer norm\n language_layer = language_layer + next_layer\n language_layer = layer_norm(language_layer,\n begin_norm_axis=2,\n scope='glu')\n\n # attention module\n att_out, att_score = self.make_attention(input_embed,\n encoder_output,\n next_layer,\n output_collection)\n\n # shortcut and layer norm\n attention_layer = att_out + next_layer\n attention_layer = layer_norm(attention_layer,\n begin_norm_axis=2,\n scope='attention')\n\n # TODO(Shancheng): now it is an add operation\n next_layer = language_layer + attention_layer\n\n if self.is_training:\n tf.add_to_collection(output_collection, next_layer)\n att_scores.append(att_score)\n\n # shape=[layer_num, batch_size / beam_width, step, height, width]\n att_scores = tf.stack(att_scores)\n # shape=[batch_size 
/ beam_width, layer_num, step, height, width]\n att_scores = tf.transpose(att_scores, [1, 0, 2, 3, 4])\n\n language_logit, scores = self.create_logit(language_layer,\n att_scores,\n output_collection,\n \"language_logit\")\n attention_logit, scores = self.create_logit(attention_layer,\n att_scores,\n output_collection,\n \"attention_logit\")\n return language_logit, attention_logit, scores\n\n def create_logit(self, next_layer, att_scores, output_collection, scope):\n # output\n with tf.variable_scope(scope):\n if not self.is_training:\n # only keep the last time step\n # [N/B, M, C] --> [N/B, 1, C]\n next_layer = next_layer[:, -1:, :]\n # [N/B, L, M, H, W] --> [N/B, L, H, W]\n att_scores = att_scores[:, :, -1, :, :]\n\n next_layer = self.linear_mapping_weightnorm(\n next_layer,\n out_dim=self.params[\"nout_embed\"],\n output_collection=output_collection)\n next_layer = layer_norm(next_layer, begin_norm_axis=2)\n next_layer = self.linear_mapping_weightnorm(\n next_layer,\n out_dim=self.num_charset,\n var_scope_name=\"liear_logits\",\n output_collection=output_collection)\n\n return next_layer, att_scores\n\n def linear_mapping_weightnorm(self, inputs, out_dim,\n var_scope_name=\"linear\",\n output_collection=None):\n with tf.variable_scope(var_scope_name):\n # pylint: disable=invalid-name\n input_shape = inputs.get_shape().as_list() # static shape. may has None\n # use weight normalization (Salimans & Kingma, 2016) w = g* v/2-norm(v)\n V = tf.get_variable(\n name='V',\n shape=[int(input_shape[-1]), out_dim],\n dtype=tf.float32,\n initializer=initializers.variance_scaling_initializer())\n # V shape is M*N, V_norm shape is N\n V_norm = tf.norm(V.initialized_value(), axis=0)\n g = tf.get_variable('g', dtype=tf.float32, initializer=V_norm)\n # weightnorm bias is init zero\n b = tf.get_variable(\n name='b',\n shape=[out_dim],\n dtype=tf.float32,\n initializer=tf.zeros_initializer())\n\n assert len(input_shape) == 3\n if input_shape[0] is None:\n input_shape = tf.shape(inputs)\n inputs = tf.reshape(inputs, [-1, input_shape[-1]])\n inputs = tf.matmul(inputs, V)\n inputs = tf.reshape(inputs, [input_shape[0], -1, out_dim])\n\n # g/2-norm(v)\n scaler = tf.div(g, tf.norm(V, axis=0))\n # x*v g/2-norm(v) + b\n inputs = tf.reshape(scaler, [1, out_dim]) * inputs + tf.reshape(b, [1, out_dim])\n\n if self.is_training:\n tf.add_to_collection(output_collection, inputs)\n return inputs\n\n def conv1d_weightnorm(self, inputs, out_dim, kernel_size, padding=\"SAME\",\n var_scope_name=\"conv1d\", output_collection=None):\n with tf.variable_scope(var_scope_name):\n # pylint: disable=invalid-name\n in_dim = int(inputs.get_shape()[-1])\n V = tf.get_variable(\n name='V',\n shape=[kernel_size, in_dim, out_dim],\n dtype=tf.float32,\n initializer=initializers.variance_scaling_initializer())\n # V shape is M*N*k, V_norm shape is k\n V_norm = tf.norm(V.initialized_value(), axis=[0, 1])\n g = tf.get_variable('g', dtype=tf.float32, initializer=V_norm)\n b = tf.get_variable(\n name='b',\n shape=[out_dim],\n dtype=tf.float32,\n initializer=tf.zeros_initializer())\n\n # use weight normalization (Salimans & Kingma, 2016)\n W = tf.reshape(g, [1, 1, out_dim]) * tf.nn.l2_normalize(V, [0, 1])\n inputs = tf.nn.conv1d(value=inputs, filters=W, stride=1, padding=padding)\n inputs = tf.nn.bias_add(inputs, b)\n\n if self.is_training:\n tf.add_to_collection(output_collection, inputs)\n return inputs\n\n def gated_linear_units(self, inputs, output_collection=None):\n input_shape = inputs.get_shape().as_list()\n assert len(input_shape) == 
3\n input_pass = inputs[:, :, 0:int(input_shape[2] / 2)]\n input_gate = inputs[:, :, int(input_shape[2] / 2):]\n input_gate = tf.sigmoid(input_gate)\n inputs = tf.multiply(input_pass, input_gate)\n\n if self.is_training:\n tf.add_to_collection(output_collection, inputs)\n return inputs\n\n def make_attention(self, target_embed, encoder_output,\n decoder_hidden, output_collection):\n with tf.variable_scope(\"attention\"):\n embed_size = target_embed.get_shape().as_list()[-1]\n hidden_size = decoder_hidden.get_shape().as_list()[-1]\n\n decoder_rep = decoder_hidden + target_embed\n # character project to image\n decoder_rep = self.linear_mapping_weightnorm(\n decoder_rep,\n out_dim=embed_size,\n var_scope_name=\"linear_query\",\n output_collection=output_collection)\n\n att_out, att_score = self.attention_score_pooling(decoder_rep,\n encoder_output)\n # image project to character\n att_out = self.linear_mapping_weightnorm(\n att_out,\n out_dim=hidden_size,\n var_scope_name=\"linear_out\",\n output_collection=output_collection)\n\n return att_out, att_score\n\n def attention_score_pooling(self, dec_rep, encoder_output):\n # pylint: disable=invalid-name\n # static shape\n N, H, W, C = encoder_output.get_shape().as_list()\n # static shape in train, dynamic shape in infer\n N = N or tf.shape(dec_rep)[0]\n M = dec_rep.get_shape().as_list()[1] or tf.shape(dec_rep)[1]\n\n encoder_reshape = tf.reshape(encoder_output, [N, H * W, C]) # N*(H*W)*C\n\n # N*M*C ** N*(H*W)*C --> N*M*(H*W)\n att_score = tf.matmul(dec_rep, encoder_reshape,\n transpose_b=True) * tf.sqrt(1.0 / C)\n\n att_score = tf.transpose(att_score, [0, 2, 1]) # N*(H*W)*M\n att_score = tf.reshape(att_score, [N, H, W, M]) # N*H*W*M\n att_score = tf.pad(att_score,\n [[0, 0], [1, 1], [1, 1], [0, 0]],\n \"SYMMETRIC\")\n att_score = tf.nn.avg_pool(att_score,\n [1, 3, 3, 1],\n [1, 1, 1, 1],\n padding='VALID') # N*H*W*M\n att_score = tf.reshape(att_score, [N, H * W, M]) # N*(H*W)*M\n att_score = tf.transpose(att_score, [0, 2, 1]) # N*M*(H*W)\n att_score = tf.nn.softmax(att_score) # N*M*(H*W)\n\n # N*M*(H*W) ** N*(H*W)*C --> N*M*C\n att_out = tf.matmul(att_score, encoder_reshape)\n att_score = tf.reshape(att_score, [N, M, H, W])\n return att_out, att_score\n\n def attention_score(self, dec_rep, encoder_output):\n # pylint: disable=invalid-name\n # static shape\n N, H, W, C = encoder_output.get_shape().as_list()\n # static shape in train, dynamic shape in infer\n N = N or tf.shape(dec_rep)[0]\n M = dec_rep.get_shape().as_list()[1] or tf.shape(dec_rep)[1]\n\n encoder_reshape = tf.reshape(encoder_output, [N, H * W, C]) # N*(H*W)*C\n\n # N*M*C ** N*(H*W)*C --> N*M*(H*W)\n att_score = tf.matmul(dec_rep, encoder_reshape,\n transpose_b=True) * tf.sqrt(1.0 / C)\n\n att_score = tf.nn.softmax(att_score) # N*M*(H*W)\n\n # N*M*(H*W) ** N*(H*W)*C --> N*M*C\n att_out = tf.matmul(att_score, encoder_reshape)\n att_score = tf.reshape(att_score, [N, M, H, W])\n return att_out, att_score\n"
},
{
"alpha_fraction": 0.5196192264556885,
"alphanum_fraction": 0.5286742448806763,
"avg_line_length": 46.32966995239258,
"blob_id": "3b5be1e27cafc141514a5e74bfaac3c179cfaced",
"content_id": "10954eba7d288a10c9e1b610ebb9fab78eb743e1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4307,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 91,
"path": "/config.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "# Copyright 2017 IIE, CAS.\n# Written by Shancheng Fang\n# ==============================================================================\n\n\"\"\"Define flags are common for train_eval.py scripts.\"\"\"\nimport tensorflow as tf\n\n\ndef define():\n \"\"\"Define common flags.\"\"\"\n # yapf: disable\n tf.flags.DEFINE_string(\"output_dir\", \"/tmp/workdir\",\n \"\"\"The directory to write model checkpoints and\n summaries. If None, a local temporary directory\n is created.\"\"\")\n tf.flags.DEFINE_string(\"checkpoint\", None,\n \"\"\"checkpoint to restore variables\"\"\")\n tf.flags.DEFINE_boolean(\"debug\", False,\n \"\"\"use tfdbg to debug\"\"\")\n\n # Model config\n tf.flags.DEFINE_integer(\"beam_width\", 5,\n \"\"\"beam width. 0 for close beam search.\"\"\")\n\n # Model hyper parameters\n tf.flags.DEFINE_string(\"optimizer\", \"Momentum\",\n \"\"\"the optimizer to use\"\"\")\n tf.flags.DEFINE_float(\"learning_rate\", 0.01,\n \"\"\"learning rate\"\"\")\n tf.flags.DEFINE_float(\"clip_gradients\", 20.0,\n \"\"\"number of clipped gradients\"\"\")\n tf.flags.DEFINE_float(\"momentum\", 0.9,\n \"\"\"momentum value for the momentum optimizer if\n used\"\"\")\n tf.flags.DEFINE_boolean(\"use_nesterov\", True,\n \"\"\"use nesterov\"\"\")\n\n # Dataset config\n tf.flags.DEFINE_string(\"dataset_name\", \"MJSynth\",\n \"\"\"Name of the dataset. Supported: fsns\"\"\")\n tf.flags.DEFINE_string(\"dataset_dir\", None,\n \"\"\"Dataset root folder.\"\"\")\n tf.flags.DEFINE_string(\"split_name\", None,\n \"\"\"Name of the dataset split.\"\"\")\n tf.flags.DEFINE_integer(\"batch_size\", 128,\n \"\"\"Batch size used for training and evaluation.\"\"\")\n\n # Training and evaluating parameters\n tf.flags.DEFINE_string(\"schedule\", \"train\",\n \"\"\"Estimator function to call, defaults to\n continuous_train_and_eval for local run\"\"\")\n tf.flags.DEFINE_integer(\"train_steps\", 1000000,\n \"\"\"Maximum number of training steps to run.\n If None, train forever.\"\"\")\n tf.flags.DEFINE_integer(\"eval_steps\", 500,\n \"Run N steps evaluation.\")\n\n # RunConfig Flags\n tf.flags.DEFINE_integer(\"tf_random_seed\", None,\n \"\"\"Random seed for TensorFlow initializers. 
Setting\n this value allows consistency between reruns.\"\"\")\n tf.flags.DEFINE_integer(\"save_checkpoints_secs\", 900,\n \"\"\"Save checkpoints every this many seconds.\n Can not be specified with save_checkpoints_steps.\"\"\")\n tf.flags.DEFINE_integer(\"save_checkpoints_steps\", None,\n \"\"\"Save checkpoints every this many steps.\n Can not be specified with save_checkpoints_secs.\"\"\")\n tf.flags.DEFINE_integer(\"keep_checkpoint_max\", 5,\n \"\"\"Maximum number of recent checkpoint files to keep.\n As new files are created, older files are deleted.\n If None or 0, all checkpoint files are kept.\"\"\")\n tf.flags.DEFINE_integer(\"keep_checkpoint_every_n_hours\", 4,\n \"\"\"In addition to keeping the most recent checkpoint\n files, keep one checkpoint file for every N hours of\n training.\"\"\")\n tf.flags.DEFINE_float(\"gpu_memory_fraction\", 1.0,\n \"\"\"Fraction of GPU memory used by the process on\n each GPU uniformly on the same machine.\"\"\")\n tf.flags.DEFINE_boolean(\"gpu_allow_growth\", False,\n \"\"\"Allow GPU memory allocation to grow\n dynamically.\"\"\")\n tf.flags.DEFINE_integer(\"log_step\", 100,\n \"\"\"log_step_count_steps\"\"\")\n\n # Summary config\n tf.flags.DEFINE_boolean(\"summary\", True,\n \"\"\"log to summary\"\"\")\n tf.flags.DEFINE_integer(\"max_outputs\", 4,\n \"\"\"the max outputs number to summary images and text\n in a batch\"\"\")\n # yapf: enable\n"
},
{
"alpha_fraction": 0.5924255847930908,
"alphanum_fraction": 0.603922426700592,
"avg_line_length": 36.27730941772461,
"blob_id": "1d84a32d69d18248b54275471fe99e443e8f975b",
"content_id": "8e992807797cbed7a7ca2d9d016457d77753b6d8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4436,
"license_type": "permissive",
"max_line_length": 103,
"num_lines": 119,
"path": "/tools/make_tfrecord_datasets.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\"\"\" A tool to convert mjsynth datasets to tfrecords datasets.\n\"\"\"\nimport os\nimport tensorflow as tf\nimport cv2\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\nSPLITS = ['train', 'test', 'val']\nORIGINAL_DIR = '/home/data/Dataset/mjsynth'\nOUTPUT_DIR = '/home/data/Dataset/tf-data/tf-mjsynth'\nCHARSET = 'data/charset_size=63.txt'\nMAX_WORD_LENGTH = 30\nNULL_CODE = 0\nNORM_SIZE = (100, 32)\nFILE_NUM_LIMIT = 3000\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\ndef _get_charset():\n charset_path = os.path.join(ORIGINAL_DIR, CHARSET)\n assert tf.gfile.Exists(charset_path), charset_path\n charset = {}\n with open(charset_path) as file:\n for line in file:\n value, key = line.split()\n charset[key] = value\n return charset\n\ndef _get_label(text, charset):\n unpadded_label = []\n for c in text:\n label = int(charset[c])\n unpadded_label.append(label)\n label = [unpadded_label[i] if i < len(unpadded_label) else NULL_CODE for i in range(MAX_WORD_LENGTH)]\n return label, unpadded_label\n\ndef _get_text(path):\n return path.split('_')[1].strip()\n\ndef run(split, charset):\n annotation = os.path.join(ORIGINAL_DIR, 'annotation_%s.txt'%(split))\n assert tf.gfile.Exists(annotation), annotation\n split_dir = os.path.join(OUTPUT_DIR, split)\n tf.gfile.MkDir(split_dir)\n\n with tf.Graph().as_default():\n with tf.Session('') as sess:\n image_placeholder = tf.placeholder(tf.uint8)\n encoded_image = tf.image.encode_png(image_placeholder)\n with open(annotation, 'r') as file:\n lines = file.readlines()\n with open('/tmp/fsc-error.log', 'a') as efile:\n fold_index = 0\n for index, line in enumerate(lines):\n image_name, word_label = line.split()\n split_name = os.path.splitext(os.path.split(image_name)[1])[0]\n text = _get_text(image_name)\n text_length = len(text)\n label, unpadded_label = _get_label(text, charset)\n image_path = os.path.join(ORIGINAL_DIR, image_name)\n try:\n image = plt.imread(image_path)\n image = cv2.resize(image, NORM_SIZE)\n width, height = image.shape[0:2]\n\n jpg_image = sess.run(encoded_image, feed_dict={image_placeholder:image})\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/encoded': _bytes_feature([jpg_image]),\n 'image/format': _bytes_feature([\"PNG\"]),\n 'image/class': _int64_feature(label),\n 'image/unpadded_class': _int64_feature(unpadded_label),\n 'image/text': _bytes_feature([text]),\n 'image/text_length': _int64_feature([text_length]),\n 'image/height': _int64_feature([height]),\n 'image/width': _int64_feature([width]),\n 'image/word_class': _int64_feature([int(word_label)]),\n 'image/name':_bytes_feature([split_name])}))\n if index % FILE_NUM_LIMIT == 0:\n fold_name = '%s-%04d'%(split, fold_index)\n fold_dir = os.path.join(split_dir, fold_name)\n tf.gfile.MkDir(fold_dir)\n fold_index += 1\n\n tfrecords_filename = '%s.tfrecord'%(os.path.splitext(os.path.basename(image_name))[0])\n tfrecords_filename = os.path.join(fold_dir, tfrecords_filename)\n print '[%.2f%%] Writing %s'%(100.0*index/len(lines), tfrecords_filename)\n writer = tf.python_io.TFRecordWriter(tfrecords_filename)\n writer.write(example.SerializeToString())\n except IOError:\n message = 'bad image: %s\\n'%image_path\n efile.write(message)\n efile.flush()\n except Exception as e:\n efile.write('something error:%s\\n'%(e.message))\n efile.flush()\n\ndef 
main():\n assert tf.gfile.Exists(ORIGINAL_DIR)\n if tf.gfile.Exists(OUTPUT_DIR):\n tf.gfile.DeleteRecursively(OUTPUT_DIR)\n tf.gfile.MkDir(OUTPUT_DIR)\n \n tf.gfile.Copy(os.path.join(ORIGINAL_DIR, CHARSET),\n os.path.join(OUTPUT_DIR, CHARSET))\n\n charset = _get_charset()\n for split in SPLITS:\n run(split, charset)\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6045845150947571,
"alphanum_fraction": 0.6217765212059021,
"avg_line_length": 32.238094329833984,
"blob_id": "eae57899cf3e998889db40dacae9a2fd5128d522",
"content_id": "dd19d772da4c675a0adb29aec8aa8261817703fa",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2094,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 63,
"path": "/model/encoder_resnet.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "\"\"\"Residual encoder.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport tensorflow as tf\nfrom tensorflow.contrib.learn import ModeKeys\nfrom tensorflow.contrib import slim\nfrom tensorflow.contrib.slim.nets import resnet_utils\nfrom tensorflow.contrib import layers as layers_lib\nfrom tensorflow.contrib.framework.python.ops import arg_scope\nfrom model import resnet_v2\n\nENCODER_DEFUALT_PARAM = {\n \"block_name\": [\"block1\", \"block2\", \"block3\", \"block4\"],\n \"base_depth\": [16, 32, 64, 128],\n \"num_units\" : [2, 2, 2, 6],\n \"stride\" : [2, 1, 1, 1]\n}\n\nclass EncoderResnet(object):\n \"\"\" Residual encoder using off-the-shelf interface.\n \"\"\"\n def __init__(self, params, mode):\n self.params = params\n self.mode = mode\n self.encoder_params = ENCODER_DEFUALT_PARAM\n\n def __call__(self, features):\n \"\"\" Define tf graph.\n \"\"\"\n inputs = features['image']\n\n with tf.variable_scope('encoder') as vsc:\n with slim.arg_scope(resnet_v2.resnet_arg_scope()):\n # conv1\n with arg_scope(\n [layers_lib.conv2d], activation_fn=None, normalizer_fn=None):\n net = resnet_utils.conv2d_same(inputs, 16, 5, stride=2, scope='conv1')\n tf.add_to_collection(vsc.original_name_scope, net)\n\n # resnet blocks\n blocks = []\n for i in range(len(self.encoder_params['block_name'])):\n block = resnet_v2.resnet_v2_block(\n scope=self.encoder_params['block_name'][i],\n base_depth=self.encoder_params['base_depth'][i],\n num_units=self.encoder_params['num_units'][i],\n stride=self.encoder_params['stride'][i])\n blocks.append(block)\n net, _ = resnet_v2.resnet_v2(\n net,\n blocks,\n is_training=(self.mode == ModeKeys.TRAIN),\n global_pool=False,\n output_stride=2,\n include_root_block=False,\n scope='resnet')\n\n tf.add_to_collection(vsc.original_name_scope, net)\n return net\n"
},
{
"alpha_fraction": 0.634067952632904,
"alphanum_fraction": 0.6483011841773987,
"avg_line_length": 31.507463455200195,
"blob_id": "a257d53a23c461633a0ab3a167a4625f75e1a436",
"content_id": "f3a45dce30fd29436bd4d73df0fdd44b5e65117e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2178,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 67,
"path": "/demo.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# Copyright 2017-2018 IIE, CAS.\n# Written by Shancheng Fang\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\" A quick demo to recognize text.\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\nfrom PIL import Image\nfrom tensorflow.contrib.learn import ModeKeys\nfrom model.model import Model\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--path', type=str, required=True,\n help='path to image file.')\n parser.add_argument('--checkpoint', type=str, default='data/model.ckpt',\n help='path to image file.')\n args = parser.parse_args()\n\n params = {\n 'checkpoint': args.checkpoint,\n 'dataset':{\n 'dataset_dir': 'data',\n 'charset_filename': 'charset_size=63.txt',\n 'max_sequence_length': 30,\n },\n 'beam_width': 1,\n 'summary': False\n }\n model = Model(params, ModeKeys.INFER)\n image = tf.placeholder(tf.uint8, (1, 32, 100, 3), name='image')\n predictions, _, _ = model({'image': image}, None)\n\n assert os.path.exists(args.path), '%s does not exists!' % args.path\n raw_image = Image.open(args.path).convert('RGB')\n raw_image = raw_image.resize((100, 32), Image.BILINEAR)\n raw_image = np.array(raw_image)[None, :]\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())\n predictions = sess.run(predictions, feed_dict={image: raw_image})\n text = predictions['predicted_text'][0]\n print('%s: %s' % (args.path, text))\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.5600000023841858,
"alphanum_fraction": 0.5879999995231628,
"avg_line_length": 34.71428680419922,
"blob_id": "31e256d51989bb51b723622df733b4f011d52ad0",
"content_id": "b2169eae442854b69eb127e34d7d7d220703a1a7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 250,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 7,
"path": "/train.sh",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nexport CUDA_VISIBLE_DEVICES=0\nexport workdir=workdir-master\n\npython train_eval.py --output_dir=${workdir} \\\n --train_steps=300000 \\\n --dataset_dir=/home/data/Dataset/tf-mjsynth\n"
},
{
"alpha_fraction": 0.7588315010070801,
"alphanum_fraction": 0.7713994383811951,
"avg_line_length": 42.29411697387695,
"blob_id": "a2a5ab2043faa716385da6aad6494316786372bc",
"content_id": "1b1c0059e101fd83c6a7f4be113896041894fca9",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2944,
"license_type": "permissive",
"max_line_length": 650,
"num_lines": 68,
"path": "/README.md",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "## Attention and Language Ensemble for Scene Text Recognition with Convolutional Sequence Modeling\n\n\n### Requirements\n\n- Tensorflow-gpu r1.3.0\n- Python 2.7\n- CUDA 8.0\n\n### Introduction\n\n\n\nThe proposed architecture consists of an encoder that extracts abstractive image features, and a decoder that generates character sequences. The encoder is a residual convolutional network, and the decoder is also based on the deep convolutional network.\n\nConcretely, the first layer of the encoder is a base convolutional layer which converts images to feature maps in specific distribution. Then a deep encoder is built by stacking residual blocks. Also, there is a similar structure in the decoder. After embedding input symbols, the stacked decoder blocks predict the output sequences. Inside each decoder block, an attention module and a language\nmodule are designed equally as an ensemble. The attention module focuses on interest region from encoder feature maps, whose main operations are scaled dot-product. The language module, based on gated convolutional layers, aims to model the language sequences in character level. In our work, visual cues and linguistic rules are of the same importance. Based on this point, attention focusing and language modeling with the same input are regarded as an ensemble to boost prediction jointly. In addition, we use batch normalization in the encoder and layer normalization in the decoder to keep variances stable across the main nodes of the networks.\n\n\n### Demo\n\nA simple demo `demo.py` is provided to recognize text from an image file using pretrained model. The pretrained model can be found [here](https://www.dropbox.com/s/81j7zcr23vqd8zq/model.ckpt.tar.gz?dl=0).\n\n```\npython demo.py --path=data/demo.jpg --checkpoint=PATH_TO_PRETRAINED_MODEL\n```\n\n\n### Training\n\n1. Prepare training dataset.\n\n Prepare training datasets into tfrecord format. You can customize your datasets based our tfrecord tool under `tools/make_tfrecord_datasets.py`.\n\n2. Start training.\n\n - Train from scratch.\n ```\n ./train.sh\n ```\n\n - Or use the pretrained model by adding an additional flag: `--checkpoint=--checkpoint=PATH_TO_PRETRAINED_MODEL`\n\n3. Evaluate the model continuously during training.\n\n ```\n ./eval_continous.sh\n ```\n\n\n### Citation\n\nPlease cite our paper if our work is helpful to you.\n\n```\n@inproceedings{fang2018attention,\n title={Attention and Language Ensemble for Scene Text Recognition with Convolutional Sequence Modeling},\n author={Fang, Shancheng and Xie, Hongtao and Zha, Zheng-Jun and Sun, Nannan and Tan, Jianlong and Zhang, Yongdong},\n booktitle={2018 ACM Multimedia Conference on Multimedia Conference},\n pages={248--256},\n year={2018},\n organization={ACM}\n}\n```\n\n\nNote: Our work is based on the previous work [conv_seq2seq](https://github.com/tobyyouup/conv_seq2seq).\nThanks the authors for sharing the code.\n"
},
{
"alpha_fraction": 0.5215053558349609,
"alphanum_fraction": 0.5286738276481628,
"avg_line_length": 31.882352828979492,
"blob_id": "253ce9a1205246b4d80f8cae9bc1e9ffa813b350",
"content_id": "938030583893a5e7d59233b1709134e383d0f372",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 558,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 17,
"path": "/eval_continous.sh",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nexport CUDA_VISIBLE_DEVICES=''\n# MJSynth, SVT, ...\ndataset=MJSynth\n# continuous_eval, evaluate\nschedule=continuous_eval\nworkdir=workdir-master\nsplit_name=test\neval_step=500\n\npython train_eval.py --output_dir=${workdir} \\\n --schedule=${schedule} \\\n --dataset_name=${dataset} \\\n --eval_steps=${eval_step} \\\n --split_name=${split_name} \\\n --beam_width=1 \\\n --dataset_dir=/home/data/Dataset/tf-mjsynth"
},
{
"alpha_fraction": 0.5491606593132019,
"alphanum_fraction": 0.5515587329864502,
"avg_line_length": 31.153846740722656,
"blob_id": "5f0f3070b92cce4b1cfda6bc602a7e0619f5a9ae",
"content_id": "6313889eb393d4912aab532973d8597d24f87221",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 417,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 13,
"path": "/eval_one_pass.sh",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env bash\nexport CUDA_VISIBLE_DEVICES=''\n# MJSynth, SVT, ...\ndataset=MJSynth\n# continuous_eval, evaluate\nschedule=evaluate\nworkdir=workdir-master\n\npython train_eval.py --output_dir=${workdir} \\\n --schedule=${schedule} \\\n --dataset_name=${dataset} \\\n --beam_width=5 \\\n --dataset_dir=/home/data/Dataset/tf-mjsynth"
},
{
"alpha_fraction": 0.6126677393913269,
"alphanum_fraction": 0.6169060468673706,
"avg_line_length": 31.5440616607666,
"blob_id": "fc423301231734afb9fb9f9a8f52bb71045b27fa",
"content_id": "1ad6986fe5a2365335e942d6fb5bfd2e359e6bbe",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8494,
"license_type": "permissive",
"max_line_length": 80,
"num_lines": 261,
"path": "/datasets.py",
"repo_name": "yongduek/conv-ensemble-str",
"src_encoding": "UTF-8",
"text": "# Copyright 2017 IIE, CAS.\n# Written by Shancheng Fang\n# ==============================================================================\n\"\"\"Define base dataset.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport abc\nimport sys\nimport copy\nimport six\n\nimport tensorflow as tf\nfrom tensorflow.contrib.slim.python.slim.data import tfexample_decoder\n\ndef create_dataset(def_dict, mode, use_beam_search):\n \"\"\"Creates an Dataset object from a dictionary definition.\n\n Args:\n def_dict: A dictionary defining the input pipeline.\n It must have \"dataset_name\", \"split_name\" and \"dataset_dir\" that\n correspond to the class name and constructor parameters of\n an InputPipeline, respectively.\n mode: A value in tf.contrib.learn.ModeKeys\n use_beam_search: Whether to use beam search\n\n Returns:\n A Dataset object.\n \"\"\"\n if not \"dataset_name\" in def_dict:\n raise ValueError(\"Dataset definition must have a dataset_name property.\")\n\n class_ = def_dict[\"dataset_name\"]\n if not hasattr(sys.modules[__name__], class_):\n raise ValueError(\"Invalid Dataset class: {}\".format(class_))\n\n # TODO(Shancheng): to support batch_size > 1,\n # remove use_beam_search argument\n if mode != tf.contrib.learn.ModeKeys.TRAIN and use_beam_search:\n def_dict['batch_size'] = 1\n\n dataset_class = getattr(sys.modules[__name__], class_)\n return dataset_class(params=def_dict, mode=mode)\n\[email protected]_metaclass(abc.ABCMeta)\nclass Dataset():\n \"\"\"An abstract Dataset class. All datasets must inherit from this.\n This class defines how data is read, parsed, and separated into\n features and labels.\n \"\"\"\n def __init__(self, params, mode):\n self.mode = mode\n default_params = self._dataset_params()\n self.params = self._parse_params(params, default_params)\n\n @property\n def _params_template(self):\n \"\"\"Params placeholder.\n \"\"\"\n return {\n 'dataset_name': None,\n 'dataset_dir': None,\n 'batch_size': None,\n 'splits': None,\n 'charset_filename': None,\n 'image_shape': None,\n 'max_sequence_length': None,\n 'null_code': None,\n 'shuffle': None,\n 'smaller_final_batch': None,\n 'num_epochs': None,\n 'split_name': None\n }\n\n\n def _dataset_params(self):\n \"\"\"A abstract function implemented by subclass.\n \"\"\"\n raise NotImplementedError(\"Not implemented.\")\n\n def _parse_params(self, params, default_params):\n \"\"\"Parses parameter values to the types defined by the default parameters.\n Default parameters are used for missing values.\n \"\"\"\n # Cast parameters to correct types\n if params is None:\n params = {}\n result = copy.deepcopy(default_params)\n for key, value in params.items():\n # If param is unknown, drop it to stay compatible with past versions\n if key not in default_params:\n raise ValueError(\"%s is not a valid model parameter\" % key)\n # Param is a dictionary\n if isinstance(value, dict):\n default_dict = default_params[key]\n if not isinstance(default_dict, dict):\n raise ValueError(\"%s should not be a dictionary\", key)\n if default_dict:\n value = self._parse_params(value, default_dict)\n else:\n # If the default is an empty dict we do not typecheck it\n # and assume it's done downstream\n pass\n if value is None:\n continue\n if default_params[key] is None:\n result[key] = value\n else:\n result[key] = type(default_params[key])(value)\n return result\n\n def _read_from_data_provider(self, data_provider):\n 
\"\"\"Utility function to read all available items from a DataProvider.\n \"\"\"\n list_items = set(data_provider.list_items())\n assert self.items.issubset(list_items), \\\n \"items are unavailable in data_provider!\"\n\n item_values = data_provider.get(list(self.items))\n items_dict = dict(zip(self.items, item_values))\n return items_dict\n\n def _make_data_provider(self, **kwargs):\n \"\"\"Create data provider\n \"\"\"\n split_name = self.params['split_name']\n if split_name not in self.params['splits']:\n raise ValueError('split name %s was not recognized.' % split_name)\n\n decoder = tfexample_decoder.TFExampleDecoder(self.keys_to_features,\n self.items_to_handlers)\n\n file_pattern = os.path.join(self.params['dataset_dir'],\n self.params['splits'][split_name])\n\n tf.logging.info(\"Create dataset.\")\n dataset = tf.contrib.slim.dataset.Dataset(\n data_sources=file_pattern,\n reader=tf.TFRecordReader,\n decoder=decoder,\n num_samples=None,\n items_to_descriptions={})\n\n return tf.contrib.slim.dataset_data_provider.DatasetDataProvider(\n dataset=dataset,\n shuffle=self.params[\"shuffle\"],\n num_epochs=self.params[\"num_epochs\"],\n **kwargs)\n\n def create_input_fn(self):\n \"\"\"Creates an input function that can be used with tf.learn estimators.\n Note that you must pass \"factory funcitons\" for both the data provider and\n featurizer to ensure that everything will be created in the same graph.\n \"\"\"\n with tf.variable_scope(\"input_fn\"):\n batch_size = self.params['batch_size']\n data_provider = self._make_data_provider()\n features_and_labels = self._read_from_data_provider(data_provider)\n\n tf.logging.info(\"Start batch queue.\")\n batch = tf.train.batch(\n tensors=features_and_labels,\n enqueue_many=False,\n batch_size=batch_size,\n dynamic_pad=True,\n capacity=3000 + 16 * batch_size,\n allow_smaller_final_batch=self.params['smaller_final_batch'],\n name=\"batch_queue\",\n num_threads=int((batch_size+1)/2)\n )\n\n # Separate features and labels\n features_batch = {k: batch[k] for k in self.feature_keys}\n if set(batch.keys()).intersection(self.label_keys):\n labels_batch = {k: batch[k] for k in self.label_keys}\n else:\n labels_batch = None\n\n return features_batch, labels_batch\n\n @property\n def keys_to_features(self):\n \"\"\"Key to features\n \"\"\"\n default = {\n 'image/encoded':\n tf.FixedLenFeature((), tf.string, default_value=''),\n 'image/format':\n tf.FixedLenFeature((), tf.string, default_value='jpeg'),\n 'image/text_length':\n tf.FixedLenFeature([1], tf.int64,\n default_value=tf.zeros([1], dtype=tf.int64)),\n 'image/class':\n tf.FixedLenFeature([self.params['max_sequence_length']], tf.int64),\n 'image/text':\n tf.FixedLenFeature([1], tf.string, default_value=''),\n 'image/name':\n tf.FixedLenFeature((), tf.string, default_value='')\n }\n return default\n\n @property\n def items_to_handlers(self):\n \"\"\"Items to handlers\n \"\"\"\n default = {\n 'image': tfexample_decoder.Image(\n shape=self.params['image_shape'],\n image_key='image/encoded',\n format_key='image/format'),\n 'label': tfexample_decoder.Tensor(tensor_key='image/class'),\n 'text': tfexample_decoder.Tensor(tensor_key='image/text'),\n 'length': tfexample_decoder.Tensor(tensor_key='image/text_length'),\n 'name': tfexample_decoder.Tensor(tensor_key='image/name')\n }\n return default\n\n @property\n def items(self):\n \"\"\"items\n \"\"\"\n return self.feature_keys.union(self.label_keys)\n\n @property\n def feature_keys(self):\n \"\"\"Only image and name supported.\n \"\"\"\n return set([\"image\", 
\"name\"])\n\n @property\n def label_keys(self):\n \"\"\"Only label and length supported.\n \"\"\"\n return set([\"label\", \"length\"])\n\nclass MJSynth(Dataset):\n \"\"\"Training dataset.\n \"\"\"\n def _dataset_params(self):\n dataset_params = {\n 'dataset_name': 'MJSynth',\n 'dataset_dir': '/opt/fsc/tf-mjsynth',\n 'batch_size': 64,\n 'splits': {\n 'train': 'train/train-*/*.tfrecord',\n 'test': 'test/test-*/*.tfrecord',\n 'validation': 'val/val-*/*.tfrecord'\n },\n 'charset_filename': 'charset_size=63.txt',\n 'image_shape': (32, 100, 3),\n 'max_sequence_length': 30,\n 'null_code': 0,\n }\n default_params = self._params_template\n default_params.update(dataset_params)\n return default_params\n"
}
] | 15 |
ssshreyy/TV-Viewership-Prediction | https://github.com/ssshreyy/TV-Viewership-Prediction | df9301e6a4bc18c62f26311fe83df23d3e5f66bc | 6da7e3bc46b6142c1cd0edf3421e63caebfaa9ce | 903fd34eb064f755995fa0dabca90b68700ca07d | refs/heads/master | 2020-04-25T21:50:56.254950 | 2019-04-24T05:44:05 | 2019-04-24T05:44:05 | 173,092,342 | 0 | 2 | null | null | null | null | null | [
{
"alpha_fraction": 0.8823529481887817,
"alphanum_fraction": 0.8823529481887817,
"avg_line_length": 34,
"blob_id": "902eac091781b997b14d01536c2bc4d7f3f812d8",
"content_id": "afa0156c78459a00e466b5e0f0792116fe17f90c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 34,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 1,
"path": "/README.md",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "# Television-Viewership-Prediction"
},
{
"alpha_fraction": 0.5770010352134705,
"alphanum_fraction": 0.6079027652740479,
"avg_line_length": 42.86666488647461,
"blob_id": "72cd2e11691b84190741405a5e6b3f1244f7c998",
"content_id": "a1a5a9e1068b07bf1825580fa4f70b2bc38dfdb3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 1974,
"license_type": "permissive",
"max_line_length": 378,
"num_lines": 45,
"path": "/ng-pi-admin/src/app/pages/index/index.component.html",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "<div class=\"row\">\n <div class=\"col-md-12\">\n <card cardTitle='Television Viewership Prediction Using Sentiment Analysis of Tweets' isCollapse=\"true\">\n <ol class=\"col-md-10\">\n <b>Major Project by:</b>\n <li>Anuj Joshi   0801CS151010</li>\n <li>Ayushi Gupta  0801EE151015</li>\n <li>Shrey Jain   0801CS151070</li>\n <li>Vrishti Jain   0801EC151087</li>\n <li>Yash Agrawal  0801CS151095</li>\n </ol>\n <div class=\"col-md-10\">\n <b>Guided by:</b>\n Prof. D.A. Mehta\n </div>\n </card>\n <card cardTitle='Problem Statement' isCollapse=\"true\">\n Prediction of future viewership information for any Television show is essential to designing advertisements, optimising profits from these ads, and also analysing success and efficiency of past promotional campaigns. However, usually, such predictions are based on just previous viewership records and promotions without taking user opinion, such as tweets, in account.\n </card>\n <card cardTitle='Goals and Objectives' isCollapse=\"true\">\n <ul class='col-md-10'>\n <li>Identifying and visualising the relation between general twitter user opinion and TV viewership of television shows.</li>\n <li>Forecasting, i.e., predicting future values of audience count of these events based on tweets and historical viewership data</li>\n </ul>\n </card>\n <card cardTitle='Tools and Technologies Used' isCollapse=\"true\">\n <ol class='col-md-10'>\n <li>Python\n <ul>Major Libraries Used:\n <li>Pandas</li>\n <li>Scikit-learn</li>\n <li>NumPy</li>\n <li>NLTK- Vader</li>\n <li>SciPy</li>\n <li>JSON</li>\n </ul>\n </li>\n <li>Angular 6</li>\n <li>Flask</li>\n <li>PyCharm</li>\n <li>Microsoft Visual Code</li>\n </ol>\n </card>\n </div>\n</div>\n"
},
{
"alpha_fraction": 0.6483221650123596,
"alphanum_fraction": 0.6505593061447144,
"avg_line_length": 31.39130401611328,
"blob_id": "714de05602b36d1cf667b84b59d5812e73dd9eba",
"content_id": "548beb9124fd69ed0ae6e74a5336ccf366fa452d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2235,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 69,
"path": "/Tweet_Preprocessing.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import warnings\nimport ML_Sentiment\nimport pandas as pd\nfrom nltk.stem.porter import *\nfrom nltk.stem import WordNetLemmatizer\n\nwarnings.filterwarnings(\"ignore\", category = DeprecationWarning)\n\n\ndef remove_http(txt):\n txt = str(txt)\n lst = list()\n for x in txt.split():\n if not x.startswith('http'):\n lst.append(x)\n return \" \".join(lst)\n\n\ndef remove_pattern(txt,pattern):\n txt = str(txt)\n return \" \".join(filter(lambda x: x[0] != pattern, txt.split()))\n\n\ndef main(fileName):\n\n print(\"Tweet Preprocessing Started\")\n\n train = pd.read_csv(fileName, usecols = range(12), encoding = 'utf-8', index_col = False, low_memory = False)\n print(\"File Read Successful\")\n\n train['Tidy_Tweet'] = [remove_pattern(x,'@') for x in train['Text']]\n print(\"Removed @Handle\")\n\n # train['Tidy_Tweet'] = [remove_http(x) for x in train['Tidy_Tweet']]\n train['Tidy_Tweet'] = [re.sub( '((www\\.[^\\s]+)|(https?://[^\\s]+))' , ' ' , tweet ) for tweet in train['Tidy_Tweet']]\n print(\"Removed URLs\")\n\n train['Tidy_Tweet'] = train['Tidy_Tweet'].str.replace(\"[^a-zA-Z#]\", \" \")\n print(\"Removed Special Characters, Numbers, Punctuations\")\n\n train['Tidy_Tweet'] = train['Tidy_Tweet'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>2]))\n print(\"Removed Short Words\")\n\n tokenized_tweet_train = train['Tidy_Tweet'].apply(lambda x : x.split())\n print(\"Tokenization Done\")\n\n stemmer = PorterStemmer()\n tokenized_tweet_train = tokenized_tweet_train.apply(lambda x: [stemmer.stem(i) for i in x])\n print(\"Stemming Done\")\n\n lemmatizer = WordNetLemmatizer()\n tokenized_tweet_train = tokenized_tweet_train.apply(lambda x: [lemmatizer.lemmatize(i) for i in x])\n print(\"Lammatization Done\")\n\n for i in range(len(tokenized_tweet_train)):\n tokenized_tweet_train[i] = ' '.join(tokenized_tweet_train[i])\n\n train['Tidy_Tweet'] = tokenized_tweet_train\n\n outputFileName = './Preprocessed_data/tweet_data_preprocessed.csv'\n train.to_csv(outputFileName, index=False)\n\n print('Tweet Preprocessing Complete. Output file generated \"%s\".' % outputFileName )\n\n # ML_Sentiment.main(outputFileName)\n\n\nif __name__ == \"__main__\" :\n main( './Tweet_data/tweet_data.csv' )\n"
},
{
"alpha_fraction": 0.7664783596992493,
"alphanum_fraction": 0.7664783596992493,
"avg_line_length": 28.5,
"blob_id": "de7227075a7b0ecb51749f9be470871d47532974",
"content_id": "b045f6fb0217d8f494e72c545e200fbbc6ae99bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 531,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 18,
"path": "/ML_Sentiment.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import Train\nimport Sentiment_Analysis\nimport Sentimental_Data_Preprocessing\n\n\ndef main(fileName):\n\n trainFileName = './Sentiment_training_data/sentiment_training_data.csv'\n preprocessedTrainFileName = Sentimental_Data_Preprocessing.main( trainFileName )\n\n outputFileName = Sentiment_Analysis.main(preprocessedTrainFileName)\n episodeFileName = './Prediction_data/simpsons_episodes.csv'\n\n Train.main(outputFileName, episodeFileName)\n\n\nif __name__ == \"__main__\":\n main('./Preprocessed_data/tweet-preprocessed.csv')\n"
},
{
"alpha_fraction": 0.6661576628684998,
"alphanum_fraction": 0.6730291843414307,
"avg_line_length": 37.52206039428711,
"blob_id": "d9f38cafe06019435a85475d136b377e5e56db4f",
"content_id": "80a4ea968406fd8f169d6b28a29d41feca5037d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5239,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 136,
"path": "/Viewership_Prediction.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import pickle\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom sklearn import neighbors\nfrom sklearn import preprocessing\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.ensemble import RandomForestRegressor\n# from __future__ import absolute_import\n# from __future__ import division\n# from __future__ import print_function\n\nimport itertools\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\nimport matplotlib\n\n\n\ndef train_classifier(features_train, features_test, label_train, label_test, c,i):\n if c == \"Linear\":\n model = LinearRegression()\n elif c == \"Random_Forest\":\n model= RandomForestRegressor(n_estimators=50)\n elif c == \"Polynomial\":\n poly_features = PolynomialFeatures(degree=1)\n features_train_poly = poly_features.fit_transform(features_train)\n model = LinearRegression()\n model.fit(features_train_poly, label_train)\n # predicting on training data-set\n y_train_predicted = model.predict(features_train_poly)\n\n # predicting on test data-set\n y_test_predict = model.predict(poly_features.fit_transform(features_test))\n\n # evaluating the model on training dataset\n rmse_train = np.sqrt(mean_squared_error(label_train, y_train_predicted))\n r2_train = r2_score(label_train, y_train_predicted)\n\n # evaluating the model on test dataset\n rmse_test = np.sqrt(mean_squared_error(label_test, y_test_predict))\n r2_test = r2_score(label_test, y_test_predict)\n\n # print(\"The model performance for the training set\")\n # print(\"RMSE of training set is {}\".format(rmse_train))\n # print(\"R2 score of training set is {}\".format(r2_train))\n # print(\"The model performance for the test set\")\n # print(\"RMSE of test set is {}\".format(rmse_test))\n # print(\"R2 score of test set is {}\".format(r2_test))\n elif c == \"Kmeans\":\n knn = neighbors.KNeighborsRegressor()\n params = {'n_neighbors': [2, 3, 4, 5, 6, 7, 8, 9]}\n model = GridSearchCV(knn, params, cv=5)\n else:\n print(\"Incorrect Selection Of Classifier\")\n\n model.fit(features_train, label_train)\n\n # fileName = './Prediction_models/' + classifier + '.pickle'\n # with open(fileName, 'wb') as file:\n # pickle.dump(model, file)\n # print(\"Pickle File Created %s\" % fileName)\n if i ==1:\n accuracy = model.score(features_test, label_test)\n # print(\"Accuracy Is:\", accuracy)\n\n return model\n\ndef main(simpsons_file):\n\n print('Viewership Prediction Started')\n viewer_data = pd.read_csv(simpsons_file, dtype={'Unique_Users': float, 'US_Viewers_In_Millions': float}, usecols = range(19), index_col = False, low_memory = False)\n viewer_data.dropna( inplace = True )\n print('Episode Data File Read Successful')\n\n x = viewer_data.loc[:, ['Views', 'IMDB_Rating', 'IMDB_Votes', 'Retweets', 'Favorites', 'Vader_Score', 'Sentiment_Score', 'Tweets_Per_Day', 'Unique_Users']]\n y = viewer_data.loc[:, ['US_Viewers_In_Millions']]\n x_temp =x\n y_temp =y\n scaler = MinMaxScaler( feature_range = (0, 1) )\n x = scaler.fit_transform(x)\n y = scaler.fit_transform(y)\n print('Data Rescaling Complete')\n\n x = preprocessing.scale(x)\n y = preprocessing.scale(y)\n print('Data Standardization 
Complete')\n\n # x = preprocessing.normalize(x)\n # y = preprocessing.normalize(y)\n # # print('Data Normalization Complete')\n\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)\n print(\"Shape of x_train: \", x_train.shape)\n print(\"Shape of y_train: \", y_train.shape)\n print(\"Shape of x_test: \", x_test.shape)\n print(\"Shape of y_test\", y_test.shape)\n print('Data Sliced In Training And Testing Sets')\n\n print(\"Model Training Started\")\n algorithm = \"Random_Forest\"\n model = train_classifier(x_train, x_test, y_train, y_test, algorithm,1)\n print(\"Model Training Complete\")\n\n flat_list = []\n for sublist in y:\n for item in sublist:\n flat_list.append(item)\n\n\n scaler = MinMaxScaler( feature_range = (0, 1))\n x = scaler.fit_transform(x_temp)\n x = preprocessing.scale(x)\n x = preprocessing.normalize(x)\n x_train, x_test, y_train, y_test = train_test_split(x, y_temp, test_size = 0.2, random_state = 0)\n model = train_classifier(x_train, x_test, y_train, y_test, \"Random_Forest\",1)\n viewer_data['Predicted_Viewership'] = model.predict(x)\n viewer_data.to_csv('./Prediction_data/predicted_file.csv')\n plt.scatter(viewer_data['Predicted_Viewership'], y_temp, label='skitscat')\n plt.xlabel('Predicted Viewership')\n plt.ylabel('Actual Viewership')\n plt.title('Prediction vs Reality')\n plt.legend()\n plt.show()\n print(\"Done\")\n\nif __name__ == '__main__':\n main('./Prediction_data/simpsons_episodes.csv')\n"
},
{
"alpha_fraction": 0.6455647945404053,
"alphanum_fraction": 0.6485974192619324,
"avg_line_length": 31.170732498168945,
"blob_id": "2aed5f6850ea0b37eb131008e307e015c4a1bd73",
"content_id": "1922c02c5d9cb7a9f54e99ba8bc740f6e6e96a3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2638,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 82,
"path": "/Sentimental_Data_Preprocessing.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nfrom nltk.stem.porter import *\nfrom nltk.stem import WordNetLemmatizer\n\n\ndef import_tweets(filename, header = None):\n\n train_dataset = pd.read_csv(filename, usecols = range(6), encoding = 'Latin-1', index_col = False, low_memory = False, header = header)\n train_dataset.columns = ['Sentiment', 'Id', 'Date', 'Flag', 'User', 'Text']\n\n # for i in ['Flag','Id','User','Date']:\n # del train_dataset[i]\n # train_dataset.sentiment = train_dataset.sentiment.replace(4,1)\n # train_dataset.sentiment = train_dataset.sentiment.replace(0,-1)\n\n return train_dataset\n\n\ndef preprocess_tweet(tweet):\n\n if type(float) == type(tweet):\n return '-'\n\n tweet.lower()\n\n tweet = re.sub('@[^\\s]+', ' ', tweet)\n\n tweet = re.sub('((www\\.[^\\s]+)|(https?://[^\\s]+))', ' ', tweet)\n\n tweet = tweet.replace(\"[^a-zA-Z#]\", \" \")\n\n # convert \"#topic\" to just \"topic\"\n # tweet = re.sub(r'#([^\\s]+)', r'\\1', tweet)\n\n tweet = re.sub('[\\s]+', ' ', tweet)\n\n return tweet\n\n\ndef main(fileName):\n\n print(\"Sentiment Training Data Preprocessing Started\")\n\n train_dataset = import_tweets(fileName)\n train_dataset.Text = train_dataset.Text.fillna(value = \"\")\n print(\"Sentiment Training File read\")\n\n train_dataset['Tidy_Tweet'] = train_dataset['Text'].apply(preprocess_tweet)\n print(\"Removed @Handle\")\n print(\"Removed URLs\")\n print(\"Removed Special Characters, Numbers, Punctuations\")\n print(\"Extra White Spaces Removed\")\n\n train_dataset['Tidy_Tweet'] = train_dataset['Tidy_Tweet'].apply(\n lambda x: ' '.join([w for w in x.split() if len(w) > 2]))\n print(\"Removed Short Words\")\n\n tokenized_tweet_train = train_dataset['Tidy_Tweet'].apply( lambda x : x.split() )\n print(\"Tokenization Done\")\n\n stemmer = PorterStemmer()\n tokenized_tweet_train = tokenized_tweet_train.apply( lambda x : [stemmer.stem( i ) for i in x] )\n print(\"Stemming Done\")\n\n lemmatizer = WordNetLemmatizer()\n tokenized_tweet_train = tokenized_tweet_train.apply( lambda x : [lemmatizer.lemmatize( i ) for i in x] )\n print(\"Lammatization Done\")\n\n for i in range(len(tokenized_tweet_train)):\n tokenized_tweet_train[i] = ' '.join(tokenized_tweet_train[i])\n\n train_dataset['Tidy_Tweet'] = tokenized_tweet_train\n\n outputFileName = './Preprocessed_data/preprocessed_training_data.csv'\n train_dataset.to_csv(outputFileName, index = False)\n\n print('Sentiment Train Data Preprocessing Complete. Output file generated \"%s\".' % outputFileName)\n return outputFileName\n\n\nif __name__ == '__main__':\n main(\"./Sentiment_training_data/sentiment_training_data.csv\")\n"
},
{
"alpha_fraction": 0.6126510500907898,
"alphanum_fraction": 0.6325515508651733,
"avg_line_length": 22.450000762939453,
"blob_id": "60ed7da412ca9e259b9bfab56d12f0f29c84d63e",
"content_id": "b99c747fcfbe534a13ef6f923f7abc1c2f0c685f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1407,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 60,
"path": "/ng-pi-admin/src/app/pages/sentiment/sentiment.component.ts",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "\nimport { Component, OnInit } from '@angular/core';\nimport { HttpClient } from '@angular/common/http';\nimport { HttpHeaders } from '@angular/common/http';\n\n@Component({\n selector: 'app-visual',\n templateUrl: './sentiment.component.html',\n styleUrls: ['./sentiment.component.scss']\n})\nexport class SentimentComponent implements OnInit {\n\n serverData: JSON;\n analysedData: JSON;\n arrayOfKeys;\n pageSize = 10;\n pageNumber = 1;\n pre=false;\n show=false;\n\n // constructor() { }\n constructor(private httpClient: HttpClient) {\n }\n\n ngOnInit() {\n }\n\n showTweets(){\n this.httpClient.get(\"http://127.0.0.1:5003/show\").subscribe((data) => {\n this.serverData = data as JSON;\n\n console.log(this.serverData)\n\n this.arrayOfKeys = Object.keys(this.serverData);\n \n // this.analysedData=this.serverData;\n this.show=true;\n })\n }\n\n sentimentTweets(){\n this.httpClient.get(\"http://127.0.0.1:5003/sentiment\").subscribe((data2) => {\n this.analysedData = data2 as JSON;\n this.arrayOfKeys.forEach(element => {\n if(this.analysedData[element].Sentiment_Score==0){\n this.analysedData[element].Sentiment_Score = -1;\n }\n else{\n this.analysedData[element].Sentiment_Score = 1;\n }\n });\n this.pre=true;\n console.log(this.analysedData)\n })\n }\n\n pageChanged(pN: number): void {\n this.pageNumber = pN;\n }\n\n}"
},
{
"alpha_fraction": 0.6520190238952637,
"alphanum_fraction": 0.7066508531570435,
"avg_line_length": 29.10714340209961,
"blob_id": "a56cc8dfc5d867e2ce8fe9ed2445570034d69d7c",
"content_id": "30fe9d3b39a184511f1f2c985113962c751c027f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 842,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 28,
"path": "/Visualisation.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "# -*- coding: latin-1 -*-\n\nimport pandas as pd\nimport numpy as np\nfrom nltk.stem.porter import *\nfrom nltk.stem import WordNetLemmatizer\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\n\nimport warnings\n\ndataFiles = ['tweet-2009', 'tweet-2010', 'tweet-2011', 'tweet-2012', 'tweet-2013', 'tweet-2014', 'tweet-2015']\n\nfor j in dataFiles:\n\ttest = pd.read_csv('PreprocessedData/'+j+'-preprocessed.csv', encoding='Latin-1', low_memory=False)\n\n\t# print(test.iloc[:, 1])\n\n\tall_words = ' '.join([text for text in test.iloc[:, 1]])\n\n\twordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(all_words)\n\n\tplt.figure(figsize=(10, 7))\n\tplt.imshow(wordcloud, interpolation=\"bilinear\")\n\tplt.axis('off')\n\t# plt.show()\n\tplt.savefig('Plots/'+j+'-wordcloud.png', bbox_inches='tight')\n\tprint(j+' wordcloud saved')"
},
{
"alpha_fraction": 0.631426215171814,
"alphanum_fraction": 0.6525704860687256,
"avg_line_length": 32.26896667480469,
"blob_id": "ad2fbd246f61ec6224643a40faa97aecd9db5cdc",
"content_id": "5eb5e0a953758d0a1b82afd306351dbb35e9104c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4824,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 145,
"path": "/Train.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import pickle\nimport pandas as pd\nimport bisect, datetime\nfrom sklearn.svm import SVC\nfrom sklearn import neighbors\nfrom sklearn import linear_model\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\n\n\ndef train_classifier(features_train, features_test, label_train, label_test, classifier):\n if classifier == \"Logistic_Regression\":\n model = LogisticRegression(C=1.)\n elif classifier == \"Naive_Bayes\":\n model = MultinomialNB()\n elif classifier == \"SVM\":\n model = SVC()\n elif classifier == \"Random_Forest\":\n model = RandomForestClassifier(n_estimators=400, random_state=11)\n else:\n print(\"Incorrect Selection Of Classifier\")\n\n model.fit(features_train, label_train)\n print(\"Model Fitting Done\")\n\n fileName = './Sentiment_models/' + classifier + '.pickle'\n with open(fileName, 'wb') as file:\n pickle.dump(model, file)\n print(\"Pickle File Created %s\" % fileName)\n\n accuracy = model.score(features_test, label_test)\n print(\"Accuracy Is:\", accuracy)\n\n # Make prediction on the test data\n probability_to_be_positive = model.predict_proba(features_test)[:,1]\n\n # Check AUC(Area Under the Roc Curve) to see how well the score discriminates between negative and positive\n print(\"AUC (Train Data):\", roc_auc_score(label_test, probability_to_be_positive))\n\n # Print top 10 scores as a sanity check\n print(\"Top 10 Scores: \", probability_to_be_positive[:10])\n\n return model\n\n\ndef computeAverage(tweet_data, start, end):\n if start == end:\n return 0\n\n score = 0\n\n for i in range(start, end+1):\n score += (float(tweet_data['Retweets'][i]) + 1) * float(tweet_data['Vader_Score'][i])\n\n return score\n\n\ndef date_change(str_date):\n if str_date:\n return datetime.datetime.strptime(str_date, '%d-%m-%Y').strftime('%Y-%m-%d')\n\n\ndef viewers_change(str_views):\n if str_views == 'NaN':\n return '0'\n return str(int(float(str_views) * 1000000))\n\n\n# def date_change(str_date):\n# return datetime.datetime.strptime(str_date, '%B %d, %Y')\n\n\n# def viewers_change(str_views):\n# return str(int(float(str_views.strip().split('[')[0]) * 1000000))\n\n\ndef main(prediction_file, simpsons_file):\n\n algorithm = \"Logistic_Regression\"\n print('Viewership Prediction Started')\n viewer_data = pd.read_csv(simpsons_file, usecols=range(13), index_col=False, low_memory = False)\n print('Episode Data File Read Successful')\n\n tweet_data = pd.read_csv(prediction_file, usecols=range(15), index_col=False, low_memory = False)\n print('Tweet Data File Read Successful')\n\n viewer_data['Air_Date'] = list(map(date_change, viewer_data['Air_Date']))\n tweet_data['Date'] = list(map(date_change, tweet_data['Date']))\n print('Date Columns Altered')\n\n viewer_data['US_Viewers_In_Millions'] = list(map(viewers_change, viewer_data['US_Viewers_In_Millions']))\n print('Viewer Column Altered')\n\n first_date = bisect.bisect_left(viewer_data['Air_Date'], '2009-01-01')\n last_date = bisect.bisect_left(viewer_data['Air_Date'], '2015-01-01')\n y_train = list(map(int, viewer_data['US_Viewers_In_Millions'][first_date+1:last_date]))\n\n x_train = list()\n count = 1\n print('Extracting Training Features')\n for i in range(first_date, last_date - 1):\n temp1 = str(viewer_data['Air_Date'][i])\n temp2 = str(viewer_data['Air_Date'][i + 1])\n temp3 = []\n start = bisect.bisect_left(tweet_data['Date'], temp1)\n end = bisect.bisect_left(tweet_data['Date'], 
temp2)\n temp3.append(computeAverage(tweet_data, start, end))\n\n # print(count, temp2, viewer_data['Title'][i + 1], temp3)\n\n count += 1\n x_train.append(temp3)\n\n print('4')\n\n first_date = bisect.bisect_left(viewer_data['Air_Date'], '2015-01-01')\n last_date = bisect.bisect_left(viewer_data['Air_Date'], '2016-01-01')\n\n y_test = list(map(int, viewer_data['US_Viewers_In_Millions'][first_date + 1:last_date]))\n\n x_test = list()\n print('6')\n count = 1\n for i in range(first_date, last_date - 1):\n temp1 = str(viewer_data['Air_Date'][i])\n temp2 = str(viewer_data['Air_Date'][i + 1])\n temp3 = []\n start = bisect.bisect_left(tweet_data['Date'], temp1)\n end = bisect.bisect_left(tweet_data['Date'], temp2)\n temp3.append(computeAverage(tweet_data, start, end))\n\n print(count, temp2, viewer_data['Title'][i + 1], temp3)\n\n count += 1\n x_test.append(temp3)\n\n print('7')\n\n model = train_classifier(x_train, y_train, x_test, y_test, algorithm)\n\n\nif __name__ == \"__main__\":\n main('./Prediction_data/tweet_predict.csv', './Prediction_data/simpsons_episodes.csv')\n"
},
{
"alpha_fraction": 0.6331614851951599,
"alphanum_fraction": 0.6546391844749451,
"avg_line_length": 21.384614944458008,
"blob_id": "2ff494c3d8b810e1bfae27322f6a454106d27b00",
"content_id": "9072bbc63943cfb1ddd3c72537c22605710064b5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1164,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 52,
"path": "/ng-pi-admin/src/app/pages/preprocess/preprocess.component.ts",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { HttpClient } from '@angular/common/http';\nimport { HttpHeaders } from '@angular/common/http';\n\n@Component({\n selector: 'app-visual',\n templateUrl: './preprocess.component.html',\n styleUrls: ['./preprocess.component.scss']\n})\nexport class PreprocessComponent implements OnInit {\n\n serverData: JSON;\n preprocessedData: JSON;\n arrayOfKeys;\n pageSize = 10;\n pageNumber = 1;\n pre=false;\n show=false;\n\n // constructor() { }\n constructor(private httpClient: HttpClient) {\n }\n\n ngOnInit() {\n }\n\n showTweets(){\n this.httpClient.get(\"http://127.0.0.1:5003/show\").subscribe((data) => {\n this.serverData = data as JSON;\n\n console.log(this.serverData)\n\n this.arrayOfKeys = Object.keys(this.serverData);\n \n // this.preprocessedData=this.serverData;\n this.show=true;\n })\n }\n\n preprocessTweets(){\n this.httpClient.get(\"http://127.0.0.1:5003/preprocess\").subscribe((data2) => {\n this.preprocessedData = data2 as JSON;\n this.pre=true;\n console.log(this.preprocessedData)\n })\n }\n\n pageChanged(pN: number): void {\n this.pageNumber = pN;\n }\n\n}\n"
},
{
"alpha_fraction": 0.6260162591934204,
"alphanum_fraction": 0.6365853548049927,
"avg_line_length": 24.102041244506836,
"blob_id": "883582598164f65567ab2ce726b022ecb2a1725d",
"content_id": "21ccf43887f2cd4b5dc55c919a0eb862602a1e4f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 1230,
"license_type": "permissive",
"max_line_length": 169,
"num_lines": 49,
"path": "/ng-pi-admin/src/app/pages/form/components/form-layouts/form-layouts.component.ts",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { HttpClient } from '@angular/common/http';\nimport { HttpHeaders } from '@angular/common/http';\n\n@Component({\n selector: 'app-form-layouts',\n templateUrl: './form-layouts.component.html',\n styleUrls: ['./form-layouts.component.scss']\n})\n\nexport class FormLayoutsComponent implements OnInit {\n\n serverData: JSON;\n arrayOfKeys;\n pageSize = 10;\n pageNumber = 1;\n\n // constructor(){}\n constructor(private httpClient: HttpClient) {\n }\n \n ngOnInit() {\n }\n\n searchTweet(tweet) {\n console.log(tweet);\n\n const httpOptions = {\n headers: new HttpHeaders({\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Authorization': 'my-auth-token'\n })\n };\n var body = \"username=\" + tweet.username + \"&query=\" + tweet.query + \"&since=\" + tweet.since + \"&until=\" + tweet.until + \"&maxNo=\" + tweet.maxNo +\"&top=\" + tweet.top;\n this.httpClient.post(\"http://127.0.0.1:5003/search\", body, httpOptions).subscribe((data) => {\n this.serverData = data as JSON;\n\n console.log(this.serverData)\n\n this.arrayOfKeys = Object.keys(this.serverData)\n \n })\n }\n\n pageChanged(pN: number): void {\n this.pageNumber = pN;\n }\n\n}\n"
},
{
"alpha_fraction": 0.5514018535614014,
"alphanum_fraction": 0.5539507269859314,
"avg_line_length": 30.386667251586914,
"blob_id": "6b5235653c85d28a4d16ab3d8df8ccac6def4f63",
"content_id": "2149371e6c556e76bc5c57d42a6047663591951d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2354,
"license_type": "no_license",
"max_line_length": 248,
"num_lines": 75,
"path": "/Exporter.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "# Exporter initial\nimport sys\nimport getopt\nimport codecs\nimport Tweet_Preprocessing\n\nif sys.version_info[0] < 3:\n import got\nelse:\n import got3 as got\n\ndef main(argv):\n if len(argv) == 0:\n print('You must pass some parameters.')\n return\n\n try:\n opts, _ = getopt.getopt(argv, \"\", (\n \"username=\", \"near=\", \"within=\", \"since=\", \"until=\", \"querysearch=\", \"toptweets\", \"maxtweets=\", \"output=\"))\n\n tweetCriteria = got.manager.TweetCriteria()\n outputFileName = \"./Tweet_data/tweet_data.csv\"\n\n for opt, arg in opts:\n if opt == '--username':\n tweetCriteria.username = arg\n\n elif opt == '--since':\n tweetCriteria.since = arg\n\n elif opt == '--until':\n tweetCriteria.until = arg\n\n elif opt == '--querysearch':\n tweetCriteria.querySearch = arg\n\n elif opt == '--toptweets':\n tweetCriteria.topTweets = True\n\n elif opt == '--maxtweets':\n tweetCriteria.maxTweets = int(arg)\n\n elif opt == '--near':\n tweetCriteria.near = '\"' + arg + '\"'\n\n elif opt == '--within':\n tweetCriteria.within = '\"' + arg + '\"'\n\n elif opt == '--output':\n outputFileName = arg\n\n outputFile = codecs.open(outputFileName, \"w+\", \"utf-8\")\n\n outputFile.write('ID,Username,Author ID,Date,Time,Retweets,Favorites,Text,Mentions,Hashtags,Permalink,URL')\n\n print('Tweets Extraction Started\\n')\n\n def receiveBuffer(tweetss):\n for t in tweetss:\n outputFile.write(('\\n%s,%s,%s,%s,%s,%d,%d,\"\"\"%s\"\"\",%s,%s,%s,%s' % (t.id, t.username, t.author_id, t.date.strftime(\"%Y-%m-%d\"), t.date.strftime(\"%H:%M\"), t.retweets, t.favorites, t.text, t.mentions, t.hashtags, t.permalink, t.urls)))\n outputFile.flush()\n print('More %d Saved On File...\\n' % len(tweetss))\n\n got.manager.TweetManager.getTweets(tweetCriteria, receiveBuffer)\n\n except Exception as e:\n print('Arguments Parser Error, try -h ' + arg)\n finally:\n outputFile.close()\n print('Tweet Extraction Complete. Output file generated \"%s\".' % outputFileName)\n Tweet_Preprocessing.main(outputFileName)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n"
},
{
"alpha_fraction": 0.3807641565799713,
"alphanum_fraction": 0.3807641565799713,
"avg_line_length": 18.973684310913086,
"blob_id": "2ce15fc4815e4754a5594eb5ea7c63d0a5f3e16d",
"content_id": "7b245bbc2e9aa39e538c6382341531ba4c96270d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 759,
"license_type": "permissive",
"max_line_length": 37,
"num_lines": 38,
"path": "/ng-pi-admin/src/app/pages/menu.ts",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "export let MENU_ITEM = [\n {\n path: 'index',\n title: 'Home',\n icon: 'home'\n },\n {\n path: 'form/form-layouts',\n title: 'Live Tweet Search',\n icon: 'search'\n },\n {\n path: 'preprocess',\n title: 'Tweet Preprocessing',\n icon: 'edit'\n },\n {\n path: 'sentiment',\n title: 'Sentiment Analysis',\n icon: 'database'\n },\n {\n path: 'visuals',\n title: 'Visualisation',\n icon: 'bar-chart'\n },\n // {\n // path: 'charts',\n // title: 'Charts',\n // icon: 'bar-chart',\n // children: [\n // {\n // path: 'echarts',\n // title: 'Echarts'\n // }\n // ]\n // },\n];\n"
},
{
"alpha_fraction": 0.28820347785949707,
"alphanum_fraction": 0.30232325196266174,
"avg_line_length": 25.558509826660156,
"blob_id": "916a52fa413fca5081e2f27bfaa44f7656747539",
"content_id": "47d23be8a1e303d525c0c2c2f73278a44ac7e702",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 9986,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 376,
"path": "/ng-pi-admin/src/app/pages/visuals/visuals.service.ts",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import { Injectable } from '@angular/core';\n\n@Injectable()\nexport class VisualsService {\n xAxisData = [];\n data1 = [];\n data2 = [];\n constructor() {\n for (var i = 0; i < 100; i++) {\n this.xAxisData.push('Type ' + i);\n this.data1.push((Math.sin(i / 5) * (i / 5 - 10) + i / 6) * 5);\n this.data2.push((Math.cos(i / 5) * (i / 5 - 10) + i / 6) * 5);\n }\n }\n\n BarOption;\n PieOption;\n LineOption;\n AnimationBarOption;\n ScatterOption;\n\n\n getGradientOption(x,y) {\n return {\n visualMap: [{\n show: false,\n type: 'continuous',\n seriesIndex: 0,\n min: 8,\n max: 4\n }],\n \n tooltip: {\n trigger: 'axis'\n },\n xAxis: [{\n name: 'Air Date',\n data: x,\n splitLine: {show: true}\n }, {\n name: 'Air Date',\n data: x,\n gridIndex: 1,\n splitLine: {show: true}\n }],\n yAxis: [{\n splitLine: {show: true}\n }, {\n splitLine: {show: true},\n gridIndex: 1\n }],\n grid: [{\n bottom: '15%'\n }, {\n top: '30%'\n }],\n series: [{\n type: 'line',\n showSymbol: false,\n data: y\n }]\n };\n }\n\n getScatterOption(scatterData) {\n var markLineOpt = {\n animation: false,\n label: {\n normal: {\n formatter: 'y =x',\n textStyle: {\n align: 'right'\n }\n }\n },\n lineStyle: {\n normal: {\n type: 'dashed'\n }\n },\n tooltip: {\n formatter: 'y = x'\n },\n data: [[{\n coord: [0, 0],\n symbol: 'none'\n }, {\n coord: [12000000, 12000000],\n symbol: 'none'\n }]]\n };\n\n this.ScatterOption = {\n type: 'value',\n xAxis: {\n name: 'Actual Viewership'\n },\n yAxis: {\n name: 'Predicted Viewership'\n\n },\n series: [{\n symbolSize: 6,\n data: scatterData,\n type: 'scatter',\n markLine: markLineOpt\n }],\n color: ['DeepSkyBlue']\n };\n\n return this.ScatterOption;\n }\n\n getBarOption(year,pos,neg) {\n this.BarOption ={\n tooltip: {\n trigger: 'axis',\n axisPointer: {\n type: 'cross',\n crossStyle: {\n color: '#999'\n }\n }\n },\n toolbox: {\n feature: {\n dataView: {show: true, readOnly: false},\n magicType: {show: true, type: ['line', 'bar']},\n restore: {show: true},\n saveAsImage: {show: true}\n }\n },\n legend: {\n data:['Positive','Negative']\n },\n xAxis: [\n {\n type: 'category',\n name: 'Year',\n data: year,\n axisPointer: {\n type: 'shadow'\n }\n }\n ],\n yAxis: [\n {\n type: 'value',\n name: 'Number of Tweets',\n min: 0,\n max: 160000,\n interval: 10000,\n axisLabel: {\n formatter: '{value}'\n }\n }\n ],\n series: [\n {\n name:'Negative',\n type:'bar',\n data:neg\n },\n {\n name:'Positive',\n type:'bar',\n data:pos\n }\n ],\n color: ['#DC143C','#32CD32']\n };\n return this.BarOption;\n }\n\n getLineOption(xLineData, yLineData) {\n this.LineOption = {\n xAxis: {\n type: 'category',\n data: xLineData,\n splitLine: {show: true},\n name: 'Year'\n },\n yAxis: {\n name: 'Number of Tweets',\n type: 'value',\n splitLine: {show: true}\n },\n series: [{\n data: yLineData,\n type: 'line',\n smooth: true\n }],\n color: ['green']\n };\n return this.LineOption;\n }\n\n getGradientOption2(xLineData, yLineData, yLineData2) {\n return {\n title: {\n \n },\n tooltip: {\n trigger: 'axis'\n },\n legend: {\n data:['Actual','Predicted']\n },\n grid: {\n left: '3%',\n right: '4%',\n bottom: '3%',\n containLabel: true\n },\n toolbox: {\n feature: {\n saveAsImage: {}\n }\n },\n xAxis: {\n type: 'category',\n boundaryGap: false,\n data: xLineData\n },\n yAxis: {\n type: 'value'\n },\n series: [\n {\n name:'Actual Viewership',\n type:'line',\n data:yLineData\n },\n {\n name:'Predicted Viewership',\n type:'line',\n data:yLineData2\n }\n ],\n color:['red','aqua']\n };\n \n\n // return {\n \n // visualMap: [{\n // show: 
false,\n // type: 'continuous',\n // seriesIndex: 0,\n // min: 0,\n // max: 15000000\n // },\n // {\n // show: false,\n // type: 'continuous',\n // seriesIndex: 0,\n // min: 0,\n // max: 15000000\n // }],\n \n // tooltip: {\n // trigger: 'axis'\n // },\n // legend: {\n // data: ['Example1', 'Example2']\n // },\n // xAxis: [{\n // name: 'Air Date',\n // data: xLineData,\n // splitLine: {show: true}\n // }, {\n // name: 'Air Date',\n // data: xLineData,\n // gridIndex: 1,\n // splitLine: {show: true}\n // }],\n // yAxis: [{\n // name:'Viewership',\n // splitLine: {show: true}\n // }, {\n // // name:'Viewership',\n // splitLine: {show: true},\n // gridIndex: 1\n // }],\n // grid: [{\n // bottom: '15%'\n // }, {\n // top: '30%'\n // }],\n // series: [\n // {\n // type: 'line',\n // showSymbol: false,\n // data: yLineData\n // },\n // {\n // type:'line',\n // showSymbol: false,\n // data: yLineData2\n // }\n // ],\n // color: ['#000080','aqua']\n // };\n }\n\n getPieOption() {\n this.PieOption = {\n tooltip: {\n trigger: 'item',\n formatter: '{a} <br/>{b}: {c} ({d}%)'\n },\n legend: {\n orient: 'vertical',\n x: 'left',\n data: ['Example1', 'Example2', 'Example3']\n },\n roseType: 'angle',\n series: [\n {\n name: 'PieChart',\n type: 'pie',\n radius: [0, '50%'],\n data: [\n { value: 235, name: 'Example1' },\n { value: 210, name: 'Example2' },\n { value: 162, name: 'Example3' }\n ]\n }\n ]\n }\n return this.PieOption;\n }\n\n getAnimationBarOption(xAxisData,data1,data2) {\n this.AnimationBarOption = {\n legend: {\n data: ['Actual Viewership', 'Predicted Viewership'],\n align: 'left'\n },\n tooltip: {},\n xAxis: {\n data: xAxisData,\n silent: false,\n splitLine: {\n show: true\n },\n name:'Air Date'\n },\n yAxis: {\n name: 'Viewership'\n },\n series: [{\n name: 'Actual',\n type: 'bar',\n data: data1,\n animationDelay: function (idx) {\n return idx * 10;\n }\n }, {\n name: 'Predicted',\n type: 'bar',\n data: data2,\n animationDelay: function (idx) {\n return idx * 10 + 100;\n }\n }],\n animationEasing: 'elasticOut',\n animationDelayUpdate: function (idx) {\n return idx * 5;\n },\n color: ['black','yellow']\n };\n\n return this.AnimationBarOption;\n }\n}\n"
},
{
"alpha_fraction": 0.5870367288589478,
"alphanum_fraction": 0.6060073971748352,
"avg_line_length": 34.587501525878906,
"blob_id": "9c68e718a1bd99e2ed38c72ddd13c476c0d1f2b6",
"content_id": "cd0a6fdee7ce90bb4ee451f2b76beada57b79f7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5693,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 160,
"path": "/Server.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, redirect, url_for\nfrom flask_cors import CORS, cross_origin\nfrom flask_restful import Resource, Api\nfrom json import dumps\nfrom flask_jsonpify import jsonify\nfrom flask import render_template\nimport LiveTweetSearch\nimport pandas as pd\nimport os,inspect\nimport Sentiment_Analysis\nimport Tweet_Preprocessing\nimport json\n\napp = Flask(__name__)\n\nAPP_ROOT = os.path.dirname(os.path.abspath(__file__))\n\napi = Api(app)\n\nCORS(app)\n\[email protected](\"/wordcloud\", methods = ['POST','GET'])\ndef wordcloud():\n if request.method == 'POST':\n show = request.form.get('show')\n year = request.form.get('year')\n\n path = os.path.realpath('/assets/images/tweet-' + year + '-wordcloud.png')\n return path\n\n else:\n return 0\n\[email protected](\"/show\", methods = ['POST','GET'])\ndef show():\n if request.method == 'GET':\n data = pd.read_csv('Tweet_data/tweet_data.csv', usecols=range(12), index_col=False, low_memory=False)\n return(pd.DataFrame.to_json(data, orient='index'))\n\[email protected](\"/preprocess\", methods = ['POST','GET'])\ndef preprocess():\n if request.method == 'GET':\n Tweet_Preprocessing.main('Tweet_data/tweet_data.csv')\n data = pd.read_csv('Preprocessed_data/tweet_data_preprocessed.csv', usecols=range(13), index_col=False, low_memory=False)\n return(pd.DataFrame.to_json(data, orient='index'))\n\[email protected](\"/scatter\", methods = ['POST','GET'])\ndef scatter():\n if request.method == 'GET':\n data = pd.read_csv('Prediction_data/predicted_file.csv', usecols=range(21), index_col=False, low_memory=False)\n return(pd.DataFrame.to_json(data, orient='index'))\n\[email protected](\"/line1\", methods = ['POST','GET'])\ndef line1():\n if request.method == 'GET':\n res=[]\n yearList=['2009','2010','2011','2012','2013','2014','2015','2016','2017']\n for x in yearList:\n with open('Tweet_Data/tweet_'+x+'.csv') as f:\n res.append(sum(1 for line in f))\n return(json.dumps(res))\n\[email protected](\"/line2\", methods = ['POST','GET'])\ndef line2():\n if request.method == 'GET':\n res=[]\n data = pd.read_csv('Prediction_data/simpsons_episodes.csv', usecols=range(19), index_col=False, low_memory=False)\n data.dropna(inplace = True)\n res = {\n \"ep\": data['Air_Date'].tolist(),\n \"imdb\": data['IMDB_Rating'].tolist()\n }\n return(json.dumps(res))\n\n\[email protected](\"/line3\", methods = ['POST','GET'])\ndef line3():\n if request.method == 'GET':\n res=[]\n data = pd.read_csv('Prediction_data/predicted_file.csv', usecols=range(21), index_col=False, low_memory=False)\n data.dropna(inplace = True)\n res = {\n \"ep\": data['Air_Date'].tolist(),\n \"views\": data['US_Viewers_In_Millions'].tolist(),\n \"predicted\": data['Predicted_Viewership'].tolist()\n }\n return(json.dumps(res))\n\[email protected](\"/bar\", methods = ['POST','GET'])\ndef bar():\n if request.method == 'GET':\n data = pd.read_csv('Prediction_data/predicted_file.csv', usecols=range(21), index_col=False, low_memory=False)\n return(pd.DataFrame.to_json(data, orient='index'))\n\n\[email protected](\"/bar2\", methods = ['POST','GET'])\ndef bar2():\n if request.method == 'GET':\n fileNames = ['2009','2010','2011','2012','2013','2014','2015','2016','2017']\n df = pd.DataFrame()\n pos=[]\n neg=[]\n for x in fileNames:\n data = pd.read_csv('Prediction_data/tweet_'+x+'_predict.csv', usecols=range(15), index_col=False, low_memory=False)\n temp = data['Sentiment_Score'].value_counts()\n # print(temp)\n pos.append(temp[4])\n neg.append(temp[0])\n df['Year'] = fileNames\n 
df['Pos'] = pos\n df['Neg'] = neg\n print(df)\n return(pd.DataFrame.to_json(df, orient='index'))\n\[email protected](\"/sentiment\", methods = ['POST','GET'])\ndef sentiment():\n if request.method == 'GET':\n Sentiment_Analysis.main('./Preprocessed_data/preprocessed_training_data.csv')\n data = pd.read_csv('Prediction_data/tweet_data_predict.csv', usecols=range(15), index_col=False, low_memory=False)\n return(pd.DataFrame.to_json(data, orient='index'))\n\[email protected](\"/search\", methods = ['POST','GET'])\ndef search():\n if request.method == 'POST':\n username = request.form.get('username')\n query = request.form.get('query')\n since = request.form.get('since')\n until = request.form.get('until')\n maxNo = request.form.get('maxNo')\n top = request.form.get('top')\n tweetSearchParameters=[]\n if username:\n tweetSearchParameters.append('--username')\n tweetSearchParameters.append(username)\n if query:\n tweetSearchParameters.append('--query')\n tweetSearchParameters.append(query)\n if since:\n tweetSearchParameters.append('--since')\n tweetSearchParameters.append(since)\n if until:\n tweetSearchParameters.append('--until')\n tweetSearchParameters.append(until)\n if maxNo:\n tweetSearchParameters.append('--maxtweets')\n tweetSearchParameters.append(maxNo)\n if top == True:\n tweetSearchParameters.append('--toptweets')\n\n LiveTweetSearch.main(tweetSearchParameters)\n\n data = pd.read_csv('Tweet_data/tweet_data.csv', usecols=range(12), index_col=False, low_memory=False)\n # return jsonify([{'tweetSearchParameters':data['Text'][1]}])\n return(pd.DataFrame.to_json(data, orient='index'))\n\n else:\n return 0\n\nif __name__ == '__main__':\n app.run(port=5003)"
},
{
"alpha_fraction": 0.6439560651779175,
"alphanum_fraction": 0.6571428775787354,
"avg_line_length": 35.193180084228516,
"blob_id": "55884e398cda41a8b5084edd22f8aac61aaed0ef",
"content_id": "460529701822446eaa4cc524f250c6b1b6c66e72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3185,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 88,
"path": "/Feature_Extraction.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport bisect, datetime\nimport Viewership_Prediction\n\n\ndef date_change(str_date):\n if str_date:\n return datetime.datetime.strptime(str_date, '%d-%m-%Y').strftime('%Y-%m-%d')\n\n\ndef viewers_change(str_views):\n if str_views == 'NaN':\n return '0'\n return str(int(float(str_views) * 1000000))\n\n\ndef main(prediction_file, simpsons_file):\n\n print('Prediction Features Extraction Started')\n viewer_data = pd.read_csv(simpsons_file, usecols=range(13), index_col=False, low_memory = False)\n print('Episode Data File Read Successful')\n\n tweet_data = pd.read_csv(prediction_file, usecols=range(15), index_col=False, low_memory = False)\n print('Tweet Data File Read Successful')\n\n viewer_data['Air_Date'] = list(map(date_change, viewer_data['Air_Date']))\n tweet_data['Date'] = list(map(date_change, tweet_data['Date']))\n print('Date Columns Altered')\n\n viewer_data['US_Viewers_In_Millions'] = list(map(viewers_change, viewer_data['US_Viewers_In_Millions']))\n print('Viewer Column Altered')\n\n first_date = bisect.bisect_left(viewer_data['Air_Date'], '2010-01-01')\n last_date = bisect.bisect_left(viewer_data['Air_Date'], '2017-01-01')\n\n retweets = list()\n favorites = list()\n vaderScore = list()\n sentimentScore = list()\n tweetsPerDay = list()\n uniqueUsers = list()\n\n retweets.append(0)\n favorites.append(0)\n vaderScore.append(0)\n sentimentScore.append(0)\n tweetsPerDay.append(0)\n uniqueUsers.append(0)\n\n count = 1\n for i in range(first_date, last_date - 1):\n print(viewer_data['Air_Date'][i])\n temp1 = str(viewer_data['Air_Date'][i])\n temp2 = str(viewer_data['Air_Date'][i + 1])\n\n start = bisect.bisect_left(tweet_data['Date'], temp1)\n end = bisect.bisect_left(tweet_data['Date'], temp2)\n uniqueSortedDates = sorted(set(tweet_data['Date'][start:end]))\n\n tweetsPerDayCount = (end - start) / len(uniqueSortedDates)\n uniqueUsersCount = len(set(tweet_data['Author_ID'][start:end]))\n for i in range(start, end):\n count = uniqueSortedDates.index(str(tweet_data['Date'][i])) + 1\n retweetCount = count * float(tweet_data['Retweets'][i])\n favoriteCount = count * float(tweet_data['Favorites'][i])\n vaderCount = count * float(tweet_data['Vader_Score'][i])\n sentimentCount = count * float(tweet_data['Sentiment_Score'][i])\n\n retweets.append(retweetCount)\n favorites.append(favoriteCount)\n vaderScore.append(vaderCount)\n sentimentScore.append(sentimentCount)\n tweetsPerDay.append(tweetsPerDayCount)\n uniqueUsers.append(uniqueUsersCount)\n\n viewer_data['Retweets'] = retweets\n viewer_data['Favorites'] = favorites\n viewer_data['Vader_Score'] = vaderScore\n viewer_data['Sentiment_Score'] = sentimentScore\n viewer_data['Tweets_Per_Day'] = tweetsPerDay\n viewer_data['Unique_Users'] = uniqueUsers\n viewer_data.to_csv(simpsons_file, index = False)\n\n Viewership_Prediction.main(simpsons_file)\n\n\nif __name__ == \"__main__\":\n main('./Prediction_data/tweet_predict.csv', './Prediction_data/simpsons_episodes.csv')\n"
},
{
"alpha_fraction": 0.7072387933731079,
"alphanum_fraction": 0.7142570614814758,
"avg_line_length": 39.217742919921875,
"blob_id": "f5a89579e6b8daee608458c4e134e4c45effa994",
"content_id": "8e31e635f551f24a49f5de9947a899a9c6cd8509",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4987,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 124,
"path": "/Sentiment_Analysis.py",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom scipy.sparse import hstack\nfrom nltk.sentiment import vader\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\ndef train_classifier(features_train, features_test, label_train, label_test, classifier):\n if classifier == \"Logistic_Regression\":\n model = LogisticRegression(C=1.)\n elif classifier == \"Naive_Bayes\":\n model = MultinomialNB()\n elif classifier == \"SVM\":\n model = SVC()\n elif classifier == \"Random_Forest\":\n model = RandomForestClassifier(n_estimators=400, random_state=11)\n else:\n print(\"Incorrect Selection Of Classifier\")\n\n model.fit(features_train, label_train)\n print(\"Model Fitting Done\")\n\n fileName = './Sentiment_models/' + classifier + '.pickle'\n with open(fileName, 'wb') as file:\n pickle.dump(model, file)\n print(\"Pickle File Created %s\" % fileName)\n\n accuracy = model.score(features_test, label_test)\n print(\"Accuracy Is:\", accuracy)\n\n # Make prediction on the test data\n probability_to_be_positive = model.predict_proba(features_test)[:,1]\n\n # Check AUC(Area Under the Roc Curve) to see how well the score discriminates between negative and positive\n print(\"AUC (Train Data):\", roc_auc_score(label_test, probability_to_be_positive))\n\n # Print top 10 scores as a sanity check\n print(\"Top 10 Scores: \", probability_to_be_positive[:10])\n\n return model\n\n\ndef calculate_vader(tweet):\n\n if type(float) == type(tweet):\n return 0\n sia = vader.SentimentIntensityAnalyzer()\n return sia.polarity_scores(tweet)['compound']\n\n\ndef main(fileName):\n\n print('Sentiment Analysis Model Training Started')\n inputFileName = './Preprocessed_data/tweet_data_preprocessed.csv'\n outputFileName = './Prediction_data/tweet_data_predict.csv'\n algorithm = 'Logistic_Regression'\n\n train_dataset = pd.read_csv(fileName, usecols = range(7), encoding = 'Latin-1', index_col = False, low_memory = False)\n train_dataset.Tidy_Tweet = train_dataset.Tidy_Tweet.fillna(value=\"\")\n print('Preprocessed Sentiment Training File read')\n\n x = np.array(train_dataset.Tidy_Tweet)\n y = np.array(train_dataset.sentiment)\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 42)\n data_train = x_train\n label_train = y_train\n data_test = x_test\n label_test = y_test\n print('Data Sliced In Training And Testing Sets')\n\n tfv = TfidfVectorizer(sublinear_tf = True , stop_words = \"english\")\n Tfidf_features_train = tfv.fit_transform(data_train)\n Tfidf_features_test = tfv.transform(data_test)\n print(\"TF-IDF Features Extracted\")\n\n bow_vectorizer = CountVectorizer(max_df = 0.90, min_df = 2, max_features = 1000, stop_words = 'english')\n bow_features_train = bow_vectorizer.fit_transform(data_train)\n bow_features_test = bow_vectorizer.transform(data_test)\n print(\"BOW Features Extracted\")\n\n features_final_train = hstack((Tfidf_features_train, bow_features_train))\n features_final_test = hstack((Tfidf_features_test, bow_features_test))\n print(\"Training And Testing Sparse Matrix Created\")\n\n # print(\"Model Training Started\")\n # model = train_classifier(features_final_train, features_final_test, label_train, 
label_test, algorithm)\n # print(\"Model Training Complete\")\n\n fileName = './Sentiment_models/' + algorithm + '.pickle'\n pickle_in = open(fileName, 'rb')\n model = pickle.load(pickle_in)\n print(\"%s Model Loaded\" % algorithm)\n\n prediction_dataset = pd.read_csv(inputFileName, usecols = range(13), encoding = 'Latin-1', index_col = False, low_memory = False)\n prediction_dataset.Tidy_Tweet = prediction_dataset.Tidy_Tweet.fillna(value = \"\")\n x_prediction = np.array(prediction_dataset.Tidy_Tweet)\n print(\"Input Tweet File Read\")\n\n features_x_prediction1 = tfv.transform(x_prediction)\n features_x_prediction2 = bow_vectorizer.transform(x_prediction)\n features_x_prediction = hstack((features_x_prediction1, features_x_prediction2))\n print(\"Sparse Matrix Merged\")\n\n prediction_dataset['Sentiment_Score'] = model.predict(features_x_prediction)\n print(\"Sentimental Analysis Using %s Completed\" % algorithm)\n\n prediction_dataset['Vader_Score'] = prediction_dataset['Tidy_Tweet'].apply(calculate_vader)\n print(\"Sentimental Analysis Using Vader Completed\")\n\n prediction_dataset.to_csv(outputFileName, index = False)\n print(\"Sentimental Analysis Of Tweets Complete. Output File Generated %s\" % outputFileName)\n return outputFileName\n\n\nif __name__ == '__main__':\n main('./Preprocessed_data/preprocessed_training_data.csv')\n"
},
{
"alpha_fraction": 0.703071653842926,
"alphanum_fraction": 0.703071653842926,
"avg_line_length": 30.39285659790039,
"blob_id": "296117cbc2c34cf8714cd63fe88ad6d8b53e2718",
"content_id": "61b002c132dee225e41c3c52e5392d992def8677",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 879,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 28,
"path": "/ng-pi-admin/src/app/app.module.ts",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import { BrowserModule } from '@angular/platform-browser';\nimport { FormsModule } from '@angular/forms';\nimport { NgModule } from '@angular/core';\nimport { PagesModule } from './pages/pages.module';\nimport { routing } from './app.routing';\nimport { AppComponent } from './app.component';\nimport { BrowserAnimationsModule } from '@angular/platform-browser/animations';\n// import { PreprocessModule } from './pages/preprocess/preprocess.module';\n// import { VisualsModule } from './pages/visuals/visuals.module';\n// import { SentimentModule } from './pages/sentiment/sentiment.module';\n\n@NgModule({\n imports: [\n BrowserModule,\n BrowserAnimationsModule,\n FormsModule,\n // PreprocessModule,\n // VisualsModule,\n PagesModule,\n routing,\n // SentimentModule\n ],\n declarations: [\n AppComponent,\n ],\n bootstrap: [AppComponent]\n})\nexport class AppModule { }\n"
},
{
"alpha_fraction": 0.6706055998802185,
"alphanum_fraction": 0.6706055998802185,
"avg_line_length": 28.434782028198242,
"blob_id": "ea00e661f4ac2d8daf943347262906030c5b4983",
"content_id": "aca382c73658dea5a6e0ec7147fc0a23fb2a8bba",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 677,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 23,
"path": "/ng-pi-admin/src/app/pages/preprocess/preprocess.module.ts",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import { NgModule } from '@angular/core';\nimport { CommonModule } from '@angular/common';\nimport { routing } from './preprocess.routing';\nimport { SharedModule } from '../../shared/shared.module';\nimport { PreprocessComponent } from './preprocess.component';\nimport { HttpClientModule } from \"@angular/common/http\";\nimport { FormsModule } from '@angular/forms'\nimport { NgxPaginationModule } from 'ngx-pagination';\n\n@NgModule({\n imports: [\n NgxPaginationModule,\n CommonModule,\n SharedModule,\n routing,\n HttpClientModule,\n FormsModule\n ],\n declarations: [\n PreprocessComponent\n ]\n})\nexport class PreprocessModule { }\n"
},
{
"alpha_fraction": 0.5962980389595032,
"alphanum_fraction": 0.6239456534385681,
"avg_line_length": 28.034013748168945,
"blob_id": "26d14021b62dde4372eb66dc6c331d1c9c4720f1",
"content_id": "a061fec21ea1ce250a02f7f992833c764a9200db",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 4268,
"license_type": "permissive",
"max_line_length": 154,
"num_lines": 147,
"path": "/ng-pi-admin/src/app/pages/visuals/visuals.component.ts",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import { Component, OnInit } from '@angular/core';\nimport { HttpClient } from '@angular/common/http';\nimport { HttpHeaders } from '@angular/common/http';\nimport { VisualsService } from './visuals.service';\n\n@Component({\n selector: 'app-visual',\n templateUrl: './visuals.component.html',\n styleUrls: ['./visuals.component.scss'],\n providers: [VisualsService]\n})\nexport class VisualsComponent implements OnInit {\n showloading: boolean = false;\n BarOption;\n LineOption;\n GradientOption;\n PieOption;\n ScatterOption;\n AnimationBarOption;\n serverData: JSON;\n arrayOfKeys;\n plot=0;\n argument:number[][];\n yearList=[2009,2010,2011,2012,2013,2014,2015,2016,2017];\n\n // constructor() { }\n // constructor(private httpClient: HttpClient) {}\n\n constructor(private chartsService: VisualsService, private httpClient: HttpClient) {\n }\n \n ngOnInit() {\n }\n\n scatterPredVsActual(){\n this.httpClient.get(\"http://127.0.0.1:5003/scatter\").subscribe((data) => {\n this.serverData = data as JSON;\n console.log(this.serverData);\n this.arrayOfKeys = Object.keys(this.serverData)\n this.argument = [];\n this.arrayOfKeys.forEach(element => {\n this.argument.push([this.serverData[element].US_Viewers_In_Millions, this.serverData[element].Predicted_Viewership]);\n });\n console.log(this.argument);\n this.ScatterOption = this.chartsService.getScatterOption(this.argument);\n this.plot = 1;\n })\n }\n\n lineTweetsPerYear(){\n this.httpClient.get(\"http://127.0.0.1:5003/line1\").subscribe((data) => {\n console.log(data);\n this.LineOption = this.chartsService.getLineOption(\n this.yearList,\n data\n )\n });\n this.plot=2;\n }\n\n lineImdb(){\n this.httpClient.get(\"http://127.0.0.1:5003/line2\").subscribe((data) => {\n console.log(data);\n this.GradientOption = this.chartsService.getGradientOption(\n data['ep'],\n data['imdb']\n )\n });\n this.plot=3;\n }\n\n lineViews(){\n this.httpClient.get(\"http://127.0.0.1:5003/line3\").subscribe((data) => {\n console.log(data);\n this.GradientOption = this.chartsService.getGradientOption2(\n data['ep'],\n data['views'],\n data['predicted']\n )\n });\n this.plot=5;\n }\n\n barPosNeg(){\n this.httpClient.get(\"http://127.0.0.1:5003/bar2\").subscribe((data) => {\n // console.log(data);\n console.log(data)\n var pos=[], neg=[],year=[];\n this.arrayOfKeys = Object.keys(data);\n this.arrayOfKeys.forEach(element => {\n year[element]=data[element].Year;\n pos[element]=data[element].Pos;\n neg[element]=data[element].Neg;\n });\n this.BarOption = this.chartsService.getBarOption(\n year,\n pos,\n neg\n );\n\n });\n \n this.plot=6;\n }\n\n barActualVsPredicted(){\n this.httpClient.get(\"http://127.0.0.1:5003/bar\").subscribe((data) => {\n this.serverData = data as JSON;\n console.log(this.serverData);\n this.arrayOfKeys = Object.keys(this.serverData)\n this.argument = [];\n // this.arrayOfKeys.forEach(element => {\n // this.argument.push([this.serverData[element].US_Viewers_In_Millions, this.serverData[element].Predicted_Viewership], this.serverData[element]);\n // });\n var x=[], y=[], z=[];\n this.arrayOfKeys.forEach(element => {\n x.push(data[element].Air_Date)\n y.push(data[element].US_Viewers_In_Millions);\n z.push(data[element].Predicted_Viewership);\n });\n console.log(x,y,z);\n this.AnimationBarOption = this.chartsService.getAnimationBarOption(x,y,z);\n this.plot = 4;\n });\n }\n\n wordcloud(data){\n console.log(data);\n\n const httpOptions = {\n headers: new HttpHeaders({\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Authorization': 
'my-auth-token'\n })\n };\n var body = \"show=\" + data.show + \"&year=\" + data.year;\n\n this.httpClient.post(\"http://127.0.0.1:5003/search\", body, httpOptions).subscribe((data) => {\n this.serverData = data as JSON;\n // alert(JSON.stringify(this.serverData))\n console.log(this.serverData)\n // alert(typeof this.serverData)\n this.arrayOfKeys = Object.keys(this.serverData)\n // alert(this.arrayOfKeys)\n })\n }\n}\n"
},
{
"alpha_fraction": 0.6584992408752441,
"alphanum_fraction": 0.6584992408752441,
"avg_line_length": 27.39130401611328,
"blob_id": "384005967fa2200034fdcf498bf5ee181b4ec95a",
"content_id": "13960d1496d4626313e4420dde1241c2ebf11742",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "TypeScript",
"length_bytes": 653,
"license_type": "permissive",
"max_line_length": 58,
"num_lines": 23,
"path": "/ng-pi-admin/src/app/pages/visuals/visuals.module.ts",
"repo_name": "ssshreyy/TV-Viewership-Prediction",
"src_encoding": "UTF-8",
"text": "import { NgModule } from '@angular/core';\nimport { CommonModule } from '@angular/common';\nimport { routing } from './visuals.routing';\nimport { SharedModule } from '../../shared/shared.module';\nimport { VisualsComponent } from './visuals.component';\nimport { HttpClientModule } from \"@angular/common/http\";\nimport { FormsModule } from '@angular/forms'\nimport { NgxEchartsModule } from 'ngx-echarts';\n\n@NgModule({\n imports: [\n NgxEchartsModule,\n CommonModule,\n SharedModule,\n routing,\n HttpClientModule,\n FormsModule\n ],\n declarations: [\n VisualsComponent\n ]\n})\nexport class VisualsModule { }\n"
}
] | 21 |
vdeleon/clickTwitch | https://github.com/vdeleon/clickTwitch | 1b3df12169693a50db16363f3d566457ccdde459 | 78e5b1bdb82dccec410bcd17ca2c9b5c517877de | 45a16195e9b2ce502d5cf2de4544cb8b2b25ac71 | refs/heads/master | 2023-01-19T01:17:56.869550 | 2020-11-26T16:13:06 | 2020-11-26T16:13:06 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.65500408411026,
"alphanum_fraction": 0.6663954257965088,
"avg_line_length": 21.77777862548828,
"blob_id": "808156cc20e601f62621c258459462130284868d",
"content_id": "81a07d0d8149091b419a385d37448aa420095997",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1229,
"license_type": "permissive",
"max_line_length": 144,
"num_lines": 54,
"path": "/clickTwitch/__init__.py",
"repo_name": "vdeleon/clickTwitch",
"src_encoding": "UTF-8",
"text": "'''\nclickTwitch is an automation tool that claims your twitch reward while watching a stream.\n\nAPI:\n========\n\n`detectPosition()`\n\n`clickPosition()`\n\n`randomMovement()`\n\n`isLetterY()`\n\n'''\n\nimport mouse\n\ndef detectPosition():\n '''\n Waits until client presses middle button, then parses the mouse position to variables used by clickPosition().\n '''\n print('Position your mouse where the claiming button is. When you are ready, click the middle button on your mouse to confirm the location')\n mouse.wait(button='middle')\n pos = mouse.get_position()\n\n detectPosition.x = pos[0]\n detectPosition.y = pos[1]\n\ndef clickPosition(x, y):\n '''\n Moves to passed coordenates and clicks.\n '''\n mouse.move(x, y, absolute=True, duration=0.1) \n mouse.click(button='left')\n\ndef randomMovement():\n '''\n Moves to random position.\n '''\n import random\n\n xRandomPosition = random.randrange(-50, 50)\n yRandomPosition = random.randrange(-50, 50)\n mouse.move(xRandomPosition, yRandomPosition, absolute=False, duration=0.1)\n\ndef isLetterY(letter):\n '''\n Returns true is parameter is a string equal to 'y'.\n '''\n letter.lower()\n if letter == 'y':\n return True\n return False"
},
{
"alpha_fraction": 0.6707317233085632,
"alphanum_fraction": 0.6804878115653992,
"avg_line_length": 27.275861740112305,
"blob_id": "05b2ccb0ea4fd86acd6a190f0ca03bdceeb6e7b7",
"content_id": "0275fd24fe5310aaef133bb46e4d8a59c5e4a70e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 820,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 29,
"path": "/main.py",
"repo_name": "vdeleon/clickTwitch",
"src_encoding": "UTF-8",
"text": "import subprocess\nimport clickTwitch\nfrom time import sleep\n\n# Set up.\ntry:\n clickInterval = int(input('Set time in seconds the mouse will click (default = 30): '))\n if clickInterval <= 1:\n raise ValueError\nexcept ValueError:\n clickInterval = 30\nfinally:\n subprocess.run('cls', shell=True)\n \nwantsRandom = str(input('Do you want to make a random movement every second? Y/n: '))\nsubprocess.run('cls', shell=True)\nclickTwitch.detectPosition()\n\n# Loops until window is closed.\nwhile True:\n subprocess.run('cls', shell=True)\n i = 0\n clickTwitch.clickPosition(clickTwitch.detectPosition.x, clickTwitch.detectPosition.y)\n while i < clickInterval:\n sleep(1)\n i += 1\n print('%is' % i)\n if clickTwitch.isLetterY(wantsRandom):\n clickTwitch.randomMovement()\n"
},
{
"alpha_fraction": 0.7254464030265808,
"alphanum_fraction": 0.7522321343421936,
"avg_line_length": 28.866666793823242,
"blob_id": "9b69e7af1aae3f2855765e8bd606c793cbacbf20",
"content_id": "a55c0a5d7e6269299633b3ef6777484ccf488fe0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 448,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 15,
"path": "/README.md",
"repo_name": "vdeleon/clickTwitch",
"src_encoding": "UTF-8",
"text": "# clickTwitch\nMade by [samoht9277](https://github.com/samoht9277)\n\n\n\n\n### What is it?\nclickTwitch is an automation tool that claims your twitch reward while watching a stream.\n\n### How do i use it?\nRun `pip install -i https://test.pypi.org/simple/ clickTwitch`\n\nDownload main.py and run `python main.py`\n\nOr `import clickTwitch` to have all of the code at your disposal.\n"
},
{
"alpha_fraction": 0.7419354915618896,
"alphanum_fraction": 0.7419354915618896,
"avg_line_length": 19.66666603088379,
"blob_id": "de80cef93fe27f0a4680ad4822f7db2f65388486",
"content_id": "a68801993efa0dc89d39b2692ea14ca546d8c0f4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 62,
"license_type": "permissive",
"max_line_length": 29,
"num_lines": 3,
"path": "/TODO.md",
"repo_name": "vdeleon/clickTwitch",
"src_encoding": "UTF-8",
"text": "# TODO\n- clear terminal based on os.\n- learn github workflows\n"
}
] | 4 |
Darwinian2/vnc_virtual_display_linker | https://github.com/Darwinian2/vnc_virtual_display_linker | bc806706c693007376170d6fbd1a3dda0b919cff | 816feff2fd0415cfe4064ce5b9ff09cb6a83dd24 | ca0d10172d25d79568983ed0ac0c0869cd5bb72f | refs/heads/master | 2021-01-20T01:17:50.672038 | 2017-04-28T01:42:28 | 2017-04-28T01:42:28 | 101,284,082 | 1 | 0 | null | 2017-08-24T10:39:47 | 2017-08-24T10:39:35 | 2017-04-28T01:42:28 | null | [
{
"alpha_fraction": 0.5955148339271545,
"alphanum_fraction": 0.6046766042709351,
"avg_line_length": 35.564998626708984,
"blob_id": "aff1ca464e40154a34b628f6a5194d905eb286d1",
"content_id": "17e51ba4828c20d372bbd869edc1dff1e6d10d1c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7315,
"license_type": "no_license",
"max_line_length": 146,
"num_lines": 200,
"path": "/vnc_virtual_display_linker.py",
"repo_name": "Darwinian2/vnc_virtual_display_linker",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#title :vnc_virtual_display_linker.py\n#description :This program creates a second screen and start a VNC sever offering it\n#author :\n#date :\n#version :0.1\n#usage :vnc_virtual_display_linker.py\n#notes :\n#python_version :2.7.6\n#=======================================================================\n\n## README !!!\n## This script will let you connect an external device\n## to your X11 server as a second monitor thorugh VNC\n\n## i.e. use your tablet to extend your desktop\n## I have it working using Ubuntu 16.04\n\n## INSTALLATION:\n## $ pip install dotmap\n## $ sudo apt install x11vnc\n## then create a password!\n## $ x11vnc -storepasswd\n\n## If your tablet/phone is supported, you can connect to your\n## device via an USB cable and the ADB platform\n## but you need to install the proper tools first:\n## $ sudo apt install adb android-tools-adb android-tools-fastboot\n\n## Remember to turn on USB debugging on your device\n## A good VNC client I found is bVNC Free\n\n# =======================\n# CONFIGURATION\n# =======================\nPC_MONITOR_WIDTH = 1920\nPC_MONITOR_LENGTH = 1080\nVIRTUAL_MONITOR_WIDTH = 1280\nVIRTUAL_MONITOR_LENGTH = 800\n\n\n# =======================\n\n# Import the modules needed to run the script.\nimport sys, os, subprocess, re\nfrom dotmap import DotMap\n\n# Main definition - constants\nmenu_actions = {}\n\n# =======================\n# CLASSES\n# =======================\nclass ScreenManager:\n def __init__(self):\n self.is_landscape = True\n\n self.conf = DotMap()\n self.conf.pc_monitor.width = PC_MONITOR_WIDTH\n self.conf.pc_monitor.length = PC_MONITOR_LENGTH\n self.conf.virtual_monitor.width = VIRTUAL_MONITOR_WIDTH\n self.conf.virtual_monitor.length = VIRTUAL_MONITOR_LENGTH\n self.conf[self.get_orientation].is_monitor_created = False\n\n self.new_monitor()\n\n def new_monitor(self):\n orientation = self.get_orientation()\n conf = self.conf\n\n if orientation == 'landscape':\n self.set_xrandr_mode_and_x11vnc_clip(conf.virtual_monitor.width, conf.virtual_monitor.length)\n else:\n self.set_xrandr_mode_and_x11vnc_clip(conf.virtual_monitor.length, conf.virtual_monitor.width)\n\n conf[orientation].xrandr_mode.alias = self.get_xrandr_mode_alias(conf[orientation].xrandr_mode.data)\n\n os.system(\"xrandr --newmode \" + conf[orientation].xrandr_mode.data + \" -hsync +vsync\")\n os.system(\"xrandr --addmode VIRTUAL1 \" + conf[orientation].xrandr_mode.alias)\n os.system(\"xrandr --output VIRTUAL1 --mode \" + conf[orientation].xrandr_mode.alias)\n os.system('xrandr')\n\n self.conf[self.get_orientation].is_monitor_created = True\n\n def delete_monitor(self):\n orientation = self.get_orientation()\n conf = self.conf\n\n os.system(\"xrandr --output VIRTUAL1 --off\")\n os.system(\"xrandr --delmode VIRTUAL1 \" + conf[orientation].xrandr_mode.alias)\n os.system('xrandr')\n self.conf[self.get_orientation].is_monitor_created = False\n\n def start_vnc(self):\n os.system(\"x11vnc -nocursorshape -nocursorpos -noxinerama -solid -repeat -forever -clip \" + self.conf[self.get_orientation()].x11vnc_clip)\n\n def toggle_orientation(self):\n self.delete_monitor()\n self.is_landscape = False if self.is_landscape else True\n self.new_monitor()\n\n def get_orientation(self):\n return \"landscape\" if self.is_landscape else \"portrait\"\n\n def configure_resolution(self):\n os.system('clear')\n self.conf.pc_monitor.width = self.configure_resolution_helper('Your monitor resolution width', self.conf.pc_monitor.width)\n 
self.conf.pc_monitor.length = self.configure_resolution_helper('Your monitor resolution length', self.conf.pc_monitor.length)\n self.conf.virtual_monitor.width = self.configure_resolution_helper('Virtual monitor resolution width', self.conf.virtual_monitor.width)\n self.conf.virtual_monitor.length = self.configure_resolution_helper('Virtual monitor resolution length', self.conf.virtual_monitor.length)\n\n def adb_port_forwarding(self):\n os.system('adb reverse tcp:5900 tcp:5900')\n\n # PRIVATE\n def get_xrandr_mode_data(self, width, length):\n for line in subprocess.check_output(\"gtf {0} {1} 60\".format(width, length), shell=True).split(\"\\n\"):\n if \"Mode\" in line:\n return re.sub(r'Modeline (.*)-HSync.*', r'\\1', line).strip()\n\n def get_xrandr_mode_alias(self, mode_data):\n return re.sub(r'.*(\".*\").*', r'\\1', mode_data)\n\n def get_clip_param(self, width, length, pc_monitor_width):\n return \"{0}x{1}+{2}+0\".format(width, length, pc_monitor_width)\n\n def set_xrandr_mode_and_x11vnc_clip(self, width, length):\n self.conf[self.get_orientation()].xrandr_mode.data = self.get_xrandr_mode_data( width, length)\n self.conf[self.get_orientation()].x11vnc_clip = self.get_clip_param( width, length, self.conf.pc_monitor.width)\n\n def configure_resolution_helper(self, text, var):\n print text + \" [\" + str(var) + ']:'\n choice = raw_input(\" >> \").strip()\n return choice if choice != '' else var\n\n# =======================\n# MENUS FUNCTIONS\n# =======================\n\n# Main menu\ndef main_menu():\n screen_manager = ScreenManager()\n while True:\n os.system('clear')\n\n print \"WELCOME TO THE AUTOMAGICALLY 2ND DISPLAY LINKER\"\n print \"Current status:\"\n print \"\\tResolutions:\"\n print \"\\tYour monitor:\\t\" + str(screen_manager.conf.pc_monitor.width) + 'x' + str(screen_manager.conf.pc_monitor.length)\n print \"\\tVirtual:\\t\" + str(screen_manager.conf.virtual_monitor.width) + 'x' + str(screen_manager.conf.virtual_monitor.length)\n print\n print \"\\tOrientation: \" + screen_manager.get_orientation()\n print \"\\tCreated 2nd monitor: \" + str(screen_manager.conf[screen_manager.get_orientation].is_monitor_created)\n print\n print \"Please choose an action:\"\n print \"N. New monitor\"\n print \"D. Delete monitor\"\n print \"T. Toggle landscape / portrait\"\n print \"S. Start VNC\"\n print \"C. Configure the resolution of the monitors\"\n print \"A. Activate ADB port forwarding\"\n print \"Q. Quit\"\n\n while True:\n choice = raw_input(\" >> \").lower()\n if choice == 'q':\n sys.exit()\n else:\n try:\n func = getattr(screen_manager, menu_actions[choice])\n func()\n print \"\\n\\n\\nPress ENTER to continue\"\n raw_input()\n break\n except KeyError:\n print \"Invalid selection, please try again.\\n\"\n# =======================\n# MENUS DEFINITIONS\n# =======================\n\n# Menu definition\nmenu_actions = {\n 'main_menu': main_menu,\n 's': 'start_vnc',\n 't': 'toggle_orientation',\n 'n': 'new_monitor',\n 'd': 'delete_monitor',\n 'c': 'configure_resolution',\n 'a': 'adb_port_forwarding',\n}\n\n# =======================\n# MAIN PROGRAM\n# =======================\n\n# Main Program\nif __name__ == \"__main__\":\n # Launch main menu\n main_menu()\n"
},
{
"alpha_fraction": 0.7636942863464355,
"alphanum_fraction": 0.7700636982917786,
"avg_line_length": 39.25640869140625,
"blob_id": "db215e51df7f744271404651869066271cda67fb",
"content_id": "9d3dac4ea7026f413673cfe0c6bdd0cd919ac3d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1570,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 39,
"path": "/README.md",
"repo_name": "Darwinian2/vnc_virtual_display_linker",
"src_encoding": "UTF-8",
"text": "# vnc_virtual_display_linker\n### A Python script to create a second virtual monitor for connecting with VNC\n\nThis script will let you connect an external device to your X11 server as a second monitor through VNC \ni.e. use your tablet to extend your desktop\n\nI have it working using Ubuntu 16.04 and an Android tablet\n\n## INSTALLATION:\n`pip install dotmap` \n`sudo apt install x11vnc`\n\nthen create a password! \n`x11vnc -storepasswd`\n\n## USAGE\n- place the script anywhere\n- you might have to grant exec permissions: `chmod +x vnc_virtual_display_linker.py`\n- launch the script `./vnc_virtual_display_linker.py`\n- press `s` to start the VNC server with the default configuration\n- `ctrl-c` to stop the server\n- follow the instructions on the screen for more functionalities\n\nOnce the server has started, on your device:\n- launch a VNC client like bVNC Free: https://play.google.com/store/apps/details?id=com.iiordanov.freebVNC&hl=it\n- configure the ip address of the server and the password you used while installing x11vnc\n- connect and enjoy your second screen!\n\n## ADB SUPPORT\nYou should be able to connect most Android tablets/phones with an USB cable to the VNC server thanks to the ADB platform.\n\nFirst, you need to install the proper tools: \n`sudo apt install adb android-tools-adb android-tools-fastboot`\n\nThen you have to:\n- connect your device (i.e. the tablet) to the PC with an USB cable \n- turn on USB debugging on your device\n- activate the ADB support in the vnc_virtual_display_linker menu\n- connect with your device to `localhost` as server address\n"
}
] | 2 |
amaan2398/EcommerceWebsiteDjango | https://github.com/amaan2398/EcommerceWebsiteDjango | b97a8d4d27a780f166ffd207c1824e17bc8ca6bc | 5173d13d60c1276c957161ce0ea37b8de5acddf4 | 08faaa986d9f823cf40d1fb125c306bd204a1132 | refs/heads/master | 2022-12-07T13:07:50.167220 | 2020-08-26T17:07:05 | 2020-08-26T17:07:05 | 285,893,547 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7703225612640381,
"alphanum_fraction": 0.7729032039642334,
"avg_line_length": 47.4375,
"blob_id": "84a04e600777c33e3f39b974b97e2fa09c118c3d",
"content_id": "0321f573cc16b25d9b5af17bdcd4865461ea7364",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 775,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 16,
"path": "/src/cart/models.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom datetime import datetime\nfrom django.utils import timezone\n# Create your models here.\nclass Cart(models.Model):\n customer_id = models.IntegerField(blank=False,null=False)\n product_id = models.IntegerField(blank=False,null=False)\n address_id = models.IntegerField(blank=False,null=False,default=0)\n bill_id = models.IntegerField(blank=False,null=False,default=0)\n product_quantity = models.IntegerField(blank=False,null=False)\n shipment = models.BooleanField(blank=False,null=False,default=False)\n\nclass Shipment(models.Model):\n date_time = models.DateTimeField(default=datetime.now, blank=True)\n customer_id = models.IntegerField(blank=False,null=False)\n total_amount = models.IntegerField(blank=False,null=False)\n"
},
{
"alpha_fraction": 0.6868932247161865,
"alphanum_fraction": 0.6868932247161865,
"avg_line_length": 33.41666793823242,
"blob_id": "ffe30fab13d819001a40b197983a7c59ef03e404",
"content_id": "c26f700940b667528441ab577bf4f122d1b4c299",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 412,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 12,
"path": "/src/account/urls.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"register/\",views.register,name=\"register\"),\n path(\"login/\",views.login,name=\"login\"),\n path(\"logout/\",views.logout,name=\"logout\"),\n path(\"profile/\",views.profile,name=\"profile\"),\n path(\"edit_address/\",views.edit_address,name=\"edit_address\"),\n path(\"remove_address/<int:id>\",views.remove_address,name=\"remove_address\"),\n]"
},
{
"alpha_fraction": 0.6112726330757141,
"alphanum_fraction": 0.6166020631790161,
"avg_line_length": 41.417808532714844,
"blob_id": "86ccc632c8fcbdd8d0f977f0d181cfb8c9bc15b8",
"content_id": "0bc26305aff65498854743d2c86689d5b9a5dfd4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6192,
"license_type": "permissive",
"max_line_length": 171,
"num_lines": 146,
"path": "/src/cart/views.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render,redirect\nfrom django.http import HttpResponseNotFound\nfrom .models import Cart,Shipment\nfrom product.models import Product\nfrom account.models import Address\nfrom django.contrib.auth.models import User,auth\n\ndef cart_data_add(cid):\n data = Cart.objects.filter(customer_id=cid,shipment=False)\n data = dict({\"count\":len(data)})\n return data\n\n# Create your views here.\ndef cart(request):\n if request.user.id != None:\n a = ['','active','']\n cdata = Cart.objects.filter(customer_id=request.user.id,shipment=False)\n data = []\n tamount = 0\n for i in cdata:\n t_d = Product.objects.filter(id=i.product_id)\n data.append(dict({\"p_id\":i.product_id,\"p_name\":t_d[0].name,\"price\":t_d[0].price,\"quantity\":i.product_quantity,\"amount_t\":(t_d[0].price * i.product_quantity)}))\n tamount += t_d[0].price * i.product_quantity\n fdata = dict({\"total\":tamount})\n cid = request.user.id\n cdata = dict({\"count\":len(data)})\n address = Address.objects.filter(customer_id=cid)\n return render(request,\"product/cart.html\",{\"a\":a,\"data\":data,\"fdata\":fdata,\"address\":address,\"cdata\":cdata})\n else:\n return redirect('login')\n\ndef addtocart(request,id):\n cid = request.user.id\n if Cart.objects.filter(customer_id= cid,product_id=id,shipment=False).exists():\n a = Cart.objects.get(customer_id= cid,product_id=id,shipment=False)\n a.product_quantity += 1\n a.save()\n del a\n else:\n a = Cart(customer_id=cid,product_id=id,product_quantity=1,shipment=False)\n a.save()\n del a\n return redirect(\"/\")\n\ndef addrm_pro_qut(request,id,v):\n cid = request.user.id\n if v == 1:\n if Cart.objects.filter(customer_id= cid,product_id=id,shipment=False).exists():\n a = Cart.objects.get(customer_id= cid,product_id=id,shipment=False)\n a.product_quantity += 1\n a.save()\n del a\n else:\n a = Cart(customer_id=cid,product_id=id,product_quantity=1,shipment=False)\n a.save()\n del a\n elif v == 0:\n if Cart.objects.filter(customer_id= cid,product_id=id).exists():\n a = Cart.objects.get(customer_id= cid,product_id=id,shipment=False)\n a.product_quantity -= 1\n a.save()\n del a\n if Cart.objects.filter(customer_id= cid,product_id=id)[0].product_quantity == 0:\n a = Cart.objects.filter(customer_id= cid,product_id=id,shipment=False)\n a.delete()\n return redirect(\"cart\")\n\ndef pro_remove(request, id):\n cid = request.user.id\n a = Cart.objects.filter(customer_id= cid,product_id=id,shipment=False)\n a.delete()\n return redirect(\"cart\")\n\ndef checkout_products(request):\n if request.method == \"POST\":\n add_id = request.POST['address']\n Cart.objects.filter(customer_id=request.user.id,shipment=False).update(address_id =add_id)\n return redirect(\"checkout\")\n else:\n cid = request.user.id\n #Cart.objects.filter(customer_id= cid,shipment=False).update(shipment = True)\n cdata = Cart.objects.filter(customer_id=request.user.id,shipment=False)\n data = []\n tamount = 0\n for i in cdata:\n t_d = Product.objects.filter(id=i.product_id)\n data.append(dict({\"p_id\":i.product_id,\"p_name\":t_d[0].name,\"price\":t_d[0].price,\"quantity\":i.product_quantity,\"amount_t\":(t_d[0].price * i.product_quantity)}))\n tamount += t_d[0].price * i.product_quantity\n fdata = dict({\"total\":tamount})\n cid = request.user.id\n address = Address.objects.filter(customer_id=cid,id = cdata[0].address_id)\n return render(request,\"product/checkout.html\",{\"data\":data,\"fdata\":fdata,\"address\":address})\n\ndef checkout_shipment(request):\n cdata = 
Cart.objects.filter(customer_id=request.user.id,shipment=False)\n if len(cdata) > 0:\n tamount = 0\n for i in cdata:\n t_d = Product.objects.filter(id=i.product_id)\n tamount += t_d[0].price * i.product_quantity\n cid = request.user.id\n s = Shipment(customer_id= cid,total_amount=tamount)\n s.save()\n Cart.objects.filter(customer_id= cid,shipment=False).update(shipment = True,bill_id=s.id)\n return redirect('shipment_id',s.id)\n else:\n return HttpResponseNotFound()\n\ndef shipment(request):\n if request.user.id != None:\n sdata = Shipment.objects.filter(customer_id= request.user.id)\n fdata = []\n for i in sdata:\n c_data = Cart.objects.filter(bill_id=i.id)\n #cdata = Cart.objects.filter(customer_id=request.user.id,shipment=False)\n data = []\n tamount = 0\n for j in c_data:\n t_d = Product.objects.filter(id=j.product_id)\n data.append(t_d[0].name)\n tamount += t_d[0].price * j.product_quantity\n fdata.append({'id':i.id,'data':data,'tamount':tamount})\n del data\n print(fdata)\n cdata = cart_data_add(request.user.id)\n return render(request,'product/shipment.html',{'sdata':sdata,'fdata':fdata,'cdata':cdata})\n else:\n return redirect('login')\n\ndef shipment_id(request,id):\n sdata = Shipment.objects.filter(id=id)\n user = request.user\n if user.id == sdata[0].customer_id:\n cdata = Cart.objects.filter(bill_id=id)\n #cdata = Cart.objects.filter(customer_id=request.user.id,shipment=False)\n data = []\n tamount = 0\n for i in cdata:\n t_d = Product.objects.filter(id=i.product_id)\n data.append(dict({\"p_id\":i.product_id,\"p_name\":t_d[0].name,\"price\":t_d[0].price,\"quantity\":i.product_quantity,\"amount_t\":(t_d[0].price * i.product_quantity)}))\n tamount += t_d[0].price * i.product_quantity\n address = Address.objects.filter(customer_id=user.id,id=cdata[0].address_id)\n cdata = cart_data_add(request.user.id)\n return render(request,'product/shipment_view.html',{'data':data,'sdata':sdata,'cdata':cdata,'user':user,'tamount':tamount,'address':address})\n else:\n return HttpResponseNotFound()"
},
{
"alpha_fraction": 0.600891649723053,
"alphanum_fraction": 0.6034114956855774,
"avg_line_length": 37.796993255615234,
"blob_id": "e94c2aef4311d12cf9b40ad0d1a4ce9eb29d65e7",
"content_id": "c46e2bffdb6f677e532edb54ad17fbdbb1b21e05",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5159,
"license_type": "permissive",
"max_line_length": 151,
"num_lines": 133,
"path": "/src/account/views.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render,redirect\nfrom django.http import HttpResponseNotFound\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User,auth\nfrom .models import Address\nfrom cart.models import Cart, Shipment\nfrom product.models import Product\n\n\ndef cart_data_add(cid):\n data = Cart.objects.filter(customer_id=cid,shipment=False)\n data = dict({\"count\":len(data)})\n return data\n\n# Create your views here.\ndef register(request):\n if request.method == \"POST\":\n uname = request.POST['uname']\n\n fname = request.POST['fname']\n lname = request.POST['lname']\n \n address = request.POST['address']\n city = request.POST['city']\n state = request.POST['state']\n country = request.POST['country']\n postcode = request.POST['postcode']\n\n email = request.POST['email']\n pass1 = request.POST['pass1']\n pass2 = request.POST['pass2']\n if pass1 == pass2:\n if User.objects.filter(username = uname).exists():\n messages.info(request,\"Username taken ...\")\n return redirect(\"register\")\n elif User.objects.filter(email = email).exists():\n messages.info(request,\"Email taken ...\")\n return redirect(\"register\")\n else:\n user = User.objects.create_user(username = uname,password = pass1, email = email,first_name = fname, last_name = lname)\n user.save()\n del user\n user = User.objects.get(username = uname)\n address = Address(customer_id=user.id,street_address=address,city=city,state=state,country=country,postcode=postcode)\n address.save()\n messages.info(request,\"Account Created\")\n return redirect(\"login\")\n else:\n messages.info(request,\"Password not matching ...\")\n return redirect(\"register\")\n elif request.method == \"GET\":\n if request.user.id == None:\n return render(request,\"accounts/register.html\",{})\n else:\n return HttpResponseNotFound()\n\ndef login(request):\n if request.method == \"POST\":\n uname = request.POST['uname']\n pass1 = request.POST['pass1']\n user = auth.authenticate(username = uname, password=pass1)\n if user is not None:\n auth.login(request,user)\n return redirect(\"/\")\n else:\n messages.info(request,\"Wrong username or password...\")\n return redirect(\"login\")\n elif request.method == \"GET\":\n if request.user.id == None:\n return render(request,\"accounts/login.html\",{})\n else:\n return HttpResponseNotFound()\n\ndef logout(request):\n auth.logout(request)\n return redirect(\"/\")\n\ndef profile(request):\n if request.method == \"POST\":\n fname = request.POST['fname']\n lname = request.POST['lname']\n user = User.objects.get(id = request.user.id)\n user.first_name = fname\n user.last_name = lname\n user.save()\n return redirect(\"profile\")\n elif request.method == \"GET\":\n data = request.user\n address = Address.objects.filter(customer_id=data.id)\n cdata = cart_data_add(request.user.id)\n sdata = Shipment.objects.filter(customer_id= request.user.id)\n fdata = []\n for i in sdata:\n c_data = Cart.objects.filter(bill_id=i.id)\n #cdata = Cart.objects.filter(customer_id=request.user.id,shipment=False)\n tamount = 0\n for j in c_data:\n t_d = Product.objects.filter(id=j.product_id)\n tamount += t_d[0].price * j.product_quantity\n fdata.append({'id':i.id,'tamount':tamount})\n return render(request,\"accounts/profile.html\",{\"data\" : data,\"address\":address,\"cdata\":cdata,'sdata':sdata,'fdata':fdata})\n\ndef edit_address(request):\n if request.method == \"POST\":\n address = request.POST['address']\n city = request.POST['city']\n state = request.POST['state']\n country = 
request.POST['country']\n postcode = request.POST['postcode']\n address = Address(customer_id=request.user.id,street_address=address,city=city,state=state,country=country,postcode=postcode,default_add=False)\n address.save()\n return redirect(\"profile\")\n elif request.method == \"GET\":\n data = request.user\n address = Address.objects.filter(customer_id=data.id)\n cnt = len(address)\n cdata = cart_data_add(request.user.id)\n return render(request,\"accounts/edit_address.html\",{\"data\" : data,\"address\":address,\"cnt\":cnt,\"cdata\":cdata})\n\ndef remove_address(request,id):\n data = Address.objects.filter(id = id)\n if data[0].default_add == True:\n data.delete()\n del data\n try:\n data = Address.objects.get(customer_id=request.user.id)\n except MultipleObjectsReturned:\n data = Address.objects.get(customer_id=request.user.id)[0]\n data.default_add = True\n data.save()\n else:\n data.delete()\n return redirect(\"edit_address\")"
},
{
"alpha_fraction": 0.6651982665061951,
"alphanum_fraction": 0.6651982665061951,
"avg_line_length": 24.33333396911621,
"blob_id": "01b70b13050ab2c58ed684b0f0673e6af623bb59",
"content_id": "8ce77621f39563fcb991ae7e1898a41a129abb55",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 227,
"license_type": "permissive",
"max_line_length": 69,
"num_lines": 9,
"path": "/src/product/urls.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('',views.index,name = 'index'),\n path('product/<int:id>',views.product_view,name =\"product_view\"),\n path('search/',views.search,name=\"search\"),\n]"
},
{
"alpha_fraction": 0.6829679608345032,
"alphanum_fraction": 0.6829679608345032,
"avg_line_length": 41.42856979370117,
"blob_id": "e81954625080f35069b881c1678f074033559365",
"content_id": "0865d81d8aeed7a71a23f1f1a3c81494b22a77eb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 593,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 14,
"path": "/src/cart/urls.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('',views.cart,name=\"cart\"),\n path('addtocart/<int:id>',views.addtocart,name=\"addtocart\"),\n path('addrm_pro_qnt/<int:id>/<int:v>', views.addrm_pro_qut, name='addrm_pro_qut'),\n path('pro_remove/<int:id>', views.pro_remove, name='pro_remove'),\n path('checkout/',views.checkout_products,name='checkout'),\n path('checkout_shipment/',views.checkout_shipment,name='checkout_shipment'),\n path('shipment/',views.shipment,name='shipment'),\n path('shipment/<int:id>',views.shipment_id,name='shipment_id'),\n]"
},
{
"alpha_fraction": 0.7124183177947998,
"alphanum_fraction": 0.7385621070861816,
"avg_line_length": 37.25,
"blob_id": "ca87ebab168612df0a9bf3040582c9f19a4a6e30",
"content_id": "1f4fac76bcd501a3766d716f5de318b04574179f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 306,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 8,
"path": "/src/product/models.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\nclass Product(models.Model):\n name = models.CharField(max_length=120)\n image = models.ImageField(upload_to = 'pics')\n description = models.TextField(blank=False,null=False)\n price = models.DecimalField(decimal_places=2,max_digits= 1000)\n"
},
{
"alpha_fraction": 0.5269151329994202,
"alphanum_fraction": 0.5476190447807312,
"avg_line_length": 28.272727966308594,
"blob_id": "38f4b38a02c9291bd3eed689f97bf0f4a821634a",
"content_id": "114a9bb611657ef836fbba0c14d6af2b6874340b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 966,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 33,
"path": "/src/cart/migrations/0002_auto_20200824_2040.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.1 on 2020-08-24 20:40\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cart', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Shipment',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('date_time', models.DateTimeField(blank=True, default=datetime.datetime.now)),\n ('customer_id', models.IntegerField()),\n ('total_amount', models.IntegerField()),\n ],\n ),\n migrations.AddField(\n model_name='cart',\n name='address_id',\n field=models.IntegerField(default=0),\n ),\n migrations.AddField(\n model_name='cart',\n name='bill_id',\n field=models.IntegerField(default=0),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5677382349967957,
"alphanum_fraction": 0.5757749676704407,
"avg_line_length": 33.17647171020508,
"blob_id": "1781d7b629cbdc382afa4552911f7dde1d03e505",
"content_id": "6d8715c753052d850f725595ea9b4ed261a3f4a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1742,
"license_type": "permissive",
"max_line_length": 90,
"num_lines": 51,
"path": "/src/product/views.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom .models import Product\nfrom cart.models import Cart\n#from django.http import HttpResponse\n\ndef cart_data_add(cid):\n data = Cart.objects.filter(customer_id=cid,shipment=False)\n data = dict({\"count\":len(data)})\n return data\n\n# Create your views here.\ndef index(request):\n data = Product.objects.all()\n for i,j in enumerate(data):\n if len(j.description) > 20:\n data[i].description = j.description[:20]+'...'\n a = ['active','','']\n cdata = cart_data_add(request.user.id)\n return render(request,\"index.html\",{'data' : data,\"a\": a,\"cdata\":cdata})\n\ndef product_view(request,id):\n data = Product.objects.get(id = id)\n a = ['','','']\n cdata = cart_data_add(request.user.id)\n return render(request,\"product/show_product.html\",{\"data\":data,\"a\": a,\"cdata\":cdata})\n\ndef search(request):\n s = request.GET['s']\n lst = s.split(' ')\n rpts = \"description LIKE '%\"\n rpte = \"%'\"\n a = ['','','']\n cdata = cart_data_add(request.user.id)\n q =\"SELECT * FROM product_product WHERE \"\n if len(lst) > 0:\n for i,j in enumerate(lst):\n if i > 0:\n q+=\" or \"\n q += rpts+j+rpte\n data = Product.objects.raw(q)\n for i,j in enumerate(data):\n if len(j.description) > 20:\n data[i].description = j.description[:20]+'...'\n return render(request,\"index.html\",{'data' : data,\"a\": a,\"cdata\":cdata,\"tag\":lst})\n\n else:\n data = Product.objects.all()\n for i,j in enumerate(data):\n if len(j.description) > 20:\n data[i].description = j.description[:20]+'...'\n return render(request,\"index.html\",{'data' : data,\"a\": a,\"cdata\":cdata})"
},
{
"alpha_fraction": 0.48946717381477356,
"alphanum_fraction": 0.5002065300941467,
"avg_line_length": 34.10144805908203,
"blob_id": "f1c435fb1775954e39dad0ab1b064afc405a509e",
"content_id": "0161b7562bfedd9be0605607059a52a5c6757806",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2423,
"license_type": "permissive",
"max_line_length": 243,
"num_lines": 69,
"path": "/src/templates/product/show_product.html",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "{%extends \"../navbar.html\"%}\n{% block showproduct %}\n<div class=\"page-header\">\n <h1>{{data.name}} <small>{{data.name}} </small></h1>\n </div>\n<div class=\"container\">\n <div class=\"row\">\n <div class=\"col-md-4\" style=\"padding-top: 20px;\">\n <img src=\"{{data.image.url}}\" style=\"width: 300px;\" alt=\"...\">\n </div>\n <div class=\"col-md-8\">\n <h3>Product Id: #{{data.id}}</h3>\n <div style=\"height: 150px;\">\n {{data.description}}\n </div>\n <div>\n <h3>₹ {{data.price}}</h3>\n </div>\n <div>\n {% if user.is_authenticated %}\n <p>\n <a href=\"../cart/addtocart/{{data.id}}\" class=\"btn btn-primary right\" role=\"button\"><span class=\"glyphicon glyphicon-shopping-cart\"></span> Add to Cart </a>\n </p>\n {% else %} \n <p>\n <a class=\"btn btn-primary right\" role=\"button\" disabled = \"disabled\" data-toggle=\"tooltip\" data-placement=\"top\" title=\"Log In/Sign In please!\"><span class=\"glyphicon glyphicon-shopping-cart\"></span> Add to Cart </a>\n </p>\n {% endif %}\n </div>\n </div>\n </div>\n <div>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n (122 Reviews)\n </div>\n <div>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n (24 Reviews)\n </div>\n <div>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n (12 Reviews)\n </div>\n <div>\n <span class=\"glyphicon glyphicon-star\"></span>\n <span class=\"glyphicon glyphicon-star\"></span>\n (8 Reviews)\n </div>\n <div>\n <span class=\"glyphicon glyphicon-star\"></span>\n (5 Reviews)\n </div>\n <div>\n (2 Reviews)\n </div>\n</div>\n\n\n\n{% endblock %}"
},
{
"alpha_fraction": 0.43156132102012634,
"alphanum_fraction": 0.49434909224510193,
"avg_line_length": 30.03896141052246,
"blob_id": "422187bb6b79de54a1acc69b9dadad9c097427f4",
"content_id": "c56293d99bdcbfedd542c283530d25f162e5f410",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2395,
"license_type": "permissive",
"max_line_length": 154,
"num_lines": 77,
"path": "/src/templates/product/checkout.html",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "{% extends '../navbar.html' %}\n{% block checkout %}\n<style>\n body{\n background:no-repeat;\n background-image: url('../../../media/svg/cart.svg');\n background-position: center;\n background-attachment: fixed;\n }\n .card {\n background: rgba(200, 200, 200, 0.6) !important;\n color: black !important;\n }\n .card-body{\n background: rgba(255, 255, 255, 0.6) !important;\n color: black !important;\n }\n </style>\n <br>\n <div class=\"card\" >\n <div class=\"card-header\"><h3> Checkout</h3></div>\n <!-- Table -->\n <div class=\"card-body\" >\n <div class=\"table-responsive\">\n <table class=\"table text-center\">\n <tr>\n <th>#</th>\n <th>Product ID</th>\n <th>Product Name</th>\n <th>Price <small style= \"font-style: italic !important; font-size: xx-small;\">(per 1 qnt.)</small></th>\n <th>Quentity</th>\n <th>Total Amount</th>\n </tr>\n {% for d in data %}\n <tr>\n <td>{{ forloop.counter }}</td>\n <td>{{d.p_id}}</td>\n <td><a href=\"../../../product/{{d.p_id}}\">{{d.p_name}}</a></td>\n <td>₹ {{d.price}}</td>\n <td> {{d.quantity}}</td>\n <td>₹ {{d.amount_t}}</td>\n </tr>\n {% endfor %}\n <tr>\n <th></th>\n <th></th>\n <th></th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n</table>\n</div>\n<div class=\"row\">\n <div class=\"col-md-7\">\n {% for i in address %}\n <address> \n <h4>Shipping Address</h4>\n <strong>{{i.street_address}},</strong><br>\n {{i.city}}, {{i.state}},<br>\n {{i.country}},<br>\n {{i.postcode}}\n </address>\n {% endfor %}\n </div>\n <div class=\"col-md-4\">\n <h4>\n <svg width=\"1em\" height=\"1em\" viewBox=\"0 0 16 16\" class=\"bi bi-cash\" fill=\"currentColor\" xmlns=\"http://www.w3.org/2000/svg\">\n <path fill-rule=\"evenodd\" d=\"M15 4H1v8h14V4zM1 3a1 1 0 0 0-1 1v8a1 1 0 0 0 1 1h14a1 1 0 0 0 1-1V4a1 1 0 0 0-1-1H1z\"/>\n <path d=\"M13 4a2 2 0 0 0 2 2V4h-2zM3 4a2 2 0 0 1-2 2V4h2zm10 8a2 2 0 0 1 2-2v2h-2zM3 12a2 2 0 0 0-2-2v2h2zm7-4a2 2 0 1 1-4 0 2 2 0 0 1 4 0z\"/>\n </svg>\n Total Amount: ₹{{fdata.total}}\n </h4>\n <a href=\"../../cart/checkout_shipment/\" role=\"button\" class=\"btn btn-success btn-lg btn-block\">Pay Bill</a>\n </div>\n</div>\n{% endblock %}"
},
{
"alpha_fraction": 0.3439536690711975,
"alphanum_fraction": 0.5242577791213989,
"avg_line_length": 63.23255920410156,
"blob_id": "4622d987cdd24ea8acca6133ae0aee108eb5dc1c",
"content_id": "35621bf2a24c36b85e772a2709b15ed9698cc0d7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 2762,
"license_type": "permissive",
"max_line_length": 496,
"num_lines": 43,
"path": "/src/templates/accounts/login.html",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "{% extends \"../header.html\" %}\n{% block login %}\n<h3 class=\"text-center\" >User Login</h3>\n<div class=\"row\">\n <div class=\"col-md-6\">\n {% if messages|length > 0 %}\n {% for message in messages %}\n \n {% ifnotequal message|stringformat:\"s\" \"Account Created\" %}\n <div class=\"alert alert-danger\" role=\"alert\"><strong>Oh snap! </strong>{{message}}</div> \n \n {% else %} \n <div class=\"alert alert-success\" role=\"alert\"><strong>Well done! </strong>{{message}}</div>\n {% endifnotequal %}\n {% endfor %}\n {% endif %}\n <img src=\"../../media/svg/ecommerce.svg\" width=\"70%\" alt=\"\">\n \n </div>\n <div class=\"col-md-6\">\n <form action=\"\" method=\"post\" >{% csrf_token %}\n <div class=\"form-group\">\n <label for=\"exampleInputEmail1\">\n <svg width=\"20px\" height=\"20px\" viewBox=\"2 2 18 18\" class=\"bi bi-person\" fill=\"currentColor\" xmlns=\"http://www.w3.org/2000/svg\">\n <path fill-rule=\"evenodd\" d=\"M13 14s1 0 1-1-1-4-6-4-6 3-6 4 1 1 1 1h10zm-9.995-.944v-.002.002zM3.022 13h9.956a.274.274 0 0 0 .014-.002l.008-.002c-.001-.246-.154-.986-.832-1.664C11.516 10.68 10.289 10 8 10c-2.29 0-3.516.68-4.168 1.332-.678.678-.83 1.418-.832 1.664a1.05 1.05 0 0 0 .022.004zm9.974.056v-.002.002zM8 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4zm3-2a3 3 0 1 1-6 0 3 3 0 0 1 6 0z\"/>\n </svg>\n User name :</label>\n <input type=\"text\" class=\"form-control\" name=\"uname\" placeholder=\"User Name\">\n </div>\n <div class=\"form-group\">\n <label for=\"exampleInputPassword1\">\n <svg width=\"20px\" height=\"20px\" viewBox=\"0 0 18 18\" class=\"bi bi-key\" fill=\"currentColor\" xmlns=\"http://www.w3.org/2000/svg\">\n <path fill-rule=\"evenodd\" d=\"M0 8a4 4 0 0 1 7.465-2H14a.5.5 0 0 1 .354.146l1.5 1.5a.5.5 0 0 1 0 .708l-1.5 1.5a.5.5 0 0 1-.708 0L13 9.207l-.646.647a.5.5 0 0 1-.708 0L11 9.207l-.646.647a.5.5 0 0 1-.708 0L9 9.207l-.646.647A.5.5 0 0 1 8 10h-.535A4 4 0 0 1 0 8zm4-3a3 3 0 1 0 2.712 4.285A.5.5 0 0 1 7.163 9h.63l.853-.854a.5.5 0 0 1 .708 0l.646.647.646-.647a.5.5 0 0 1 .708 0l.646.647.646-.647a.5.5 0 0 1 .708 0l.646.647.793-.793-1-1h-6.63a.5.5 0 0 1-.451-.285A3 3 0 0 0 4 5z\"/>\n <path d=\"M4 8a1 1 0 1 1-2 0 1 1 0 0 1 2 0z\"/>\n </svg>\n Password :</label>\n <input type=\"password\" class=\"form-control\" name=\"pass1\" placeholder=\"Password\">\n </div>\n <button type=\"submit\" class=\"btn btn-primary\">Submit</button>\n </form>\n </div>\n</div>\n{% endblock%}\n"
},
{
"alpha_fraction": 0.7061503529548645,
"alphanum_fraction": 0.7312073111534119,
"avg_line_length": 39,
"blob_id": "202773042242503fe9c0e3471a3fc75fc2e5bdfd",
"content_id": "0ce48804799b829d9509ab687dce874e9f9bc805",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 439,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 11,
"path": "/src/account/models.py",
"repo_name": "amaan2398/EcommerceWebsiteDjango",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\n# Create your models here.\nclass Address(models.Model):\n customer_id = models.IntegerField(blank=False,null=False)\n street_address = models.CharField(max_length=125)\n city = models.CharField(max_length = 30)\n state = models.CharField(max_length = 30)\n country = models.CharField(max_length = 30)\n postcode = models.CharField(max_length=10)\n default_add = models.BooleanField(default=True)"
}
] | 13 |
CaduSouza1/pyTic-Tac-Toe | https://github.com/CaduSouza1/pyTic-Tac-Toe | ea4f5792e864a19e1db00d493d58fca2ec5104eb | 9ddd4bf8cfc5550c39dbb8202d623db182688978 | d36a0a2cf3fd15195cd2fb57f82c0379257dbcc0 | refs/heads/master | 2023-03-21T05:26:14.289084 | 2021-03-19T15:00:17 | 2021-03-19T15:00:17 | 349,139,880 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6175854802131653,
"alphanum_fraction": 0.6224703192710876,
"avg_line_length": 25.07272720336914,
"blob_id": "08f249cf72f9aa14a10740bb808a9db8250097a0",
"content_id": "220fbf2775d605750aeb4a95cae56b7dcfd40242",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1433,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 55,
"path": "/board.py",
"repo_name": "CaduSouza1/pyTic-Tac-Toe",
"src_encoding": "UTF-8",
"text": "from typing import List, Tuple\nfrom enum import IntEnum\n\n\nclass BoardCell(IntEnum):\n EMPTY = 0\n PLAYER_X = 1\n PLAYER_O = 2\n\n\ndef GetMouseCell(mouse_x: int, mouse_y: int, cell_width: int, cell_height: int) -> Tuple[int, int]:\n row = mouse_x // cell_width\n colum = mouse_y // cell_height\n\n return (row, colum)\n\n\ndef GetRows(game_board: List[List[BoardCell]]) -> List[List[BoardCell]]:\n return [row for row in game_board]\n\n\ndef GetColumns(game_board: List[List[BoardCell]]) -> List[List[BoardCell]]:\n columns = []\n\n for i in range(0, len(game_board)):\n columns.append([row[i] for row in game_board])\n\n return columns\n\n\ndef GetDiagonals(game_board: List[List[BoardCell]]) -> List[List[BoardCell]]:\n main_diag = []\n reverse_diag = []\n \n for i in range(0, len(game_board)):\n main_diag.append(game_board[i][i])\n reverse_diag.append(game_board[len(game_board) - 1 - i][i])\n\n return main_diag, reverse_diag\n\n \ndef CheckPlayerWon(player: int, game_board: List[List[BoardCell]]) -> Tuple[bool, int]:\n for row in GetRows(game_board):\n if all(map(lambda p: p == player, row)):\n return player\n \n for colum in GetColumns(game_board):\n if all(map(lambda p: p == player, colum)):\n return player\n \n for diagonal in GetDiagonals(game_board):\n if all(map(lambda p: p == player, diagonal)):\n return player\n\n return 0"
},
{
"alpha_fraction": 0.5502793192863464,
"alphanum_fraction": 0.5758845210075378,
"avg_line_length": 28.84722137451172,
"blob_id": "bd5f03c7b47f965e04ec0fa607d9a1295f696aee",
"content_id": "447be65775d4a5effa47f95f9f25c5150d466b0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2148,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 72,
"path": "/main.py",
"repo_name": "CaduSouza1/pyTic-Tac-Toe",
"src_encoding": "UTF-8",
"text": "import render\nimport pygame\nimport board\n\nnum_pass, num_fail = pygame.init()\nif num_fail:\n print(\"Failed to initialize pygame\")\n\nshould_reset = False\n\n\ndef main():\n global should_reset\n\n screen = pygame.display.set_mode((640, 480))\n clock = pygame.time.Clock()\n\n player_won = 0\n current_turn = board.BoardCell.PLAYER_X\n\n row_count, colum_count = 3, 3\n game_board = [\n [board.BoardCell.EMPTY for row in range(0, row_count)] for colum in range(0, colum_count)\n ]\n\n cell_width = screen.get_width() // len(game_board[0])\n cell_height = screen.get_height() // len(game_board)\n\n font_obj = pygame.font.Font(\"8_bit_arcade\\8-bit Arcade In.ttf\", 30)\n\n while True:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n\n if event.type == pygame.MOUSEBUTTONDOWN and not player_won:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n row, colum = board.GetMouseCell(mouse_x, mouse_y, cell_width, cell_height)\n game_board[row][colum] = current_turn\n\n player_won = board.CheckPlayerWon(current_turn, game_board)\n\n current_turn = (current_turn + 1) % len(board.BoardCell)\n if current_turn == board.BoardCell.EMPTY:\n current_turn += 1\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n should_reset = True\n return\n\n screen.fill((0, 0, 0))\n render.DrawBoard(screen, cell_width, cell_height, game_board, (255, 255, 255))\n if player_won:\n text_size = font_obj.size(\"You won\")\n text_x = screen.get_width() // 2 - text_size[0] // 2\n text_y = screen.get_height() - text_size[1]\n\n render.BlitText(screen, font_obj, text_x, text_y - text_size[1], \"You won\", (255, 255, 255))\n render.BlitText(screen, font_obj, text_x, text_y, \"Press spacebar to reset\", (255, 255, 255))\n\n pygame.display.update()\n\n\nmain()\n\nif should_reset:\n should_reset = False\n main()\n\npygame.quit()"
},
{
"alpha_fraction": 0.554741382598877,
"alphanum_fraction": 0.5676724314689636,
"avg_line_length": 39.70175552368164,
"blob_id": "cf6d73569d05a5f18b3f4d31daf79d498d5c7bfc",
"content_id": "766e590a9e5d3f542ff45da1d1098e684741eac8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2320,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 57,
"path": "/render.py",
"repo_name": "CaduSouza1/pyTic-Tac-Toe",
"src_encoding": "UTF-8",
"text": "from typing import List, Tuple\nfrom pygame.surface import Surface\nimport pygame.draw\nimport pygame.font\nfrom board import BoardCell\n\n\ndef DrawBoard(surface: Surface, cell_width: int, cell_height: int, game_board: List[List[BoardCell]], bg_color: Tuple[int, int, int]):\n for row in range(0, len(game_board[0])):\n for colum in range(0, len(game_board[row])):\n DrawCellBG(\n surface, cell_width * row, cell_height *\n colum, cell_width, cell_height, bg_color\n )\n\n if game_board[row][colum] == BoardCell.PLAYER_X:\n margin_x = cell_width // 4\n margin_y = cell_height // 4\n\n x = cell_width * row + margin_x\n y = cell_height * colum + margin_y\n\n # counting for the \"+ margin\" in the x and y initial positions\n line_length_x = cell_width - margin_x * 2\n line_length_y = cell_height - margin_y * 2\n DrawPlayerX(surface, x, y, line_length_x,\n line_length_y, (255, 0, 0))\n\n elif game_board[row][colum] == BoardCell.PLAYER_O:\n center_x = cell_width * row + cell_width // 2\n center_y = cell_height * colum + cell_height // 2\n\n radius = cell_height // 4\n DrawPlayerO(surface, center_x, center_y, radius, (0, 0, 255))\n\n\ndef DrawCellBG(surface: Surface, x: int, y: int, width: int, height: int, color: Tuple[int, int, int]):\n pygame.draw.rect(surface, color, [x, y, width, height], 1)\n\n\ndef DrawPlayerX(surface: Surface, x: int, y: int, line_length_x: int, line_length_y: int, color: Tuple[int, int, int]):\n pygame.draw.line(surface, color, (x, y),\n (x + line_length_x, y + line_length_y), 10)\n pygame.draw.line(surface, color, (x + line_length_x, y),\n (x, y + line_length_y), 10)\n\n\ndef DrawPlayerO(surface: Surface, x: int, y: int, radius: int, color: Tuple[int, int, int]):\n pygame.draw.circle(surface, color, (x, y), radius)\n pygame.draw.circle(surface, (0, 0, 0), (x, y), radius - 10)\n\n\ndef BlitText(surface: Surface, font_obj: pygame.font.Font, x: int, y: int, text: str, color: Tuple[int, int, int]):\n surface.blit(\n font_obj.render(text, False, color), \n (x, y)\n )\n"
},
{
"alpha_fraction": 0.7660256624221802,
"alphanum_fraction": 0.7660256624221802,
"avg_line_length": 33.77777862548828,
"blob_id": "5c3d4e5bba3ac23bd356c1a050fb5da67254c89f",
"content_id": "87028cecaa3f31bf7d39ec0bbe209aa5a6ccc761",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 312,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 9,
"path": "/test.py",
"repo_name": "CaduSouza1/pyTic-Tac-Toe",
"src_encoding": "UTF-8",
"text": "import board\n\nb = [\n [board.BoardCell.PLAYER_O,board.BoardCell.PLAYER_X,board.BoardCell.PLAYER_X],\n [board.BoardCell.PLAYER_O,board.BoardCell.EMPTY,board.BoardCell.EMPTY],\n [board.BoardCell.PLAYER_O,board.BoardCell.EMPTY,board.BoardCell.EMPTY]\n]\n\nprint(board.CheckPlayerWon(board.BoardCell.PLAYER_O, b))"
}
] | 4 |
Dinesh-DS/miniblog | https://github.com/Dinesh-DS/miniblog | 1075102d8f1303abf8c57ec551f085a0d0ab8886 | 6d3bec06266e041174d06eb7558fd44b56f9c8ac | 61def47e601650e87ef7512dc79fe41d1b40d84e | refs/heads/main | 2023-01-30T06:22:23.608979 | 2020-12-13T17:07:21 | 2020-12-13T17:07:21 | 321,113,258 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7106666564941406,
"alphanum_fraction": 0.7213333249092102,
"avg_line_length": 30.69565200805664,
"blob_id": "9142c19b9fcd9209131c79d1f58f39261e2ff987",
"content_id": "9be6abddd6114ecbfe3038829461803291f72bf5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 750,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 23,
"path": "/models.py",
"repo_name": "Dinesh-DS/miniblog",
"src_encoding": "UTF-8",
"text": "from django.db import models\r\nfrom django.contrib.auth.models import User\r\n# Create your models here.\r\n\r\nSTATUS_CHOICE = (('draft', 'Draft'), ('publish','Publish')) \r\n\r\nclass Post(models.Model):\r\n\tuser = models.ForeignKey(User, null=True, on_delete=models.CASCADE)\r\n\ttitle = models.CharField(max_length=200)\r\n\tbody = models.TextField()\r\n\tpublished_date = models.DateField(auto_now_add=True)\r\n\tupdated_date = models.DateField(auto_now=True)\r\n\tblog_status = models.CharField(max_length=20, choices=STATUS_CHOICE, default='draft')\r\n\r\n\r\nclass Contact(models.Model):\r\n\tname = models.CharField(max_length=100)\r\n\tmobile = models.IntegerField()\r\n\temail = models.EmailField()\r\n\tcomment = models.TextField(blank=True)\r\n\r\n\tdef __str__(self):\r\n\t\treturn self.name"
},
{
"alpha_fraction": 0.5419161915779114,
"alphanum_fraction": 0.5479041934013367,
"avg_line_length": 13.272727012634277,
"blob_id": "6f97dc7006ecd096b7033b0eeeb2e887383c6cde",
"content_id": "1b6e08fc48ca15f7ef89730ca3c3e7449ed2c536",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 334,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 22,
"path": "/templates/blog/login.html",
"repo_name": "Dinesh-DS/miniblog",
"src_encoding": "UTF-8",
"text": "{% extends 'blog/base.html' %}\r\n{% load static %}\r\n{% block content %}\r\n\r\n<div class=\"container\">\r\n\t\r\n\t\t\r\n\t<div style=\"float: right;\">\r\n\t\t<h1>Login</h1>\r\n\r\n<form method=\"POST\" novalidate=\"\">\r\n\t{% csrf_token %}\r\n\r\n\t{{form.as_p}}\r\n\r\n\t<input type=\"submit\" Value=\"Login\" class=\"btn btn-info\">\r\n</form>\r\n\r\n\t</div>\r\n</div>\r\n\r\n{% endblock %}"
},
{
"alpha_fraction": 0.6933881640434265,
"alphanum_fraction": 0.6933881640434265,
"avg_line_length": 29.728260040283203,
"blob_id": "b16bf12f34a98ab330f843100a2141862e7b25cc",
"content_id": "9a68044e9ef0b318bbf1ba59b2d22c60b1944d27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2919,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 92,
"path": "/views.py",
"repo_name": "Dinesh-DS/miniblog",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\r\nfrom blog.models import Post\r\nfrom blog.forms import PostForm, ContactForm, SignupForm\r\nfrom django.http import HttpResponseRedirect\r\nfrom django.contrib.auth.forms import AuthenticationForm\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom django.contrib.auth.models import Group\r\n# Create your views here.\r\n\r\n\r\ndef home(request):\r\n\tposts = Post.objects.filter(blog_status='publish').order_by('-id')\r\n\treturn render(request, 'blog/home.html', {'posts': posts})\r\n\r\ndef about(request):\r\n\treturn render(request, 'blog/about.html')\r\n\r\ndef contact(request):\r\n\tform = ContactForm()\r\n\tif request.method == 'POST':\r\n\t\tform = ContactForm(request.POST)\r\n\t\tif form.is_valid():\r\n\t\t\tform.save()\r\n\t\t\treturn HttpResponseRedirect('/')\r\n\treturn render(request, 'blog/contact.html', {'form':form})\r\n\r\ndef post_detail(request, id):\r\n\tpost = Post.objects.get(pk=id)\r\n\treturn render(request, 'blog/detail.html', {'post':post})\r\n\r\ndef add_post(request):\r\n\tform = PostForm()\r\n\tif request.method == 'POST':\r\n\t\tform = PostForm(request.POST)\r\n\t\tif form.is_valid():\r\n\t\t\tform.save()\r\n\t\t\treturn HttpResponseRedirect('/profile/')\r\n\treturn render(request, 'blog/add.html', {'form': form})\r\n\r\ndef user_profile(request):\r\n\tif request.user.is_authenticated:\r\n\t\tposts = Post.objects.filter(user__username=request.user)\r\n\t\treturn render(request, 'blog/profile.html', {'posts':posts})\r\n\telse:\r\n\t\treturn HttpResponseRedirect('/login/')\r\n\r\ndef update_post(request, id):\r\n\tpost = Post.objects.get(pk=id)\r\n\tform = PostForm(instance=post)\r\n\tif request.method == 'POST':\r\n\t\tpost = Post.objects.get(pk=id)\r\n\t\tform = PostForm(request.POST, instance=post)\r\n\t\tif form.is_valid():\r\n\t\t\tform.save()\r\n\t\t\treturn HttpResponseRedirect('/profile/')\r\n\treturn render(request, 'blog/add.html', {'form':form})\r\n\r\ndef delete_post(request, id):\r\n\tpost = Post.objects.get(pk=id)\r\n\tpost.delete()\r\n\treturn HttpResponseRedirect('/profile/')\r\n\r\ndef signup_view(request):\r\n\tform = SignupForm()\r\n\tif request.method == 'POST':\r\n\t\tform = SignupForm(request.POST)\r\n\t\tif form.is_valid():\r\n\t\t\tuser = form.save()\r\n\t\t\tgroup = Group.objects.get(name='author')\r\n\t\t\tuser.groups.add(group)\r\n\t\t\tform = SignupForm()\r\n\treturn render(request, 'blog/signup.html', {'form': form})\r\n\r\ndef login_view(request):\r\n\tif not request.user.is_authenticated:\r\n\t\tform = AuthenticationForm(request=request)\r\n\t\tif request.method == 'POST':\r\n\t\t\tform = AuthenticationForm(request=request, data=request.POST)\r\n\t\t\tif form.is_valid():\r\n\t\t\t\tuname = form.cleaned_data['username']\r\n\t\t\t\tpwd = form.cleaned_data['password']\r\n\t\t\t\tuser = authenticate(username=uname, password=pwd)\r\n\t\t\t\tif user != None:\r\n\t\t\t\t\tlogin(request, user)\r\n\t\t\t\t\treturn HttpResponseRedirect('/profile/')\r\n\t\treturn render(request, 'blog/login.html', {'form': form})\r\n\telse:\r\n\t\treturn HttpResponseRedirect('/profile/')\r\n\r\ndef logout_view(request):\r\n\tlogout(request)\r\n\treturn HttpResponseRedirect('/')\r\n"
},
{
"alpha_fraction": 0.6917098164558411,
"alphanum_fraction": 0.6917098164558411,
"avg_line_length": 25.571428298950195,
"blob_id": "9ee5702f96ec633b6c76ffc5b0253330baa95505",
"content_id": "10cbc6ff4dffd3483ea69bc6c56efca706788a39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 386,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 14,
"path": "/blog/admin.py",
"repo_name": "Dinesh-DS/miniblog",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\r\nfrom blog.models import Post, Contact\r\n# Register your models here.\r\n\r\n# class PostAdmin(admin.ModelAdmin):\r\n# \tlist_display = ['id', 'title', 'blog_status']\r\n# \tlist_filter = ['blog_status', 'published_date']\r\n# \tsearch_fields = ['title', 'body']\r\n\r\n\r\n\r\nadmin.site.register(Post)\r\n# admin.site.register(Post, PostAdmin)\r\nadmin.site.register(Contact)\r\n"
},
{
"alpha_fraction": 0.6548089385032654,
"alphanum_fraction": 0.6587615013122559,
"avg_line_length": 34.095237731933594,
"blob_id": "ef74b6f48104676e947a1816560aa4a4f3384af1",
"content_id": "6fa9a11f2fbc1b33020d2301e44a60d5c0364291",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1518,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 42,
"path": "/blog/forms.py",
"repo_name": "Dinesh-DS/miniblog",
"src_encoding": "UTF-8",
"text": "from django import forms\r\nfrom blog.models import Post, Contact\r\nfrom django.contrib.auth.forms import UserCreationForm\r\nfrom django.contrib.auth.models import User\r\n\r\n\r\nclass PostForm(forms.ModelForm):\r\n\tclass Meta:\r\n\t\tmodel = Post\r\n\t\tfields = ['title', 'body', 'user']\r\n\t\tlabels = {'body': 'Description'}\r\n\r\n\r\nclass ContactForm(forms.ModelForm):\r\n\tclass Meta:\r\n\t\tmodel = Contact\r\n\t\tfields = '__all__'\r\n\t\twidgets = {\r\n\t\t\t'name': forms.TextInput(attrs = {'class': 'form-control'}),\r\n\t\t\t'mobile': forms.TextInput(attrs = {'class': 'form-control'}),\r\n\t\t\t'email': forms.TextInput(attrs = {'class': 'form-control'}),\r\n\t\t\t'comment': forms.TextInput(attrs = {'class': 'form-control'}),\r\n\t\t}\r\n\r\n\tdef clean_mobile(self):\r\n\t\tmb = str(self.cleaned_data['mobile'])\r\n\t\tif(len(mb) != 10):\r\n\t\t\traise forms.ValidationError(\"Please provide 10 digit mobile number\")\r\n\t\treturn mb\r\n\r\nclass SignupForm(UserCreationForm):\r\n\tpassword1 = forms.CharField(label='Password', widget= forms.PasswordInput(attrs = {'class': 'form-control'}))\r\n\tpassword2 = forms.CharField(label='Confirm Password', widget= forms.PasswordInput(attrs = {'class': 'form-control'}))\r\n\tclass Meta:\r\n\t\tmodel = User\r\n\t\tfields = ['username', 'first_name', 'last_name', 'email']\r\n\t\twidgets = {\r\n\t\t\t'username': forms.TextInput(attrs = {'class': 'form-control'}),\r\n\t\t\t'first_name': forms.TextInput(attrs = {'class': 'form-control'}),\r\n\t\t\t'last_name': forms.TextInput(attrs = {'class': 'form-control'}),\r\n\t\t\t'email': forms.TextInput(attrs = {'class': 'form-control'})\r\n\t\t}\r\n\r\n"
}
] | 5 |
vishnuram223/gittutorial | https://github.com/vishnuram223/gittutorial | 9ad091a01bb7318b3c365982dc061bc0e6e9fbc9 | 40f3b1559d6cf86428d12fb43c4b081c4f91ecae | fef66bca4917782492917abb51fc0e7e1d4f5d18 | refs/heads/master | 2023-02-08T20:35:51.852542 | 2020-12-29T17:36:12 | 2020-12-29T17:36:12 | 325,345,404 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5769230723381042,
"alphanum_fraction": 0.6346153616905212,
"avg_line_length": 9.600000381469727,
"blob_id": "f8a568a2973774f03acba742e6711b5c02e74c7c",
"content_id": "53ff3ac336a017d9c6b5b87e64f9dd1bfb39a34f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 52,
"license_type": "no_license",
"max_line_length": 10,
"num_lines": 5,
"path": "/vishnu.py",
"repo_name": "vishnuram223/gittutorial",
"src_encoding": "UTF-8",
"text": "a,b=20,3\nprint(a+b)\nprint(a-b)\nprint(a*b)\nprint(a/b)"
}
] | 1 |
OlivierBondu/Rosetta | https://github.com/OlivierBondu/Rosetta | 9cc566ee5e3e9ff301d1e279a6ba44f36cfe635a | 7ba805f0352bdd579dc2a53d91443923d3c086b7 | 53625b0c046d4b4829af51ac4d75590186b88485 | refs/heads/master | 2021-05-08T03:38:27.588680 | 2017-09-07T10:38:52 | 2017-09-07T10:38:52 | 108,286,721 | 0 | 0 | null | 2017-10-25T15:08:45 | 2016-11-28T09:36:25 | 2017-09-19T12:53:28 | null | [
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.75,
"avg_line_length": 29.46666717529297,
"blob_id": "702eff22ad16bea50f31d25cd285967fc68fe4c5",
"content_id": "2b005e931c36f4880fcb47329abd1b0f35577672",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 456,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 15,
"path": "/Rosetta/interfaces/eHDECAY/errors.py",
"repo_name": "OlivierBondu/Rosetta",
"src_encoding": "UTF-8",
"text": "from ...internal.errors import RosettaImportError\nfrom ..errors import RosettaInterfaceError\n\nclass eHDECAYImportError(RosettaImportError):\n '''\n Exception raised when a problem occurs when trying to import the eHDECAY \n interface package\n '''\n interface='eHDECAY'\n pass\n \nclass eHDECAYInterfaceError(RosettaInterfaceError):\n '''Exception raised when a problem occurs within the eHDECAY interface'''\n interface='eHDECAY'\n pass"
},
{
"alpha_fraction": 0.7442996501922607,
"alphanum_fraction": 0.7557003498077393,
"avg_line_length": 46.230770111083984,
"blob_id": "ad3e69bf731623fe0f7baec84fcbbca50ac397cc",
"content_id": "5dfacf5f9761608d0000a6eaea3052b8c1af124d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1228,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 26,
"path": "/dihiggs/TStest.py",
"repo_name": "OlivierBondu/Rosetta",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport os\n\nfrom python.functions import reweighter_from_histogram_and_file\nfrom python.functions import AnalyticalReweighter\n\n#os.system(\"python python/functions.py\")\n#scriptpath = \"python/functions.py\"\n# Add the directory containing your module to the Python path (wants absolute paths)\n#import sys\n#sys.path.append(os.path.abspath(\"python/functions.py\"))\n#import AnalyticalReweighter\n\nimport argparse\nparser = argparse.ArgumentParser(prog='TStest', description='Return the closest shape benchmark topoligy of a EFT point')\n\n# arguments to chose the (B)SM point in training and application\nparser.add_argument('--kl', type=float, default=1.0, help='Benchmark to calculate the limit')\nparser.add_argument('--kt', type=float, default=1.0, help='Benchmark to calculate the limit')\nparser.add_argument('--c2', type=float, default=0.0, help='Benchmark to calculate the limit')\nparser.add_argument('--cg', type=float, default=0.0, help='Benchmark to calculate the limit')\nparser.add_argument('--c2g', type=float, default=0.0, help='Benchmark to calculate the limit')\nargs = parser.parse_args()\n\nar = reweighter_from_histogram_and_file()\nBM = ar.TS_test(args.kl, args.kt, args.c2, args.cg, args.c2g)\n"
},
{
"alpha_fraction": 0.7247838377952576,
"alphanum_fraction": 0.7305475473403931,
"avg_line_length": 26.799999237060547,
"blob_id": "68e80c0c1303c1bb68636449eb896a9da51e319e",
"content_id": "989ecfe899ad8da8adf87eba24a1a5b988638486",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 694,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 25,
"path": "/Rosetta/interfaces/SignalStrengths/errors.py",
"repo_name": "OlivierBondu/Rosetta",
"src_encoding": "UTF-8",
"text": "from ...internal.errors import RosettaImportError\nfrom ..errors import RosettaInterfaceError\n\nclass SignalStrengthsImportError(RosettaImportError):\n '''\n Exception raised when a problem occurs when trying to import the \n SignalStrengths interface package\n '''\n interface='SignalStrengths'\n pass\n \nclass SignalStrengthsInterfaceError(RosettaInterfaceError):\n '''\n Exception raised when a problem occurs within the SignalStrengths \n interface\n '''\n interface='SignalStrengths'\n pass\n\nclass SqrtsError(SignalStrengthsInterfaceError):\n '''\n Exception for invalid value of sqrt(s) in TeV not in (7, 8, 13)\n '''\n interface='SignalStrengths'\n pass"
},
{
"alpha_fraction": 0.8909090757369995,
"alphanum_fraction": 0.8909090757369995,
"avg_line_length": 54,
"blob_id": "bfb66a10fd21fbcffb2323ae75614d92e16a6975",
"content_id": "38d1b7b4d76081875686f2ad24a06b984d05313a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 165,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 3,
"path": "/Rosetta/interfaces/dihiggs/__init__.py",
"repo_name": "OlivierBondu/Rosetta",
"src_encoding": "UTF-8",
"text": "from errors import DiHiggsImportError, DiHiggsInterfaceError\nfrom interface import DiHiggsInterface\n# from interface import TranslateInterface, DefaultCardInterface\n"
}
] | 4 |
jrwaller/Puzzles-and-Projects | https://github.com/jrwaller/Puzzles-and-Projects | 49921879c88f73126477230d13f6f7cb2ea6500c | 9f4c2116485a817d203553fd8adac5d6d69f68e7 | c1b71ff73e71e2e2a2a5c92bfa7041f967535cc9 | refs/heads/master | 2021-01-10T09:22:00.964413 | 2015-11-23T18:42:25 | 2015-11-23T18:42:25 | 46,735,673 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7934010028839111,
"alphanum_fraction": 0.7934010028839111,
"avg_line_length": 102.63157653808594,
"blob_id": "9af5a51d2836829078060ad626667312a5825952",
"content_id": "d3359f9c68a71ef10950bcef8665ce4b7a36a7b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1970,
"license_type": "no_license",
"max_line_length": 465,
"num_lines": 19,
"path": "/Projects/UGDownloader/README.md",
"repo_name": "jrwaller/Puzzles-and-Projects",
"src_encoding": "UTF-8",
"text": "How this project came to be:\n\nThis project started out as way to get guitar tabs more easily. \nMany of the songs I needed tabs for were being blocked in the US. \nThey could be accessed using the Hola Unblocker extension, but this was tedious every time I wanted a guitar tab.\n\nI used Chrome's inspect element tool to view the HTML of the guitar pro tab pages and found that Ultimate Guitar provides the Tab-ID in a hidden field. Using the tab ID and the default style of Ultimate Guitar's tab download URLs, I was able to create download links for any guitar tab, even if they were blocked in the US. I decided that I could write this functionality into a program so I'd never have to spend a great deal of time searching for alternate tabs. \n\nThe first iteration of this project was a Python script that used Selenium web drivers and URLLib libraries to fetch a guitar tab when I input a URL to a guitar pro tab page.\nThis proved to also be tedious since it had to launch an instance of Firefox every single time, pasting URL links into the script directly was annoying, and I had to deal with downloading issues such as having to click \"Save to File\" every single time. \nI experimented with PhantomJS as an alternative, but it was difficult to download files using it. Rather than call it quits here, I decided that the best way to do this would be to incorporate it into a Chrome extension.\n\nI used Google's tutorials for the Manifest, and I taught myself enough JS to interact with the elements on the page. After quite a bit of testing, I managed to get a working prototype.\n\nThis represents the current product in unpackaged form. \n\nUsage: Visit a Guitar Pro or Power Tab on Ultimate Guitar after loading the extension, and click the Guitar Symbol in the Extensions bar within Chrome. \n\nThe program automatically checks if the page is valid, extracts the ID from the hidden field, and crafts the download links, which it conveniently opens and downloads for you. \n"
},
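The first Python iteration described in the README above is not part of this dump, so the following is only a minimal sketch of the idea it describes: read the hidden tab-ID field out of the page HTML and build the download link. The endpoint format matches the one in contentscript.js later in this repo; the helper names and the regex (which assumes the usual attribute order) are illustrative, not the original script.

```python
import re
import urllib.request

def tab_download_url(page_html):
    # Ultimate Guitar exposes the tab ID in a hidden input such as:
    #   <input type="hidden" name="tab_id" value="12345">
    match = re.search(r'name="tab_id"\s+value="(\d+)"', page_html)
    if match is None:
        raise ValueError("no hidden tab_id field found on this page")
    return "http://tabs.ultimate-guitar.com/tabs/download?id=" + match.group(1)

def fetch_tab(page_url, out_path):
    # Fetch the tab page, extract the ID, then download the tab file itself.
    html = urllib.request.urlopen(page_url).read().decode("utf-8", "replace")
    urllib.request.urlretrieve(tab_download_url(html), out_path)
```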
{
"alpha_fraction": 0.7862595319747925,
"alphanum_fraction": 0.7862595319747925,
"avg_line_length": 31.75,
"blob_id": "7d227a8db7fdeaf537eb7d26c2a53370b8af2013",
"content_id": "bcfdb117317de634ee5b44c73f172634cc54d286",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 131,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 4,
"path": "/README.md",
"repo_name": "jrwaller/Puzzles-and-Projects",
"src_encoding": "UTF-8",
"text": "# Puzzles-and-Projects\nCoding Bat, Project Euler, and Glassdoor Interview Questions\n\nLanguages used: JavaScript, Java, Python, C++\n"
},
{
"alpha_fraction": 0.6763284802436829,
"alphanum_fraction": 0.6859903335571289,
"avg_line_length": 36.6363639831543,
"blob_id": "ef517db7585ce3586947369ce33528204d5ff02f",
"content_id": "400a9c60d856e425a892685c0b0f192e13c109d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 414,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 11,
"path": "/Projects/UGDownloader/contentscript.js",
"repo_name": "jrwaller/Puzzles-and-Projects",
"src_encoding": "UTF-8",
"text": "if (document.getElementsByName(\"tab_id\")[0] != undefined)\n{\n var id = document.getElementsByName(\"tab_id\")[0].value;\n var url = \"http://tabs.ultimate-guitar.com/tabs/download?id=\" + id\n window.open(url);\n}\nelse if (document.getElementsByName(\"id\")[0] != undefined){\n var id = document.getElementsByName(\"id\")[0].value;\n var url = \"http://tabs.ultimate-guitar.com/tabs/download?id=\" + id\n window.open(url);\n}\n"
},
{
"alpha_fraction": 0.6072695255279541,
"alphanum_fraction": 0.6313534379005432,
"avg_line_length": 28.81938362121582,
"blob_id": "90a30a0bdd47e4866c16b13fa7bc1b8a9b0071d0",
"content_id": "9d900647622a90de4212350b5f0da58585b86a68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6768,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 227,
"path": "/Puzzles/CodingBat/CodingBat.py",
"repo_name": "jrwaller/Puzzles-and-Projects",
"src_encoding": "UTF-8",
"text": "__author__ = 'JamesWaller'\n#---------- Python Warmup-1 -----------#\n\n# CodingBat - Warmup-1 - Sleep_in\n# The parameter weekday is True if it is a weekday, and the parameter vacation is True if we are on vacation.\n# We sleep in if it is not a weekday or we're on vacation. Return True if we sleep in.\ndef sleep_in(weekday, vacation):\n if vacation:\n return True\n if not weekday:\n return True\n return False;\n\n\n\n# CodingBat - Warmup-1 - Monkey_Trouble\n# We have two monkeys, a and b, and the parameters a_smile and b_smile indicate if each is smiling.\n# We are in trouble if they are both smiling or if neither of them is smiling. Return True if we are in trouble.\ndef monkey_trouble(a_smile, b_smile):\n return a_smile == b_smile\n\n\n\n# CodingBat - Warmup-1 - Sum_Double\n# Given two int values, return their sum. Unless the two values are the same, then return double their sum.\ndef sum_double(a, b):\n if a == b:\n return 2*(a + b)\n return a + b\n\n\n\n# CodingBat - Warmup-1 - Diff21\n# Given an int n, return the absolute difference between n and 21, except return double the absolute difference if n is over 21.\ndef diff21(n):\n if n > 21:\n return 2*abs(n - 21)\n return abs(n - 21)\n\n\n\n# CodingBat - Warmup-1 - Parrot_Trouble\n# We have a loud talking parrot. The \"hour\" parameter is the current hour time in the range 0..23.\n# We are in trouble if the parrot is talking and the hour is before 7 or after 20. Return True if we are in trouble.\ndef parrot_trouble(talking, hour):\n if (hour < 7 or hour > 20) and talking:\n return True\n return False\n\n\n\n# CodingBat - Warmup-1 - Makes10\n# Given 2 ints, a and b, return True if one if them is 10 or if their sum is 10.\ndef makes10(a, b):\n if a == 10 or b == 10:\n return True\n if a + b == 10:\n return True\n return False\n\n\n\n# CodingBat - Warmup-1 - Near_Hundred\n# Given an int n, return True if it is within 10 of 100 or 200. Note: abs(num) computes the absolute value of a number.\ndef near_hundred(n):\n if abs(100 - abs(n)) <= 10:\n return True\n if abs(200 - abs(n)) <= 10:\n return True\n return False\n\n\n\n# CodingBat - Warmup-1 - Pos_Neg\n# Given 2 int values, return True if one is negative and one is positive.\n# Except if the parameter \"negative\" is True, then return True only if both are negative.\ndef pos_neg(a, b, negative):\n if negative:\n if a < 0 and b < 0:\n return True\n else:\n return False\n return (a < 0 and b > 0) or (a > 0 and b < 0)\n\n\n\n# CodingBat - Warmup-1 - Not_String\n# Given a string, return a new string where \"not \" has been added to the front.\n# However, if the string already begins with \"not\", return the string unchanged.\ndef not_string(str):\n if len(str) >= 3 and str[:3] == \"not\":\n return str\n return \"not \" + str\n\n\n\n# CodingBat - Warmup-1 - Missing_Char\n# Given a non-empty string and an int n, return a new string where the char at index n has been removed.\n# The value of n will be a valid index of a char in the original string (i.e. 
n will be in the range 0..len(str)-1 inclusive).\ndef missing_char(str, n):\n return str[:n] + str[(n + 1):]\n\n\n\n# CodingBat - Warmup-1 - Front_Back\n# Given a string, return a new string where the first and last chars have been exchanged.\ndef front_back(str):\n if len(str) == 1 or len(str) == 0:\n return str;\n firstLetter = str[0]\n lastLetter = str[len(str) - 1]\n base = str[1:len(str) - 1]\n return lastLetter + base + firstLetter\n\n\n\n# CodingBat - Warmup-1 - Front3\n# Given a string, we'll say that the front is the first 3 chars of the string.\n# If the string length is less than 3, the front is whatever is there. Return a new string which is 3 copies of the front.\ndef front3(str):\n firstThree = str[0:3]\n return firstThree * 3\n\n\n\n#---------- Python Warmup-2 -----------#\n\n# CodingBat - Warmup-2 - String_Times\n# Given a string and a non-negative int n, return a larger string that is n copies of the original string.\ndef string_times(str, n):\n return n * str\n\n\n\n# CodingBat - Warmup-2 - Front_Times\n# Given a string and a non-negative int n, return a larger string that is n copies of the original string.\ndef front_times(str, n):\n if len(str) <= 3:\n return n * str;\n return n * str[:3]\n\n\n\n# CodingBat - Warmup-2 - String_bits\n# Given a string, return a new string made of every other char starting with the first, so \"Hello\" yields \"Hlo\".\ndef string_bits(str):\n result = \"\"\n for i in xrange(len(str)):\n if i % 2 == 0:\n result += str[i]\n return result\n\n\n\n# CodingBat - Warmup-2 - String_Splosion\n# Given a non-empty string like \"Code\" return a string like \"CCoCodCode\".\ndef string_splosion(str):\n result = \"\"\n for i in xrange(len(str) + 1):\n result += str[:i]\n return result\n\n# CodingBat - Warmup-2 - Last2\n# Given a string, return the count of the number of times that a substring length 2 appears in the string\n# and also as the last 2 chars of the string, so \"hixxxhi\" yields 1 (we won't count the end substring).\ndef last2(str):\n if len(str) <= 2:\n return 0\n lastTwo = str[(len(str) - 2):]\n count = 0;\n for i in xrange(len(str) - 2):\n if str[i:(i + 2)] == lastTwo:\n count += 1\n return count\n\n\n\n# CodingBat - Warmup-2 - Array_Count9\n# Given an array of ints, return the number of 9's in the array.\ndef array_count9(nums):\n count = 0\n for i in xrange(len(nums)):\n if nums[i] == 9:\n count += 1\n return count\n\n\n\n# CodingBat - Warmup-2 - Array_Front9\n# Given an array of ints, return True if one of the first 4 elements in the array is a 9. The array length may be less than 4.\ndef array_front9(nums):\n if (len(nums) < 4):\n for i in xrange(len(nums)):\n if nums[i] == 9:\n return True\n else:\n for i in xrange(3):\n if nums[i] == 9:\n return True\n return False\n\n\n\n# CodingBat - Warmup-2 - Array123\n# Given an array of ints, return True if .. 1, 2, 3, .. appears in the array somewhere.\ndef array123(nums):\n for i in xrange(len(nums) - 2):\n if (nums[i] == 1 and nums[i + 1] == 2 and nums[i + 2] == 3):\n return True\n return False\n\n\n\n# CodingBat - Warmup-2 - String_Match\n# Given 2 strings, a and b, return the number of the positions where they contain the same length 2 substring.\n# So \"xxcaazz\" and \"xxbaaz\" yields 3, since the \"xx\", \"aa\", and \"az\" substrings appear in the same place in both strings.\ndef string_match(a, b):\n smaller = 0\n count = 0\n if len(a) <= len(b):\n smaller = len(a)\n else:\n smaller = len(b)\n for i in xrange(smaller - 1):\n if (a[i:(i + 2)] == b[i:(i + 2)]):\n count += 1\n return count"
},
{
"alpha_fraction": 0.520082414150238,
"alphanum_fraction": 0.5550978183746338,
"avg_line_length": 24.552631378173828,
"blob_id": "10f24a6ab1d4d4ea2688345565edf45a6ef10c53",
"content_id": "ab9ab9222635e7190468843a3c9367df4b05f8f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 971,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 38,
"path": "/Puzzles/Interview Questions/PrettyPrint.cpp",
"repo_name": "jrwaller/Puzzles-and-Projects",
"src_encoding": "UTF-8",
"text": "//\n// PrettyPrint.cpp\n//\n// Print concentric rectangular pattern in a 2d matrix.\n// The outermost rectangle is formed by A, then the next outermost is formed by A-1 and so on.\n// You will be given A as an argument to the function you need to implement, and you need to return a 2D array.\n// Example Output: A = 3\n// 33333\n// 32223\n// 32123\n// 32223\n// 33333\n\nvector<vector<int> > makeBox (int A)\n{\n int size = 2*A - 1;\n vector<vector<int> > box (size, vector<int>(size));\n int leftBound = 0;\n int rightBound = size;\n int topBound = 0;\n int bottomBound = size;\n while (leftBound < rightBound && topBound < bottomBound)\n {\n for (int i = topBOund; i <bottomBound; i++)\n {\n for (int j = leftBound; j < rightBound; j++)\n {\n box[j][i] = A;\n }\n }\n A = A- 1;\n topBound++;\n bottomBound--;\n leftBound++;\n rightBound--;\n }\n return box;\n}\n"
},
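The concentric pattern that PrettyPrint.cpp builds by shrinking bounds also has a closed form: each cell holds A minus its distance to the nearest edge. A small Python cross-check of that observation (not part of the repository, just a way to verify the expected output):

```python
def make_box(a):
    # cell (i, j) is a minus its distance to the closest border of the grid
    size = 2 * a - 1
    return [[a - min(i, j, size - 1 - i, size - 1 - j) for j in range(size)]
            for i in range(size)]

for row in make_box(3):
    print("".join(str(v) for v in row))  # 33333 / 32223 / 32123 / 32223 / 33333
```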
{
"alpha_fraction": 0.4792013168334961,
"alphanum_fraction": 0.5008319616317749,
"avg_line_length": 17.78125,
"blob_id": "da0bbe3241456a4cecadf53c813aa6476a29df64",
"content_id": "fe53d4d3cbffb82315d4ce1409adc7b33fedd19e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 601,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 32,
"path": "/Puzzles/Interview Questions/BinarySearch.cpp",
"repo_name": "jrwaller/Puzzles-and-Projects",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n\nusing namespace std;\n//\n// main.cpp\n// BinarySearch\n//\n// Created by James Waller on 5/4/15.\n// Copyright (c) 2015 JamesWaller. All rights reserved.\n//\n\n#include <iostream>\n\nbool binSearch(int start, int last, int nums[10], int x)\n{\n if (start <= last)\n {\n int mid = (start + last) / 2;\n if (nums[mid] == x)\n {\n return true;\n }\n else if (x < nums[mid])\n {\n return binSearch(start, mid - 1, nums, x);\n }\n else{\n return binSearch(mid+1, last, nums, x);\n }\n }\n return false;\n}\n"
},
{
"alpha_fraction": 0.45521023869514465,
"alphanum_fraction": 0.46252286434173584,
"avg_line_length": 21.32653045654297,
"blob_id": "dae897a29210f0a0664a359400e9cd5fe4ccdf7e",
"content_id": "6aba227472d0620d55f58f0724a55830ca546261",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1094,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 49,
"path": "/Puzzles/Interview Questions/Kattis-ProblemID:lineup.cpp",
"repo_name": "jrwaller/Puzzles-and-Projects",
"src_encoding": "UTF-8",
"text": "//\n// Kattis-ProblemID:lineup.cpp\n// \n// Problem description at: https://open.kattis.com/contests/biy245/problems/lineup\n\nstring lineThemUp(int num, string names[])\n{\n bool increasing == false;\n bool decreasing == false;\n for (int i = 0; i < num - 1; i++)\n {\n string first = names[i];\n string second = names[i + 1];\n int index = 0;\n if (first[index] == second[index])\n {\n while (first[index] == second[index])\n {\n index++;\n }\n }\n \n if (first[index] < second[index] && decreasing)\n {\n return \"NEITHER\";\n }\n else if (first[index] > second[index] && increasing)\n {\n return \"NEITHER\";\n }\n else if (first[index] < second[index])\n {\n increasing = true;\n }\n else if (first[index] > second[index])\n {\n decreasing = true;\n }\n index = 0;\n }\n if (increasing)\n {\n return \"INCREASING\";\n }\n else\n {\n return \"DECREASING\";\n }\n}\n"
},
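The character-by-character scan in lineup.cpp is deciding lexicographic order between adjacent names, which Python's string comparison does natively. A compact cross-check of the same INCREASING / DECREASING / NEITHER logic (illustrative only, not the submitted solution):

```python
def line_them_up(names):
    # strictly sorted forward or backward; anything else is NEITHER
    if names == sorted(names):
        return "INCREASING"
    if names == sorted(names, reverse=True):
        return "DECREASING"
    return "NEITHER"

print(line_them_up(["JOE", "BOB", "ANDY", "AL"]))  # DECREASING
```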
{
"alpha_fraction": 0.8473684191703796,
"alphanum_fraction": 0.8473684191703796,
"avg_line_length": 190,
"blob_id": "13b8e365142172890cd106dd5db75ec13ed13163",
"content_id": "9c16baf32fb5741497464783054e82597a895c28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 190,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 1,
"path": "/Projects/README.md",
"repo_name": "jrwaller/Puzzles-and-Projects",
"src_encoding": "UTF-8",
"text": "More projects available upon request. Can only be uploaded in private repositories due to containing solution code for future projects in Computer Science courses at Old Dominion University."
}
] | 8 |
brainzest/2048-myway | https://github.com/brainzest/2048-myway | 69e59da40db339cf13e5cf49f72801b9272af84c | c55e9061107faa0f6c0ee6b7acef0e4513d41e10 | abc151fba17332de067542fa5b05ee0da19ccf60 | refs/heads/master | 2020-06-05T03:56:58.634506 | 2015-06-25T00:46:09 | 2015-06-25T00:46:09 | 38,017,601 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.49255749583244324,
"alphanum_fraction": 0.5378890633583069,
"avg_line_length": 32.91954040527344,
"blob_id": "7a999ee4f0097e739917be88527ada5b64cc2caa",
"content_id": "7f2107cb9b4cf35caf5c86a4cc173fe96aec0787",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2956,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 87,
"path": "/2048_ntek.py",
"repo_name": "brainzest/2048-myway",
"src_encoding": "UTF-8",
"text": "from tkinter import *\nfrom math import *\nimport random\nmaster = Tk()\nbox=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\nscore =0\ndead=0\nstart_coordinate_x =100\nstart_coordinate_y=100\nbox_width=100\nbox_height=100\n\n \nw = Canvas(master, width=700, height=550)\nw.create_text(300 , 20,text=\"2048\", fill=\"brown\", font=(\"Helvetica\",22))\nw.create_text(300 , 50,text=\"GO CRAZY!\", fill=\"purple\", font=(18))\nw.create_text(450 , 70,text=\"SCORE :\"+ str(score), fill=\"green\", font=(18))\n \ndef addText(canvas_width, canvas_height, stringadd):\n w.create_text(canvas_width / 2, canvas_height / 2,text=stringadd, fill=\"white\", font=(\"Helvetica\",22))\n \ndef draw_boxes(x,y,width,height,color):\n \n w.create_rectangle(x,y,width,height,fill=color)\n # addText(width+x,height+y,\"2\")\n \nw.pack()\n\n \ndef rgb_to_hex(r,g,b):\n return '#%02x%02x%02x' % (r,g,b)\ndef create_random_tile():\n #create todo\n # Create a new tile in a randomly selected empty \n # square. The tile should be 2 80% of the time and\n # 4 20% of the time.\n available_boxes = []\n for row in range(4):\n for col in range(4):\n if box[row][col] == 0:\n available_boxes.append([row, col])\n\n if not available_boxes:\n print (\"There are no available positions.\")\n else:\n random_tile = random.choice(available_boxes)\n weighted_choices = [(2, 8), (4, 2)]\n available_values= [val for val, count in weighted_choices for i in range(count)]\n tile = random.choice(available_values)\n print(random_tile[0])\n print(tile)\n global box\n print(box)\n box[random_tile[0]][random_tile[1]]= tile\n print(box)\n \ndef restart():\n global box\n box=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]\n draw()\n score=dead=0\n create_random_tile() \n \ndef draw():\n #draw_boxes(50, 50, 150, 150,\"blue\")\n #print 'lenght', len(boxes)\n create_random_tile()\n for i in range(len(box)):\n for j in range(len(box)):\n \n x = start_coordinate_x + (start_coordinate_x)*i\n y = start_coordinate_y + (start_coordinate_y)*j\n box_width = x+ 100\n box_height = y + 100\n draw_boxes(x, y, box_width, box_height,\"grey\")\n if box[i][j] >0 :\n p =log(box[i][j])/log(2)\n color=rgb_to_hex(p*23,p*23,p*23);\n #todo :improve colour of the boxes \n draw_boxes(x, y, box_width, box_height, color)\n addText(box_width+x,box_height+y,box[i][j])\n \n\nb=Button(master, height=1,activeforeground=\"magenta\",activebackground=\"light yellow\", text=\"RESTART\" ,width=10, font=(\"Helvetica\", 10),command=restart)\nb.pack() \ndraw()\nmainloop()\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.774193525314331,
"alphanum_fraction": 0.8028674125671387,
"avg_line_length": 38.57143020629883,
"blob_id": "cc526a059a89f6e7d8c647412827393e8952905b",
"content_id": "b3114a7c6c5ae07d3ad2444460ac953a4dce3f42",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 279,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 7,
"path": "/README.md",
"repo_name": "brainzest/2048-myway",
"src_encoding": "UTF-8",
"text": "# 2048-myway\nLearning GUI in python starting 2048\n\nUsing Tkinter library, found it good for the canvas use\nNeed to write class and update the functions\nThe gui is ready, next step will be adding class and writing merge functions and key movements and random number \ngeneration \n\n"
}
] | 2 |
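The 2048 README lists merge functions as the next step, so here is a minimal sketch of a single-row left merge in the classic 2048 style. Nothing like it exists in the repository yet; the function name and the rule that each tile merges at most once per move are assumptions based on how the original game behaves.

```python
def merge_row_left(row):
    tiles = [v for v in row if v != 0]  # slide: drop the gaps first
    merged = []
    i = 0
    while i < len(tiles):
        if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
            merged.append(tiles[i] * 2)  # combine one equal pair, consume both
            i += 2
        else:
            merged.append(tiles[i])
            i += 1
    return merged + [0] * (len(row) - len(merged))  # pad back to full width

assert merge_row_left([2, 2, 2, 0]) == [4, 2, 0, 0]
assert merge_row_left([2, 0, 2, 4]) == [4, 4, 0, 0]
```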
Suman2023/Lets-Django | https://github.com/Suman2023/Lets-Django | 0c29c56ccdd86fef2175bb91cc0ba8fa7c7881b1 | 4eec1eda14861dc396d41483e62fbc4e5b03f65d | 93b92079011f7d588194b9a57af8f3352fcbf0f8 | refs/heads/main | 2023-06-21T16:21:35.339095 | 2021-07-28T20:49:19 | 2021-07-28T20:49:19 | 384,542,811 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6070975661277771,
"alphanum_fraction": 0.6070975661277771,
"avg_line_length": 31.875,
"blob_id": "96df9b666f67a212557f7f67bcfc3263cd5d4581",
"content_id": "1b0a8650a9fc509c8296635d51991d0f078e96be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1578,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 48,
"path": "/Car_Pooling/registration/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom django.http.response import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import login, logout, authenticate\nfrom .forms import SignUpForm, LogInForm\n\n# # # Create your views here.\n\n\ndef register(request):\n if request.method == \"POST\":\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('registration:login')\n else:\n return render(request, \"registration/signup.html\", {'form': form})\n\n return render(request, \"registration/signup.html\", {'form': SignUpForm()})\n\n\ndef logout_view(request):\n logout(request)\n return render(request, 'home/index.html')\n\n\ndef login_view(request):\n if request.method == \"POST\":\n form = LogInForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n\n user = authenticate(username=username, password=password)\n if user:\n login(request, user)\n return redirect('home:index')\n else:\n print(form)\n form.add_error(None, error='Inavlid Credentials')\n\n print(form)\n return render(request, 'registration/login.html',\n {'form': form})\n\n else:\n return render(request, 'registration/login.html', {'form': form})\n return render(request, 'registration/login.html', {'form': LogInForm()})\n"
},
{
"alpha_fraction": 0.6659340858459473,
"alphanum_fraction": 0.6659340858459473,
"avg_line_length": 34,
"blob_id": "79c74b8fc80bd538414e1afd7ce876d3243d8e34",
"content_id": "9d170380f4cc3f4efaaeca80269492773e4d6f12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 455,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 13,
"path": "/rest_api/notes/urls.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.urls import path\n\nfrom . import views\n\napp_name = \"notes\"\nurlpatterns = [\n path(\"\", views.getRoutes, name=\"routes\"),\n path(\"notes/\", views.getNotes, name=\"getNotes\"),\n path(\"notes/create/\", views.createNote, name=\"createNote\"),\n path(\"notes/<str:pk>/\", views.getNote, name=\"getNote\"),\n path(\"notes/<str:pk>/update/\", views.updateNote, name=\"updatenote\"),\n path(\"notes/<str:pk>/delete\", views.deleteNote, name=\"deleteNote\")\n]\n"
},
{
"alpha_fraction": 0.8072916865348816,
"alphanum_fraction": 0.8072916865348816,
"avg_line_length": 23.125,
"blob_id": "43467a3cdd3d8a54ce3b14eee89aa40376e37007",
"content_id": "ed425634b9d1c54e34762faed33d6ffc0a2c9d53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 8,
"path": "/Car_Pooling/home/admin.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\nfrom .models import Cities, Ride, OfferRide\n# Register your models here.\n\nadmin.site.register(Cities)\nadmin.site.register(Ride)\nadmin.site.register(OfferRide)"
},
{
"alpha_fraction": 0.8205128312110901,
"alphanum_fraction": 0.8205128312110901,
"avg_line_length": 30.399999618530273,
"blob_id": "46e74d50bccde48ceab8330572cb227f1de7e5c0",
"content_id": "92db3518914d7dd8c43208627b0a636c27afc601",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 156,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 5,
"path": "/Hello_World/modelBasics/admin.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Person, Employee\n# Register your models here.\nadmin.site.register(Person)\nadmin.site.register(Employee)"
},
{
"alpha_fraction": 0.5769474506378174,
"alphanum_fraction": 0.5769474506378174,
"avg_line_length": 31.224489212036133,
"blob_id": "d9799c72b1c7823c476fa9793463e4eba2f45e67",
"content_id": "dbc18c4828ecf60f920f01b9374047cf87194ef6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1579,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 49,
"path": "/Car_Pooling/getride/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.http.response import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.db.models import Q\nfrom home.models import Cities, Ride, OfferRide\nfrom django.forms import ValidationError\n\n# Create your views here.\n\n\ndef getRide(request):\n if request.method == \"POST\":\n try:\n origin = request.POST[\"origin\"]\n destination = request.POST[\"destination\"]\n date = request.POST[\"date\"]\n seats_required = request.POST[\"seat_available\"]\n\n rides = Ride.objects.all().filter(\n origin=origin,\n destination=destination,\n seat_available__gte=seats_required,\n journey_date__gte=date)\n\n relatedRides = Ride.objects.all().filter(\n (Q(origin=origin)\n | Q(destination=destination))\n & Q(journey_date__gte=date))\n\n return render(request, \"getride/availability.html\", {\n \"rides\": rides,\n \"relatedRides\": relatedRides\n })\n except ValidationError:\n return render(\n request, \"home/index.html\", {\n 'cities': Cities.objects.all(),\n \"validationerror\": \"Invalid queries\"\n })\n\n return render(request, \"getride/availability.html\")\n\n\ndef chat(request, queryparams=None):\n if request.user.is_authenticated:\n print(request.user.username)\n return HttpResponse(\"hello world\")\n else:\n print(\"Nope\")\n return HttpResponse(\"goodbye:(\")\n"
},
{
"alpha_fraction": 0.5391414165496826,
"alphanum_fraction": 0.5391414165496826,
"avg_line_length": 44.25714111328125,
"blob_id": "45a6dfcf25374905ae7c0df936382a7239a9fec5",
"content_id": "963f2677c44cbef5fd8ab4ac0cef2d5144d5604e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1584,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 35,
"path": "/Car_Pooling/createride/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.http.response import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom home.models import Cities, OfferRide\n\n\n# Create your views here.\ndef createride(request):\n if request.user.is_authenticated:\n if request.method == \"POST\":\n user_name = request.user.username\n origin = request.POST[\"origin\"]\n destination = request.POST[\"destination\"]\n journey_date = request.POST[\"journey_date\"]\n seat_available = request.POST[\"seat_available\"]\n origin_location = request.POST[\"origin_location\"]\n destination_location = request.POST[\"destination_location\"]\n contact = request.POST[\"contact\"]\n fare = request.POST[\"fare\"]\n # We will add validation later on\n OfferRide.objects.create(user_name=user_name,\n origin=origin,\n destination=destination,\n journey_date=journey_date,\n seat_available=seat_available,\n origin_location=origin_location,\n destination_location=destination_location,\n contact=contact,\n fare=fare)\n return redirect('home:index')\n\n cities = Cities.objects.all()\n return render(request, 'createride/createride.html',\n {\"cities\": cities})\n else:\n return redirect('registration:login')\n"
},
{
"alpha_fraction": 0.7223650217056274,
"alphanum_fraction": 0.7223650217056274,
"avg_line_length": 38,
"blob_id": "976195e3f5f13cb457808dadb5922d4cf2a32aa4",
"content_id": "7804ac699acee83abaf9a6f91b0384ec9ba6ed49",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 389,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 10,
"path": "/rest_api/notes/serializers.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.db.models.base import Model\nfrom rest_framework.serializers import ModelSerializer\nfrom .models import Note\n\n\n# we are using this to make our objects that returns from the table to json compatible\nclass NoteSerializer(ModelSerializer):\n class Meta:\n model = Note\n fields = '__all__' # we can do like [ 'id' 'title' blah blah] but __all__ takes all the fields"
},
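notes/urls.py earlier in this repo routes to views such as getNotes and createNote, but views.py itself is not included in this dump. The sketch below is therefore only a plausible reconstruction of how NoteSerializer is typically wired into DRF function-based views, not the repository's actual code.

```python
from rest_framework.decorators import api_view
from rest_framework.response import Response

from .models import Note
from .serializers import NoteSerializer

@api_view(["GET"])
def getNotes(request):
    notes = Note.objects.all()
    serializer = NoteSerializer(notes, many=True)  # many=True: serialize a queryset
    return Response(serializer.data)

@api_view(["POST"])
def createNote(request):
    serializer = NoteSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()  # creates the Note row from the validated payload
    return Response(serializer.data)
```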
{
"alpha_fraction": 0.7931034564971924,
"alphanum_fraction": 0.7931034564971924,
"avg_line_length": 57,
"blob_id": "f947006a99690a1b9e641d97a42b0025d4bd3d7d",
"content_id": "5a3d939fa474284f3198da5cf7df17c7b78f33cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 1,
"path": "/Hello_World/ReadMe.md",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "# Learning the basics of Django including HTML CSS and JS\n"
},
{
"alpha_fraction": 0.6424418687820435,
"alphanum_fraction": 0.6482558250427246,
"avg_line_length": 26.520000457763672,
"blob_id": "a235c8240b423c6f659ad141c8a9e36975d1f8c9",
"content_id": "a704dcebc813dc0af2f46e1bf3e4a0ac9617081e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 688,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 25,
"path": "/Hello_World/modelBasics/forms.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from .models import Employee\nfrom django import forms\nfrom django.forms.fields import CharField\n\ngender_choices = [\n ('M', 'Male'),\n ('F', 'Female'),\n ('O', 'Other'),\n]\ndept_choices = [\n ('FS', \"Full Stack\"),\n ('FE', 'Front End'),\n ('BE', 'Backend'),\n]\n\n\nclass EmployeeForm(forms.ModelForm):\n first_name = forms.CharField(max_length=15, required=True)\n last_name = forms.CharField(max_length=15, required=True)\n gender = forms.ChoiceField(choices=gender_choices, required=True)\n department = forms.ChoiceField(choices=dept_choices, required=True)\n\n class Meta:\n model = Employee\n fields = ('first_name', 'last_name', 'department', 'gender')\n"
},
{
"alpha_fraction": 0.6819126605987549,
"alphanum_fraction": 0.6819126605987549,
"avg_line_length": 47.099998474121094,
"blob_id": "b4542731b24d0a7f5c961a2bcff3513213fa0a50",
"content_id": "ca38fe7a8417a99ea8993f230f9fd7e3b9756572",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 481,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 10,
"path": "/Hello_World/hello/urls.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\n#There is a main urls.py for the whole project but we add for every app we create just for sake of simplicity and not cluttering the main urls.py\nurlpatterns = [\n path(\"\",views.index, name = \"index\"), #\"\" -> default route\n path(\"name\", views.name, name = \"name\"), # this has url -> hello/name\n path(\"<str:name>\",views.greet, name = \"greet\"), # here we pass any string after hello/<here> we return with Hello <here>\n \n]\n"
},
{
"alpha_fraction": 0.652314305305481,
"alphanum_fraction": 0.6576964259147644,
"avg_line_length": 27.15151596069336,
"blob_id": "c3863f1dc500b65bfb1b3fef831746e0097667b6",
"content_id": "eefd283fde4f68132fe420a23afa709364067922",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 929,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 33,
"path": "/Hello_World/connect/models.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n\nclass Room(models.Model):\n group_name = models.TextField(unique=True)\n group_admin = models.ForeignKey(User, on_delete=models.CASCADE)\n\n\n# Create your models here.\nclass Message(models.Model):\n group = models.ForeignKey(\n Room,\n related_name='messages_group',\n on_delete=models.CASCADE,\n )\n author = models.ForeignKey(\n User,\n related_name='author_messages',\n on_delete=models.CASCADE,\n )\n content = models.TextField(default=\"\")\n timeStamp = models.DateTimeField(auto_now_add=True, )\n\n def __str__(self):\n return self.author.username\n\n def last_10_messages(groupName, group_admin):\n return Message.objects.order_by('-timeStamp').filter(\n group__group_name=groupName,\n group__group_admin__username=group_admin)[:10:-1]\n"
},
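The `[:10:-1]` slice in `last_10_messages` above deserves a second look: on ordinary Python sequences it means "from the last element down to index 11", not "first ten, reversed". A quick stdlib-only comparison on plain lists (standard slicing semantics; note additionally that Django querysets reject negative slice steps, so the distinction matters here):

```python
msgs = list(range(20))      # stand-in for 20 messages ordered newest-first

# Negative step: walks from the END of the sequence down to index 11.
print(msgs[:10:-1])         # [19, 18, 17, 16, 15, 14, 13, 12, 11]

# "First ten, then reverse" -- the usual way to show the 10 newest
# messages in chronological order.
print(msgs[:10][::-1])      # [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
```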
{
"alpha_fraction": 0.5360110998153687,
"alphanum_fraction": 0.5360110998153687,
"avg_line_length": 23.337078094482422,
"blob_id": "5156f7aa0059558ccbe6516b06a56ce111ca1ae1",
"content_id": "e629f62e5d878c885da06c11324cec550ffe8c14",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2166,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 89,
"path": "/rest_api/notes/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom .serializers import NoteSerializer\nfrom .models import Note\n# Create your views here.\n\n\n@api_view(['GET'])\ndef getRoutes(req):\n routes = [\n {\n 'Endpoint': '/notes/',\n 'method': 'GET',\n 'body': None,\n 'description': 'Returns all the notes'\n },\n {\n 'Endpoint': '/notes/id',\n 'method': 'GET',\n 'body': None,\n 'description': 'Returns notes with the given id'\n },\n {\n 'Endpoint': '/notes/create/',\n 'method': 'POST',\n 'body': {\n 'body': \"\"\n },\n 'description': 'Create a new note'\n },\n {\n 'Endpoint': '/notes/id/update/',\n 'method': 'PUT',\n 'body': {\n 'body': \"\"\n },\n 'description': 'Update note based on passed id'\n },\n {\n 'Endpoint': '/notes/id/delete/',\n 'method': 'DELETE',\n 'body': None,\n 'description': 'Delete note based on passed id'\n },\n ]\n return Response(routes)\n\n\n@api_view(['GET'])\ndef getNotes(req):\n notes = Note.objects.all()\n serializer = NoteSerializer(notes, many=True)\n return Response(serializer.data)\n\n\n@api_view(['GET'])\ndef getNote(req, pk):\n note = Note.objects.get(id=pk)\n serializer = NoteSerializer(note, many=False)\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef createNote(req):\n data = req.data\n note = Note.objects.create(\n title=data['title'],\n body=data['body'],\n )\n serializer = NoteSerializer(note, many=False)\n return Response(serializer.data)\n\n\n@api_view(['PUT'])\ndef updateNote(req, pk):\n data = req.data\n note = Note.objects.get(id=pk)\n serializer = NoteSerializer(note, data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n@api_view(['DELETE'])\ndef deleteNote(req, pk):\n note = Note.objects.get(id=pk)\n note.delete()\n return Response(\"Note deleted!\")\n"
},
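The route table returned by `getRoutes` above documents five endpoints. A plausible `notes/urls.py` wiring them to the view functions (hypothetical: only the view names and endpoint shapes come from the file above; `notes/create/` is registered before the `<pk>` pattern so it is not swallowed by it):

```python
# Hypothetical notes/urls.py matching the endpoint list in getRoutes().
from django.urls import path
from . import views

urlpatterns = [
    path("", views.getRoutes),                         # GET    route index
    path("notes/", views.getNotes),                    # GET    all notes
    path("notes/create/", views.createNote),           # POST   new note
    path("notes/<str:pk>/", views.getNote),            # GET    one note
    path("notes/<str:pk>/update/", views.updateNote),  # PUT    edit note
    path("notes/<str:pk>/delete/", views.deleteNote),  # DELETE remove note
]
```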
{
"alpha_fraction": 0.5559566617012024,
"alphanum_fraction": 0.5613718628883362,
"avg_line_length": 35.93333435058594,
"blob_id": "c407be663f426033973eda5bd18bd4e0e3363ef8",
"content_id": "abc30915e96534586d908bf67b16002e2e6a7960",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1108,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 30,
"path": "/Hello_World/modelBasics/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from .models import Employee, Person\nfrom .forms import EmployeeForm\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\n\n# Create your views here.\ndef index(request):\n if request.method == \"POST\":\n form = EmployeeForm(request.POST)\n if form.is_valid():\n first_name = form.cleaned_data[\"first_name\"]\n last_name = form.cleaned_data[\"last_name\"]\n department = form.cleaned_data[\"department\"]\n gender = form.cleaned_data[\"gender\"]\n\n person = Person.objects.create(first_name=first_name,\n last_name=last_name,\n gender=gender)\n employee = Employee.objects.create(profile=person,\n department=department)\n\n return HttpResponse(\"Added Successfully\")\n else:\n return render(request, \"modelBasics/index.html\", {\"form\": form})\n\n return render(request, \"modelBasics/index.html\", {\n \"form\": EmployeeForm(),\n \"lst\": [1, 2, 3, 4, 5, 6]\n })\n"
},
{
"alpha_fraction": 0.637264609336853,
"alphanum_fraction": 0.6402378678321838,
"avg_line_length": 33.7931022644043,
"blob_id": "ed33f1830a86dc5933ac247898ac160adbc0404f",
"content_id": "c2da794313031b9e67d5d443a474ffed33aa9ef9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1009,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 29,
"path": "/Hello_World/tasks/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django import forms\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\n# Create your views here.\n\n\nclass NewTaskform(forms.Form): # creates form without writing html form code\n task = forms.CharField(label=\"New Task\")\n # priority = forms.IntegerField(label = \"priority\", min_value=1, max_value = 99)\n\n\ndef index(req):\n if \"tasks\" not in req.session:\n req.session[\"tasks\"] = []\n return render(req, \"tasks/index.html\", {\"tasks\": req.session[\"tasks\"]})\n\n\ndef add(req):\n if req.method == \"POST\":\n form = NewTaskform(\n req.POST) # now all thst is Posted by user are in a var form\n if form.is_valid():\n task = form.cleaned_data[\"task\"]\n req.session[\"tasks\"] += [task]\n return HttpResponseRedirect(reverse(\"tasks:index\"))\n else:\n return render(req, \"tasks/add.html\", {\"form\": form})\n return render(req, \"tasks/add.html\", {\"form\": NewTaskform()})\n"
},
{
"alpha_fraction": 0.774193525314331,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 29,
"blob_id": "de0ab3c00036c58a975e3719e139b8635cc505be",
"content_id": "fb4e1e95544ad808893088e5ef45dbf3b0d5be2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 31,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 1,
"path": "/README.md",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "\n# Getting started with Django\n"
},
{
"alpha_fraction": 0.5769728422164917,
"alphanum_fraction": 0.6028460264205933,
"avg_line_length": 34.1363639831543,
"blob_id": "7a420a8c29364a9bd7863abc63a6e9aabdd0012c",
"content_id": "ea8c05d3b6b71d4f9f3f9cb4951813d34052226f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 773,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 22,
"path": "/Hello_World/modelBasics/migrations/0004_employee.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.5 on 2021-07-17 19:15\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('modelBasics', '0003_alter_person_gender'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Employee',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('department', models.CharField(choices=[('FS', 'Full Stack'), ('FE', 'Front End'), ('BE', 'Backend')], default='department', max_length=2)),\n ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='modelBasics.person')),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.7435897588729858,
"alphanum_fraction": 0.7435897588729858,
"avg_line_length": 26.299999237060547,
"blob_id": "6fabb232b7ab2b1ce5584d8200e213890658bde8",
"content_id": "01a6991b1b3e518231b220c955d0f9dd0a0c72f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 10,
"path": "/Car_Pooling/home/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.contrib.auth.models import User\n\nfrom .models import Cities, Ride, OfferRide\n# Create your views here.\n\n\ndef index(request):\n cities = Cities.objects.all()\n return render(request, \"home/index.html\", {\"cities\": cities})\n"
},
{
"alpha_fraction": 0.7133182883262634,
"alphanum_fraction": 0.727990984916687,
"avg_line_length": 31.851852416992188,
"blob_id": "407409ba2505749fe1d3a1a9942fb9500f130503",
"content_id": "6cb3676c024bd84de6abfb32b646a9342617eeaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 886,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 27,
"path": "/Car_Pooling/home/models.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom django.db import models\n\n# # Create your models here.\n# class UserProfile(models.Models):\n# user = models.OneToOneField(User, on_delete=models.CASCADE)\n# # picture = models.TextField(null=True, blank=True)\n\n\nclass Cities(models.Model):\n city = models.CharField(max_length=15)\n code = models.CharField(max_length=4)\n\n\nclass Ride(models.Model):\n user_name = models.CharField(default=\"\", max_length=50)\n origin = models.CharField(max_length=50)\n destination = models.CharField(max_length=50)\n journey_date = models.DateTimeField(auto_now=False, auto_now_add=False)\n seat_available = models.IntegerField()\n\n\nclass OfferRide(Ride):\n origin_location = models.CharField(max_length=50)\n destination_location = models.CharField(max_length=50)\n contact = models.IntegerField()\n fare = models.IntegerField()"
},
{
"alpha_fraction": 0.6859296560287476,
"alphanum_fraction": 0.6884422302246094,
"avg_line_length": 25.600000381469727,
"blob_id": "7eb42ff558872a25102e847a655656d3edc0158a",
"content_id": "14ad1b11dee6cae7bb9aa5a6534e5929febaa7d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 398,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 15,
"path": "/Hello_World/hello/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\ndef index(req): #req -> request\n return render(req, \"hello\\index.html\")\n\ndef name(req):\n return HttpResponse(\"Hello, SUM4N!\")\n\ndef greet(req, name):\n return render(req,\"hello\\greet.html\",\n {\n \"name\": name.capitalize(),\n }) # passing some parameters to the link as name in dict"
},
{
"alpha_fraction": 0.6608767509460449,
"alphanum_fraction": 0.6625310182571411,
"avg_line_length": 27.785715103149414,
"blob_id": "7426077b58aa9b47d9b7a80868a779912da2ce70",
"content_id": "0f2094a1ffc20db9fdef2e84aa4d769ea366e363",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1209,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 42,
"path": "/Hello_World/static/connect/js/room.js",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "//room.html\nconst roomName = JSON.parse(document.getElementById(\"room-name\").textContent);\n\n// if we use this websocket everything gets erased on reload\n// const chatSocket = new WebSocket(\n// \"ws://\" + window.location.host + \"/ws/chat/\" + roomName + \"/\"\n// );\n\n//Instead use this\nconst chatSocket = new ReconnectingWebSocket(\n \"ws://\" + window.location.host + \"/ws/chat/\" + roomName + \"/\"\n);\n\nchatSocket.onmessage = (e) => {\n const data = JSON.parse(e.data);\n console.log(data);\n document.querySelector(\"#chat-log\").value += data.message + \"\\n\";\n};\n\nchatSocket.onclose = (e) => {\n console.error(\"Something went wrong in our side :(\");\n};\n\ndocument.querySelector(\"#chat-message-input\").focus();\ndocument.querySelector(\"#chat-message-input\").onkeyup = (e) => {\n if (e.keyCode == 13) {\n //it represents enter key\n document.querySelector(\"#chat-message-submit\").click();\n }\n};\n\ndocument.querySelector(\"#chat-message-submit\").onclick = (e) => {\n const messageInputDom = document.querySelector(\"#chat-message-input\");\n const message = messageInputDom.value;\n chatSocket.send(\n JSON.stringify({\n message: message,\n command: \"new_message\",\n })\n );\n messageInputDom.value = \"\";\n};\n"
},
{
"alpha_fraction": 0.6538461446762085,
"alphanum_fraction": 0.6538461446762085,
"avg_line_length": 31.117647171020508,
"blob_id": "d6730ac99cb602ff4db0ec94eb20ab92b730b186",
"content_id": "b5eb0a80a507b5b39dd6f2acc0a88bb71d60eca6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1092,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 34,
"path": "/Hello_World/users/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth import authenticate, login, logout\nfrom django.http.response import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom .models import *\n# Create your views here.\n\n\ndef index(req):\n if not req.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"users:login_view\"))\n else:\n return render(req, \"users/user.html\")\n\n\ndef login_view(req):\n if req.method == \"POST\":\n username = req.POST[\"username\"]\n password = req.POST[\"password\"]\n user = authenticate(req, username=username, password=password)\n if user is not None:\n login(req, user)\n return HttpResponseRedirect(reverse(\"users:index\"))\n else:\n return render(req, \"users/login.html\",\n {\"message\": \"Invalid Credentials\"})\n return render(req, \"users/login.html\")\n\n\ndef logout_view(req):\n logout(req)\n return render(req, \"users/login.html\",\n {\"message\": \"Logged Out Successfully\"})\n"
},
{
"alpha_fraction": 0.6499999761581421,
"alphanum_fraction": 0.6499999761581421,
"avg_line_length": 32.33333206176758,
"blob_id": "d4cdd715021296ab0f3228e6e41db251a6cf0cc2",
"content_id": "db2360b13db80b5fc4635ace2c721f8103a4c152",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1000,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 30,
"path": "/Hello_World/flights/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom .models import *\n# Create your views here.\n\n\ndef index(req):\n return render(req, \"flights/index.html\", {\"flights\": Flight.objects.all()})\n\n\ndef flight(req, flight_id):\n flight = Flight.objects.get(id=flight_id)\n return render(\n req, \"flights/flight.html\", {\n \"flight\": flight,\n \"passengers\": flight.passengers.all(),\n \"non_passengers\": Passenger.objects.exclude(flights=flight).all()\n })\n\n\ndef bookFlight(req, flight_id):\n if req.method == \"POST\":\n flight = Flight.objects.get(id=flight_id)\n passenger = Passenger.objects.get(\n pk=int(req.POST[\"passenger\"])\n ) #we can use pk or id same thing as id is primary key but pk is more appropriate so from nxt pk\n passenger.flights.add(flight)\n return HttpResponseRedirect(\n reverse(\"flights:flight\", args=(flight.id, )))\n"
},
{
"alpha_fraction": 0.5601118206977844,
"alphanum_fraction": 0.5657036304473877,
"avg_line_length": 27.236841201782227,
"blob_id": "b32d947c81929cf227143e1c196fecda694403ab",
"content_id": "90e58736861e6527e56c72d92fde66e87f4577ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1073,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 38,
"path": "/Hello_World/modelBasics/models.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.db.models.aggregates import Max\nfrom django.db.models.deletion import CASCADE\n\n# Create your models here.\n\ngender_choices = [\n ('M', 'Male'),\n ('F', 'Female'),\n ('O', 'Other'),\n]\n\n\nclass Person(models.Model):\n gender_choices = gender_choices\n first_name = models.CharField(max_length=15)\n last_name = models.CharField(max_length=15)\n gender = models.CharField(max_length=1,\n choices=gender_choices,\n help_text=\"Select your gender\")\n\n def __str__(self):\n return self.first_name\n\n\nclass Employee(models.Model):\n dept_choices = [\n ('FS', \"Full Stack\"),\n ('FE', 'Front End'),\n ('BE', 'Backend'),\n ]\n profile = models.ForeignKey('Person', on_delete=models.CASCADE)\n department = models.CharField(max_length=2,\n choices=dept_choices,\n default=\"department\")\n\n def __str__(self):\n return self.profile.first_name + '(' + self.department + ')'\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 27,
"blob_id": "5a3f0b1acd106c85c402525b473d3c902eee67c8",
"content_id": "1f6fa383debdd6d1581d79d4e135ef7bd9391185",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 9,
"path": "/Hello_World/users/urls.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\napp_name = \"users\"\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login_view\", views.login_view, name=\"login_view\"),\n path(\"logout_view\", views.logout_view, name=\"logout_view\")\n]\n"
},
{
"alpha_fraction": 0.5325077176094055,
"alphanum_fraction": 0.548606812953949,
"avg_line_length": 35.70454406738281,
"blob_id": "ee3fc4a12ee4cab556435916cfae3b4e3a563af5",
"content_id": "028b682de37f3d79cc4c3c72001beb578dd1bdb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1615,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 44,
"path": "/Car_Pooling/home/migrations/0001_initial.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "# Generated by Django 3.2.5 on 2021-07-14 14:11\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Cities',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('city', models.CharField(max_length=15)),\n ('code', models.CharField(max_length=4)),\n ],\n ),\n migrations.CreateModel(\n name='Ride',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('origin', models.CharField(max_length=50)),\n ('destination', models.CharField(max_length=50)),\n ('journey_date', models.DateField()),\n ('seat_available', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='OfferRide',\n fields=[\n ('ride_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='home.ride')),\n ('origin_location', models.CharField(max_length=50)),\n ('destination_location', models.CharField(max_length=50)),\n ('contact', models.IntegerField()),\n ('fare', models.IntegerField()),\n ],\n bases=('home.ride',),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6993007063865662,
"alphanum_fraction": 0.6993007063865662,
"avg_line_length": 19.428571701049805,
"blob_id": "eb674b9e86da093550cf5eb8cbe6e2a71f272278",
"content_id": "19d24b0be528ceedab4cf182272d3fb81863acc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 7,
"path": "/Car_Pooling/createride/urls.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\napp_name = \"createride\"\nurlpatterns = [\n path('', views.createride, name=\"createride\"),\n]\n"
},
{
"alpha_fraction": 0.6701030731201172,
"alphanum_fraction": 0.6701030731201172,
"avg_line_length": 23.25,
"blob_id": "adbd3686cda8138df064664d59bd75eb7c0f9bb8",
"content_id": "bfb8759d9e978b3e6d1e54236c44e93a3672b8a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 194,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 8,
"path": "/Car_Pooling/getride/urls.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\napp_name = \"getride\"\nurlpatterns = [\n path(\"\", views.getRide, name=\"getRide\"),\n path(\"chat/<str:queryparams>\", views.chat, name=\"chat\")\n]\n"
},
{
"alpha_fraction": 0.650943398475647,
"alphanum_fraction": 0.6521226167678833,
"avg_line_length": 28.241378784179688,
"blob_id": "8c1462771135957ce77d6f9871d6005fef743afb",
"content_id": "ff1c84dc97bd77739e2a30e7f57a09be96222a6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 29,
"path": "/Hello_World/connect/views.py",
"repo_name": "Suman2023/Lets-Django",
"src_encoding": "UTF-8",
"text": "from connect.models import Room\nimport json\nfrom django.shortcuts import render\nfrom django.utils.safestring import mark_safe\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\ndef index(request):\n return render(request, 'connect/index.html')\n\n\n@login_required\ndef room(request, room_name):\n try:\n room = Room.objects.create(\n group_admin=request.user,\n group_name=room_name,\n )\n except:\n room = Room.objects.filter(group_name=room_name)[0]\n print(\"in except mode\", room)\n\n return render(\n request, 'connect/room.html', {\n 'room_name_json': mark_safe(json.dumps(room_name)),\n 'username': mark_safe(json.dumps(request.user.username)),\n 'group_admin': mark_safe(json.dumps(room.group_admin.username)),\n })\n"
}
] | 28 |
bandyp/polysolver | https://github.com/bandyp/polysolver | 7a5241d3c4b3a2ffa9f83acb7ade0b0abe823dd5 | 10923d9647d5512c9f4d09411f9e5239eaec7b0f | 21465f233aafb11a1871d4304d9b67ef37ae5f4f | refs/heads/master | 2022-10-17T00:31:42.630244 | 2020-06-17T06:38:33 | 2020-06-17T06:38:33 | 272,725,410 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6694678068161011,
"alphanum_fraction": 0.6694678068161011,
"avg_line_length": 24.571428298950195,
"blob_id": "aae3ae39944f89e403aec450dad44b903491b489",
"content_id": "12109bd5536c308f78040e383b41e28b4e64611e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 357,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 14,
"path": "/flaskpoly.py",
"repo_name": "bandyp/polysolver",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template\nfrom poly import x, y, coeff, mymodel, rootsp, myline, fig\n\napp = Flask(__name__)\n\n\[email protected]('/')\[email protected]('/home')\ndef polynomial_solver():\n return render_template('home.html', x=x, y=y, coeff=coeff, mymodel=mymodel, rootsp=rootsp, myline=myline, fig=fig)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)"
},
{
"alpha_fraction": 0.7535121440887451,
"alphanum_fraction": 0.7560663819313049,
"avg_line_length": 35.395347595214844,
"blob_id": "00300f44406a246ee5c714783da74bd543e03d87",
"content_id": "3b61ab599d4e0b3fa8e6e03f090a2c72dfd7c217",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1566,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 43,
"path": "/README.md",
"repo_name": "bandyp/polysolver",
"src_encoding": "UTF-8",
"text": "# PolySolver\nSimple app to plot selected x and y coordinates with a bit of calculus added in for good measure. \n\n## UX\nI wanted to keep it very simple and not take away from the objective of the task. Simple colours and easy to read results. \n\n### Users\nExpected users could be anyone who wants to find the equation for a polynomial, or calculate to 0.\n\n## Features\n* Shows the coordinates and task answers on the top row, followed by a scatter plot graph with line of best fit.\n\n## Technology Used\n* HTML - for the sytructure of the content of the page\n* CSS3 - for a little styling that I ddin't use bootstrap for\n* Bootstrap - framework for responsiveness and the rest of the styling - as requested in the task\n* Git - for version control\n* GitHub - to host the repository\n* Pycharm - as the IDE\n* Python - to do the maths\n* Flask - to help the front and backend work together\n* Sqlite3 - to store the mock database\n\n## Deployment\nI used Github for the deployment.\n\nThe process involved:\n* Hosting a git repository on GitHub.\n* On the Github repository go to the settings page and GitHub Pages section.\n* Change source to master branch.\n* Link is provided to page.\n\nTo deploy your own version:\n* Visit the repository [here](https://github.com/bandyp/polysolver)\n* Click 'clone' to copy\n* Open your IDE\n* Type 'git clone https://github.com/bandyp/polysolver.git' in the terminal window of the root directory\n\n## Credits\n\n### Acknowledgments\n* At certain points I sought the help of stack overflow and youtube\n* W3Schools - for various bits of Python and SQL code\n\n"
}
] | 2 |
ashishgopalhattimare/SPOJ_SUMMARIZER | https://github.com/ashishgopalhattimare/SPOJ_SUMMARIZER | 4b696147e53a11ca5765fb5a49310648e8eb290b | 217ac09f0aab27651d5a801eb484b261811d0778 | f9577adf91f030a24ee181e97a9cec0829f64914 | refs/heads/master | 2020-05-01T02:31:31.110193 | 2019-03-18T15:02:45 | 2019-03-18T15:02:45 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6403893232345581,
"alphanum_fraction": 0.6496350169181824,
"avg_line_length": 22.895349502563477,
"blob_id": "397a5bf2f1d7cd71e8e2867a895882f98dd10b93",
"content_id": "c1c9f72d160e353dfed7242090c43cd0584c54b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2055,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 86,
"path": "/script.py",
"repo_name": "ashishgopalhattimare/SPOJ_SUMMARIZER",
"src_encoding": "UTF-8",
"text": "import requests\nimport os\nimport sys\nimport pandas as pd\nfrom tabulate import tabulate\nfrom pyfiglet import Figlet\nfrom bs4 import BeautifulSoup\n\ndef progbar(curr, total, full_progbar):\n frac = curr/total\n filled_progbar = round(frac*full_progbar)\n print('\\r', '#'*filled_progbar + '-'*(full_progbar-filled_progbar), '[{:>7.2%}]'.format(frac), end='')\n\nos.system('clear')\n\nf = Figlet(font='slant')\nprint(f.renderText('SPOJ SUMMARIZER'))\n\nusername = input(\"What is your spoj handle? \")\nprint()\npage = requests.get(\"https://www.spoj.com/users/\"+username)\n\n\nsoup = BeautifulSoup(page.content, 'html.parser')\n\nprofile = soup.find(id='user-profile-left')\nif profile is None:\n print(\"No such user exists\")\n exit()\nprofile_stats = profile.find_all('p')\nstats=[]\nfor i in profile_stats:\n stats.append(i.get_text())\nprint('Welcome ',username)\nprint(stats[3])\nprint(stats[2])\nprint(stats[1])\nprint(stats[0])\nprint('Analyzing your performance...')\n\nproblem_data = soup.find(class_='table-condensed')\nproblems = problem_data.find_all('a')\nproblem_codes = []\nfor i in problems:\n problem_codes.append(i.get_text())\nmaxp=len(problem_codes)\ncc=1\ntopics={}\ntopics['#untagged']=0\n\nproblem_codes = [x for x in problem_codes if x!='']\n\nfor code in problem_codes:\n question = requests.get('https://www.spoj.com/problems/'+code)\n soup = BeautifulSoup(question.content, 'html.parser')\n tag_area = soup.find(id='problem-tags')\n tag_data = tag_area.find_all('a')\n tags=[]\n for e in tag_data:\n tags.append(e.get_text())\n for tag in tags:\n if tag in topics:\n topics[tag]+=1\n else:\n topics[tag]=1\n if len(tags)==0:\n topics['#untagged']+=1\n progbar(cc,maxp,70)\n cc+=1\n sys.stdout.flush()\nprogbar(maxp,maxp,70)\nprint()\ntag_d = []\ncount_d = []\nfor tag,count in topics.items():\n tag_d.append(tag[1:])\n count_d.append(count)\n\ndf = pd.DataFrame(\n {\n 'Topics':tag_d,\n 'No of problems solved':count_d\n }\n)\n\nprint(tabulate(df, headers='keys', tablefmt='psql'))\n"
}
] | 1 |
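The `progbar` helper in script.py above redraws a single console line by rewinding with a carriage return. Exercising the same helper outside the scraper shows the mechanics (stdlib only; the sleep is just to make the redraw visible):

```python
import sys
import time

def progbar(curr, total, full_progbar):
    # Same approach as script.py: '\r' rewinds to column 0 so each
    # call overwrites the previous bar instead of printing a new line.
    frac = curr / total
    filled_progbar = round(frac * full_progbar)
    print('\r', '#' * filled_progbar + '-' * (full_progbar - filled_progbar),
          '[{:>7.2%}]'.format(frac), end='')

for step in range(1, 51):
    progbar(step, 50, 70)
    sys.stdout.flush()
    time.sleep(0.02)
print()
```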
ajkavanagh/python-jws | https://github.com/ajkavanagh/python-jws | 4adebace1f2b10d677dbf620273c3298c2424ecb | 128a4172fe4a4e37877af6757c24e9885931bf22 | 73675886e55c513f5022cb89bb19acc251f0cdd4 | refs/heads/master | 2021-01-19T07:24:57.358744 | 2014-03-25T11:14:21 | 2014-03-25T11:14:21 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6260762810707092,
"alphanum_fraction": 0.6703566908836365,
"avg_line_length": 25.225807189941406,
"blob_id": "a530252fbf8fd34b64ccdf653ce48be701f49b4a",
"content_id": "0a3ffb1d2cf7d194d38deb94c1f8083610f2fbcc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 813,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 31,
"path": "/jws/utils.py",
"repo_name": "ajkavanagh/python-jws",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\n\nimport base64\nimport json\n\nimport sys\nif sys.version < '3':\n text_type = unicode\n binary_type = str\nelse:\n text_type = str\n binary_type = bytes\n\ndef to_bytes_2and3(s):\n if type(s) != binary_type:\n s = bytes(s, 'UTF-8')\n return s\n\ndef base64url_decode(input):\n input = to_bytes_2and3(input)\n input += b'=' * (4 - (len(input) % 4))\n return base64.urlsafe_b64decode(input)\ndef base64url_encode(input):\n return base64.urlsafe_b64encode(to_bytes_2and3(input)).replace(b'=', b'')\n\ndef to_json(a): return json.dumps(a)\ndef from_json(a): return json.loads(a)\ndef to_base64(a): return base64url_encode(a)\ndef from_base64(a): return base64url_decode(a)\ndef encode(a): return to_base64(to_json(a))\ndef decode(a): return from_json(from_base64(a))\n"
}
] | 1 |
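utils.py above strips the `=` padding on encode and restores it before decoding, the usual base64url convention in JOSE. A self-contained round-trip in the same spirit (stdlib only; `-len(s) % 4` is used here so nothing is appended when the input is already a multiple of four, a slight tightening of the record's `4 - (len % 4)`):

```python
import base64

def b64url_encode(data: bytes) -> bytes:
    # Encode, then drop the '=' padding, JOSE-style.
    return base64.urlsafe_b64encode(data).rstrip(b"=")

def b64url_decode(data: bytes) -> bytes:
    # Restore just enough '=' characters to reach a multiple of 4.
    return base64.urlsafe_b64decode(data + b"=" * (-len(data) % 4))

data = b"hello world"
token = b64url_encode(data)
print(token)                         # b'aGVsbG8gd29ybGQ' -- '=' stripped
assert b64url_decode(token) == data  # padding restored transparently
```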
Jalonzpa/education-helpers | https://github.com/Jalonzpa/education-helpers | 37a47c21655d438d9f413fc2bc076ca6f4d3e88d | 02cbe23b23ec90919ea1afda2dac32b5b7635b26 | 050179819275344a99ce526fe6bd33b4aa584813 | refs/heads/master | 2020-03-21T14:25:03.293532 | 2018-06-25T22:25:59 | 2018-06-25T22:25:59 | 138,656,104 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8104575276374817,
"alphanum_fraction": 0.8104575276374817,
"avg_line_length": 75.5,
"blob_id": "6db0dc056b9b52be57c6d2cd6455528d82658859",
"content_id": "e06363db1d6de06cf9a19a1e4816f0c6416e2f8d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 153,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Jalonzpa/education-helpers",
"src_encoding": "UTF-8",
"text": "# education-helpers\nThis is a little collection of hastily-created scripts that will make some things at least a tiny bit easier for your academic life.\n"
},
{
"alpha_fraction": 0.7445651888847351,
"alphanum_fraction": 0.7445651888847351,
"avg_line_length": 51.57143020629883,
"blob_id": "e5f808514e71aef2a2091261a880a9406895c17c",
"content_id": "7daaea88a77c0519edaf10bbcd9846d9f56b0846",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 736,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 14,
"path": "/dictionary.py",
"repo_name": "Jalonzpa/education-helpers",
"src_encoding": "UTF-8",
"text": "# Make sure to first install PyDictionary with \"pip install PyDictionary\"\n# Ever have a huge list of words to define? Don't feel like using Google and constantly waste precious time by deleting the\n# old word and typing in the new one? Well, with this simple, barely working script, you don't have to delete that old word!\n# Now you can just type the new one in and instantly get your result! Sure, it's not a lot of time saved, but it all adds up,\n# right?\n\nfrom PyDictionary import PyDictionary # PyDictionary is a dictionary module that's easy to use with Python\ndictionary=PyDictionary()\n\nwhile True:\n print(\"What do you want to know the definition of?\\n\")\n word = input()\n print(dictionary.meaning(word))\n word = \"\"\n"
}
] | 2 |
alex-de-large/freecad | https://github.com/alex-de-large/freecad | 0d795729feb3a7d2fdabc40539268dafdceec73f | b7c6ba3ca0c880c2cc223af9b3d09a87c0031cbf | 77adbb00e0a903358569e75e567dd3c7643779ee | refs/heads/master | 2023-03-06T20:13:54.801263 | 2021-02-18T18:10:23 | 2021-02-18T18:10:23 | 340,117,281 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5292096138000488,
"alphanum_fraction": 0.5594501495361328,
"avg_line_length": 28.100000381469727,
"blob_id": "ed320a22ecfbc4f2581d2f9c25504cf3fbc5fa31",
"content_id": "b3f4505fed3afcc4059038628ded0baad59329d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1455,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 50,
"path": "/fractals-test.py",
"repo_name": "alex-de-large/freecad",
"src_encoding": "UTF-8",
"text": "import FreeCAD\nfrom FreeCAD import Vector, Placement, Rotation\nimport Part\n\n\ndef pyramid(side: int, height: int, s: Vector):\n doc = FreeCAD.activeDocument()\n v = doc.addObject(\"Part::Vertex\", \"Vertex\")\n offset = side / 2\n v.Placement = Placement(Vector(s.x + offset, s.y + offset, s.z + height), Rotation(0, 0, 1))\n\n vec1 = Vector(s.x, s.y, s.z)\n vec2 = Vector(s.x, s.y + side, s.z)\n vec3 = Vector(s.x + side, s.y + side, s.z)\n vec4 = Vector(s.x + side, s.y, s.z)\n\n Part.show(Part.makePolygon([vec1, vec2, vec3, vec4, vec1]))\n obj = doc.Objects[-1]\n\n loft = doc.addObject(\"Part::Loft\", \"Pyramid\")\n loft.Sections = [v, obj]\n loft.Solid = True\n loft.Ruled = True\n loft.Closed = False\n\n\ndef sierpinski_3d(n: int):\n\n def sierpinski_supp(i, side, height, s: Vector):\n if i == 0:\n pyramid(side, height, s)\n return\n\n new_side = side / 2\n new_height = height / 2\n\n vec1 = Vector(s.x, s.y, s.z)\n vec2 = Vector(s.x, s.y + new_side, s.z)\n vec3 = Vector(s.x + new_side, s.y + new_side, s.z)\n vec4 = Vector(s.x + new_side, s.y, s.z)\n vec5 = Vector(s.x + new_side / 2, s.y + new_side / 2, s.z + new_height)\n vecs = [vec1, vec2, vec3, vec4, vec5]\n for vec in vecs:\n sierpinski_supp(i - 1, new_side, new_height, vec)\n\n sierpinski_supp(n, 1000, 1000, Vector(0, 0, 0))\n\n\nif __name__ == \"__main__\":\n sierpinski_3d(5)\n"
}
] | 1 |
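In `sierpinski_3d` above, each recursion level replaces a pyramid with five half-size pyramids (the four corners plus the apex vector), so a depth-n call asks FreeCAD to build 5**n lofts. A FreeCAD-free sanity check of that growth (mirrors only the recursion shape, not the geometry):

```python
def pyramid_count(depth: int) -> int:
    # One solid at the base case; five recursive calls otherwise,
    # matching the five Vectors that sierpinski_supp fans out to.
    if depth == 0:
        return 1
    return 5 * pyramid_count(depth - 1)

for n in range(6):
    print(n, pyramid_count(n))        # 1, 5, 25, 125, 625, 3125
assert pyramid_count(5) == 5 ** 5     # the script's depth of 5 -> 3125 solids
```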
nathanhelms0910/LoadoutManager | https://github.com/nathanhelms0910/LoadoutManager | 522886da4c69d03a7fcd66c7c4f4a94db683f7a2 | c8ca9c993aa75eea5d83ab60eb672ce233b930d0 | 386fb746c7ff07970abf434ed441cf465aac9e87 | refs/heads/main | 2023-07-16T07:57:06.255975 | 2021-08-13T17:28:44 | 2021-08-13T17:28:44 | 395,736,777 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6447409987449646,
"alphanum_fraction": 0.6483135223388672,
"avg_line_length": 38.987396240234375,
"blob_id": "bd06ecdb72658bcb4101449fa320603dee467765",
"content_id": "ec39e4950a4c60555cfc7d30293cf2fe9feaa430",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9517,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 238,
"path": "/loadoutmngr.py",
"repo_name": "nathanhelms0910/LoadoutManager",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python3\n\nimport sqlite3 as sql\nfrom tkinter import *\nimport sys\n\nclass ManageApp(Tk):\n def __init__(self, *args, **kwargs):\n Tk.__init__(self, *args, **kwargs)\n container = Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n self.frames = {}\n\n self.frames[\"FrontPage\"] = FrontPage(parent=container, controller=self)\n self.frames[\"FrontPage\"].grid(row=0, column=0, sticky=\"nsew\")\n self.frames[\"HelpPage\"] = HelpPage(parent=container, controller=self)\n self.frames[\"HelpPage\"].grid(row=0, column=0, sticky=\"nsew\")\n self.frames[\"ViewArsenals\"] = ViewArsenals(parent=container, controller=self)\n self.frames[\"ViewArsenals\"].grid(row=0, column=0, sticky=\"nsew\")\n self.frames[\"ViewArmory\"] = ViewArmory(parent=container, controller=self)\n self.frames[\"ViewArmory\"].grid(row=0, column=0, sticky=\"nsew\")\n self.frames[\"AddArsenal\"] = AddArsenal(parent=container, controller=self)\n self.frames[\"AddArsenal\"].grid(row=0, column=0, sticky=\"nsew\")\n self.frames[\"AddArmory\"] = AddArmory(parent=container, controller=self)\n self.frames[\"AddArmory\"].grid(row=0, column=0, sticky=\"nsew\")\n\n self.show_frame(\"FrontPage\")\n \n \n\n def show_frame(self, page):\n frame = self.frames[page]\n frame.tkraise()\n menu = frame.menubar(self)\n self.configure(menu=menu)\n\n\nclass FrontPage(Frame):\n\n def menubar(self, controller):\n menu = Menu(controller)\n fileMenu = Menu(menu)\n fileMenu.add_command(label=\"Help\", command=lambda:controller.show_frame(\"HelpPage\"))\n fileMenu.add_command(label=\"Quit\", command=self.terminate)\n menu.add_cascade(label=\"File\", menu=fileMenu)\n\n viewMenu = Menu(menu)\n viewMenu.add_command(label=\"Arsenals\", command=lambda:controller.show_frame(\"ViewArsenals\"))\n viewMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"ViewArmory\"))\n menu.add_cascade(label=\"View\", menu=viewMenu)\n\n addMenu = Menu(menu)\n addMenu.add_command(label=\"Arsenal\", command=lambda:controller.show_frame(\"AddArsenal\"))\n addMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"AddArmory\"))\n menu.add_cascade(label=\"Add\", menu=addMenu)\n return menu\n\n def __init__(self, parent, controller):\n Frame.__init__(self, parent)\n self.controller = controller\n \n #self.title(\"Loadout Manager\")\n\n def terminate(self):\n print(\"Terminating Program...\")\n exit()\n\nclass HelpPage(Frame):\n def menubar(self, controller):\n menu = Menu(controller)\n fileMenu = Menu(menu)\n fileMenu.add_command(label=\"Help\", command=lambda:controller.show_frame(\"HelpPage\"))\n fileMenu.add_command(label=\"Quit\", command=self.terminate)\n menu.add_cascade(label=\"File\", menu=fileMenu)\n\n viewMenu = Menu(menu)\n viewMenu.add_command(label=\"Arsenals\", command=lambda:controller.show_frame(\"ViewArsenals\"))\n viewMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"ViewArmory\"))\n menu.add_cascade(label=\"View\", menu=viewMenu)\n\n addMenu = Menu(menu)\n addMenu.add_command(label=\"Arsenal\", command=lambda:controller.show_frame(\"AddArsenal\"))\n addMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"AddArmory\"))\n menu.add_cascade(label=\"Add\", menu=addMenu)\n return menu\n\n def __init__(self, parent, controller):\n Frame.__init__(self, parent)\n self.controller = controller\n \n label = Label(self, text=\"Help Page\")\n label.pack(side=\"top\", 
fill=\"x\", pady=10)\n\n homeButton = Button(self, text=\"Return Home\", command=lambda:controller.show_frame(\"FrontPage\"))\n homeButton.pack()\n \n def terminate(self):\n print(\"Terminating Program...\")\n exit()\n\nclass ViewArsenals(Frame):\n def menubar(self, controller):\n menu = Menu(controller)\n fileMenu = Menu(menu)\n fileMenu.add_command(label=\"Help\", command=lambda:controller.show_frame(\"HelpPage\"))\n fileMenu.add_command(label=\"Quit\", command=self.terminate)\n menu.add_cascade(label=\"File\", menu=fileMenu)\n\n viewMenu = Menu(menu)\n viewMenu.add_command(label=\"Arsenals\", command=lambda:controller.show_frame(\"ViewArsenals\"))\n viewMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"ViewArmory\"))\n menu.add_cascade(label=\"View\", menu=viewMenu)\n\n addMenu = Menu(menu)\n addMenu.add_command(label=\"Arsenal\", command=lambda:controller.show_frame(\"AddArsenal\"))\n addMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"AddArmory\"))\n menu.add_cascade(label=\"Add\", menu=addMenu)\n return menu\n\n def __init__(self, parent, controller):\n Frame.__init__(self, parent)\n self.controller = controller\n \n label = Label(self, text=\"All Arsenals\")\n label.pack(side=\"top\", fill=\"x\", pady=10)\n\n homeButton = Button(self, text=\"Return Home\", command=lambda:controller.show_frame(\"FrontPage\"))\n homeButton.pack()\n \n def terminate(self):\n print(\"Terminating Program...\")\n exit()\n\nclass ViewArmory(Frame):\n def menubar(self, controller):\n menu = Menu(controller)\n fileMenu = Menu(menu)\n fileMenu.add_command(label=\"Help\", command=lambda:controller.show_frame(\"HelpPage\"))\n fileMenu.add_command(label=\"Quit\", command=self.terminate)\n menu.add_cascade(label=\"File\", menu=fileMenu)\n\n viewMenu = Menu(menu)\n viewMenu.add_command(label=\"Arsenals\", command=lambda:controller.show_frame(\"ViewArsenals\"))\n viewMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"ViewArmory\"))\n menu.add_cascade(label=\"View\", menu=viewMenu)\n\n addMenu = Menu(menu)\n addMenu.add_command(label=\"Arsenal\", command=lambda:controller.show_frame(\"AddArsenal\"))\n addMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"AddArmory\"))\n menu.add_cascade(label=\"Add\", menu=addMenu)\n return menu\n\n def __init__(self, parent, controller):\n Frame.__init__(self, parent)\n self.controller = controller\n \n label = Label(self, text=\"All Armory\")\n label.pack(side=\"top\", fill=\"x\", pady=10)\n\n homeButton = Button(self, text=\"Return Home\", command=lambda:controller.show_frame(\"FrontPage\"))\n homeButton.pack()\n \n def terminate(self):\n print(\"Terminating Program...\")\n exit()\n\nclass AddArsenal(Frame):\n def menubar(self, controller):\n menu = Menu(controller)\n fileMenu = Menu(menu)\n fileMenu.add_command(label=\"Help\", command=lambda:controller.show_frame(\"HelpPage\"))\n fileMenu.add_command(label=\"Quit\", command=self.terminate)\n menu.add_cascade(label=\"File\", menu=fileMenu)\n\n viewMenu = Menu(menu)\n viewMenu.add_command(label=\"Arsenals\", command=lambda:controller.show_frame(\"ViewArsenals\"))\n viewMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"ViewArmory\"))\n menu.add_cascade(label=\"View\", menu=viewMenu)\n\n addMenu = Menu(menu)\n addMenu.add_command(label=\"Arsenal\", command=lambda:controller.show_frame(\"AddArsenal\"))\n addMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"AddArmory\"))\n 
menu.add_cascade(label=\"Add\", menu=addMenu)\n return menu\n\n def __init__(self, parent, controller):\n Frame.__init__(self, parent)\n self.controller = controller\n \n label = Label(self, text=\"Add Arsenal\")\n label.pack(side=\"top\", fill=\"x\", pady=10)\n\n homeButton = Button(self, text=\"Return Home\", command=lambda:controller.show_frame(\"FrontPage\"))\n homeButton.pack()\n \n def terminate(self):\n print(\"Terminating Program...\")\n exit()\n\nclass AddArmory(Frame):\n def menubar(self, controller):\n menu = Menu(controller)\n fileMenu = Menu(menu)\n fileMenu.add_command(label=\"Help\", command=lambda:controller.show_frame(\"HelpPage\"))\n fileMenu.add_command(label=\"Quit\", command=self.terminate)\n menu.add_cascade(label=\"File\", menu=fileMenu)\n\n viewMenu = Menu(menu)\n viewMenu.add_command(label=\"Arsenals\", command=lambda:controller.show_frame(\"ViewArsenals\"))\n viewMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"ViewArmory\"))\n menu.add_cascade(label=\"View\", menu=viewMenu)\n\n addMenu = Menu(menu)\n addMenu.add_command(label=\"Arsenal\", command=lambda:controller.show_frame(\"AddArsenal\"))\n addMenu.add_command(label=\"Armory\", command=lambda:controller.show_frame(\"AddArmory\"))\n menu.add_cascade(label=\"Add\", menu=addMenu)\n return menu\n\n def __init__(self, parent, controller):\n Frame.__init__(self, parent)\n self.controller = controller\n \n label = Label(self, text=\"Add Armory\")\n label.pack(side=\"top\", fill=\"x\", pady=10)\n\n homeButton = Button(self, text=\"Return Home\", command=lambda:controller.show_frame(\"FrontPage\"))\n homeButton.pack()\n \n def terminate(self):\n print(\"Terminating Program...\")\n exit()\n\napp = ManageApp()\napp.geometry(\"400x300\")\napp.mainloop()\n"
},
{
"alpha_fraction": 0.7555555701255798,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 29,
"blob_id": "f27afbb0fb5b8485bb9d73e32b368231ff3f3c27",
"content_id": "209c333d6f0ab1e97320bed2162dcd8ac6c77659",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 90,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 3,
"path": "/README.md",
"repo_name": "nathanhelms0910/LoadoutManager",
"src_encoding": "UTF-8",
"text": "Loadout Manager in progress for the game Destiny 2:\n\nUpdate 1.01: GUI Framework completed\n"
}
] | 2 |
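loadoutmngr.py above stacks every page Frame in the same grid cell and raises the requested one with `tkraise()`. A stripped-down, runnable sketch of that same pattern (stdlib tkinter; the two page names are placeholders, not the app's real pages):

```python
import tkinter as tk

class App(tk.Tk):
    def __init__(self):
        super().__init__()
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        self.frames = {}
        for name, other in (("PageA", "PageB"), ("PageB", "PageA")):
            frame = tk.Frame(container)
            tk.Label(frame, text=name).pack(pady=10)
            tk.Button(frame, text=f"Go to {other}",
                      command=lambda o=other: self.show_frame(o)).pack()
            # Every page occupies the same cell; only the raised one shows.
            frame.grid(row=0, column=0, sticky="nsew")
            self.frames[name] = frame
        self.show_frame("PageA")

    def show_frame(self, name):
        self.frames[name].tkraise()

if __name__ == "__main__":
    App().mainloop()
```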
logarithm27/Object_Store_Versioning | https://github.com/logarithm27/Object_Store_Versioning | 9e1dd248027731bebf7ff6c77e37203c063ce8c6 | 4826d45a9a3839bee1cbcec2876777fedf0e8dc1 | 04c48dd950fbe7b119feb4bbe0ccf1836393c384 | refs/heads/main | 2023-01-03T15:34:42.594839 | 2020-10-30T03:39:44 | 2020-10-30T03:39:44 | 308,130,690 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5675291419029236,
"alphanum_fraction": 0.5732157826423645,
"avg_line_length": 41.849998474121094,
"blob_id": "abf73142afcfab1fb4103302b09a2697da7f5d50",
"content_id": "51d0b51d5653ad9d4d7791541a879733a0f07dde",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7034,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 160,
"path": "/client.py",
"repo_name": "logarithm27/Object_Store_Versioning",
"src_encoding": "UTF-8",
"text": "import socket\r\nfrom menu import *\r\nimport pickle\r\nimport os\r\n\r\n''' I write some few comments here \r\nbecause I have already mentioned it in the server file\r\nsince the latter contains some symmetric functions '''\r\n\r\nADDR_PORT = (socket.gethostbyname(socket.gethostname()),1234)\r\nFORMAT = 'utf-8'\r\nHEADER_SIZE = 16\r\nQUIT_MESSAGE = 'quit'\r\nFILE_TRANSFER_ACTIVATED_MSG = \"File transfer Activated\"\r\nST_MODE_W_T = \"Storage mode with transfer activated \"\r\nST_MODE_WITHOUT_T = \"Storage mode without transfer activated \"\r\nSEND_CLIENT_TO_SERVER = \"PUT FILE : TRANSFER FROM CLIENT TO SERVER\"\r\nSEND_SERVER_TO_CLIENT = \"GET FILE : TRANSFER FROM SERVER TO CLIENT\"\r\nCONTINUE = \"CONTINUE\"\r\n\r\nclass Client:\r\n def __init__(self):\r\n # create client\r\n self.client_socket = socket.create_connection(ADDR_PORT)\r\n self.start_client()\r\n\r\n def start_client(self):\r\n self.menu = Menu()\r\n # select the storage mode\r\n self.storage_mode = self.menu.storage_mode()\r\n # to store the response got from the server\r\n self.resp = None\r\n # send the storage mode chose by user\r\n self.send_msg_to_server(self.storage_mode)\r\n self.resp = self.receive_ack_from_server()\r\n # print for debug\r\n print(self.resp)\r\n # checking paths if the given command is PUT or GET\r\n self.command_operation = self.path_checker(self.menu.printing())\r\n # send the command the the server\r\n self.send_msg_to_server(self.command_operation)\r\n # get response from server to know further steps to take\r\n self.resp = self.receive_ack_from_server()\r\n # if the server requested a file ( in case of put command ( with transfer mode ) )\r\n if self.resp == SEND_CLIENT_TO_SERVER:\r\n self.send_file_through_network(self.command_operation)\r\n print(self.receive_ack_from_server())\r\n # if the server will a file ( in case of get command ( with transfer mode ) )\r\n if self.resp == SEND_SERVER_TO_CLIENT:\r\n self.send_msg_to_server('')\r\n self.receive_file_from_server(self.command_operation.split(' ')[1])\r\n # if the input contains the command list\r\n elif self.command_operation.split(' ')[0] == \"list\":\r\n # since listing depends on whether the user have mentioned the name of the object or not\r\n # we call the listing method to do so\r\n self.listing(self.resp)\r\n # if the input contains the delete operation\r\n elif self.command_operation.split(' ')[0] == \"delete\":\r\n print(self.resp)\r\n else:\r\n print(self.resp)\r\n # demanding to user if he wants to quit or continue after the command have been achieved\r\n quit_or_repeat = self.menu.quit()\r\n # if chose to quit\r\n if quit_or_repeat:\r\n # send quit message to server\r\n self.send_msg_to_server(QUIT_MESSAGE)\r\n # close connection\r\n self.client_socket.close()\r\n # else, repeat\r\n elif not quit_or_repeat:\r\n self.send_msg_to_server(CONTINUE)\r\n self.start_client()\r\n\r\n # check if a given path is valid depending on the given command\r\n def path_checker(self,command):\r\n if command.split(' ')[0].lower() == \"get\":\r\n while (not os.path.exists(command.split(' ')[2]) or\r\n not os.path.isdir(command.split(' ')[2])):\r\n print(\"Invalid path, try again : \")\r\n command = input()\r\n if command.split(' ')[0].lower() == \"put\":\r\n while (not os.path.exists(command.split(' ')[2])):\r\n print(\"Invalid path, try again : \")\r\n command = input()\r\n return command\r\n\r\n # listing objects or version of objects\r\n def listing(self, listing_response):\r\n if listing_response == \"Object not found\":\r\n 
print(\"Object not found\")\r\n else :\r\n for element in listing_response:\r\n # if we have retrieved versions of objects\r\n if len(element) > 1:\r\n version = element[0]\r\n # element[1] is the content name and element[2] is the path\r\n content = list(\r\n map(lambda x, y: os.path.join(x, y), element[2].split(\";\")[0:-1], element[1].split(\";\")[0:-1]))\r\n print(f\"Version {str(version)} contains :\")\r\n for c in content:\r\n print(\"\\t\" + c)\r\n # if we have retrieved objects\r\n else:\r\n print(f\"Object {element[0]}\")\r\n\r\n def receive_ack_from_server(self):\r\n receiving_response = True\r\n response_length = 0\r\n i = 0\r\n while receiving_response:\r\n response_from_server = self.client_socket.recv(HEADER_SIZE)\r\n if i == 0 and response_from_server:\r\n response_length = int(response_from_server)\r\n i += 1\r\n full_response = pickle.loads(self.client_socket.recv(response_length))\r\n return full_response\r\n\r\n def send_msg_to_server(self, message):\r\n message_to_send = pickle.dumps(message)\r\n message_to_send = bytes(f'{len(message_to_send):<{HEADER_SIZE}}', FORMAT) + message_to_send\r\n self.client_socket.send(message_to_send)\r\n\r\n def receive_file_from_server(self, file_name):\r\n cwr = os.getcwd()\r\n receiving_response = True\r\n response_length = 0\r\n i = 0\r\n while receiving_response:\r\n receive_file_from_server = self.client_socket.recv(HEADER_SIZE)\r\n if i == 0 and receive_file_from_server:\r\n response_length = int(receive_file_from_server)\r\n i += 1\r\n full_response = pickle.loads(self.client_socket.recv(response_length))\r\n receiving_response = False\r\n path_given_in_get_command = self.command_operation.split(' ')[2]\r\n with open(os.path.join(str(path_given_in_get_command), file_name+\".txt\"), \"wb\") as f:\r\n print('writing...')\r\n for chunk in full_response:\r\n f.write(chunk)\r\n print('writing finished')\r\n print(f\"File received successfully via GET command on {str(os.path.join(str(path_given_in_get_command), file_name))}\")\r\n return str(os.path.join(str(cwr), file_name))\r\n\r\n def send_file_through_network(self,command):\r\n path = command.split(' ')[2]\r\n file_data = []\r\n with open(path, \"rb\") as f:\r\n print('reading ...')\r\n while True:\r\n binary_data_read = f.read(1024)\r\n file_data.append(binary_data_read)\r\n if not binary_data_read:\r\n break\r\n file_data_to_send = pickle.dumps(file_data)\r\n file_data_to_send = bytes(f'{len(file_data_to_send):<{HEADER_SIZE}}', FORMAT) + file_data_to_send\r\n print('reading finished')\r\n self.client_socket.send(file_data_to_send)\r\n\r\nc = Client()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
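client.py above (and the server file that follows) frame every pickled message with a fixed 16-byte, space-padded decimal length header. The two halves of that scheme, runnable without any sockets (same `HEADER_SIZE` constant and f-string padding as the record; the usual caveat applies that unpickling data from an untrusted peer is unsafe):

```python
import pickle

HEADER_SIZE = 16  # matches client.py / server.py

def frame(obj) -> bytes:
    payload = pickle.dumps(obj)
    # Decimal length, left-justified and space-padded to 16 bytes.
    return bytes(f"{len(payload):<{HEADER_SIZE}}", "utf-8") + payload

def unframe(data: bytes):
    length = int(data[:HEADER_SIZE])           # int() tolerates the padding
    return pickle.loads(data[HEADER_SIZE:HEADER_SIZE + length])

msg = frame("put report.txt /tmp")
print(msg[:HEADER_SIZE])                       # e.g. b'34              '
assert unframe(msg) == "put report.txt /tmp"
```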
{
"alpha_fraction": 0.5949996709823608,
"alphanum_fraction": 0.6003091335296631,
"avg_line_length": 50.762413024902344,
"blob_id": "899fd4b507227bdd0ad278382142f694461b29fc",
"content_id": "710ac2524ab768a8a96ae1137b31774ff9bc5eb4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14879,
"license_type": "permissive",
"max_line_length": 127,
"num_lines": 282,
"path": "/server.py",
"repo_name": "logarithm27/Object_Store_Versioning",
"src_encoding": "UTF-8",
"text": "import socket\r\nfrom operations import *\r\nimport threading\r\nimport pickle\r\n\r\n# get the host address by its hostname and set a port\r\nADDR_PORT = (socket.gethostbyname(socket.gethostname()), 1234)\r\n# used as flag to listening to clients\r\nLISTENING = True\r\n# the format of encoding when we receive/retrieve data with client\r\nFORMAT = 'utf-8'\r\n# the header size by which we know how many chunks of bytes we will send/receive\r\nHEADER_SIZE = 16\r\n# the quit message if the user decided to quit\r\nQUIT_MESSAGE = 'quit'\r\n# message to send to client if the client is about to send a file to the server\r\nSEND_CLIENT_TO_SERVER = \"PUT FILE : TRANSFER FROM CLIENT TO SERVER\"\r\n# same as the previous in inverse way\r\nSEND_SERVER_TO_CLIENT = \"GET FILE : TRANSFER FROM SERVER TO CLIENT\"\r\n# acknowledge message to send to client about which storage mode has been created\r\nST_MODE_W_T = \"Storage mode with transfer activated \"\r\nST_MODE_WITHOUT_T = \"Storage mode without transfer activated \"\r\n# continue message if the user wants to maintain connection with server\r\nCONTINUE = \"CONTINUE\"\r\n\r\nclass Server:\r\n def __init__(self):\r\n # init server\r\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.server.bind(ADDR_PORT)\r\n self.server.listen()\r\n # flag to test if we are already connected with a client\r\n self.cnt_with_client = False\r\n while LISTENING:\r\n # if not connected with a client, accept further connection with clients\r\n if not self.cnt_with_client:\r\n print(\"Waiting for a client to connect ...\")\r\n self.client_socket, self.client_address = self.server.accept()\r\n # to store the command received from the client user\r\n self.command = \"\"\r\n # initialise the operations engine\r\n self.op = Operations()\r\n # to store response received from client during the connection\r\n self.resp = None\r\n # to store the storage mode chosen by user\r\n self.transfer_mode = \"\"\r\n # to store the content/file name given in the put command (from the client)\r\n # will be used only in storage mode with transfer\r\n self.put_file_name = \"\"\r\n # to store the path where the file is saved in the server's local system\r\n # also it's used only in storage mode with transfer\r\n self.new_put_path = \"\"\r\n # to store the path where the user have chosen to save its get file\r\n # used in case of storage mode with transfer is activated\r\n self.client_get_path = \"\"\r\n # receive the first message from the client\r\n # it will be the storage mode chose by the user in the client console\r\n self.resp = self.receive_response_from_client()\r\n # for debug (printing the storage mode)\r\n print(self.resp)\r\n # if the storage mode is 2\r\n if self.resp.__eq__(\"2\"):\r\n # set the transfer mode to 'storage with transfer'\r\n self.transfer_mode = \"2\"\r\n # set the transfer mode in the operation's engine to 2\r\n # because its output will depends in this variable for some commands\r\n self.op.transfer_mode = 2\r\n # send to client that the server is acknowledged about the transfer mode\r\n self.send_acknowledge(ST_MODE_W_T)\r\n # if the storage mode is 1 ( do the same as we have done with storage mode 2)\r\n elif self.resp.__eq__(\"1\"):\r\n self.transfer_mode = \"1\"\r\n # tell the operations engine that we will operate remotely\r\n # in order to tell to the server that it have access to the clients' contents\r\n self.op.remote_or_local = \"from_server\"\r\n self.send_acknowledge(ST_MODE_WITHOUT_T)\r\n # get back the command from the 
client\r\n self.resp = self.receive_response_from_client()\r\n # convert the command to a list to distinguish the command itself and its arguments\r\n self.command = self.resp.split(' ')\r\n # perform the operation requested by the client\r\n self.perform_operation_with_transfer_mode()\r\n self.resp = self.receive_response_from_client()\r\n if self.resp.__eq__(QUIT_MESSAGE):\r\n print('Disconnected from the client ')\r\n self.cnt_with_client = False\r\n self.client_socket.close()\r\n if self.resp.__eq__(CONTINUE):\r\n self.cnt_with_client = True\r\n\r\n # self.send_receive(client_socket,client_address)\r\n def perform_operation_with_transfer_mode(self):\r\n # if the command is a \"put\" and the storage is with transfer\r\n if self.command[0] == \"put\":\r\n if self.transfer_mode == \"2\":\r\n # get the put file name given by the client in the input\r\n # the path is always the third element in the after splitting the command into a list\r\n self.put_file_name = ntpath.split(self.command[2].rstrip('/'))[1]\r\n # send to the client that the server is about to send a file to it\r\n # so the client will be prepared\r\n self.send_acknowledge(SEND_CLIENT_TO_SERVER)\r\n # after receiving file from the client, it will be stored in the server\r\n # so we get its path inside the server\r\n self.new_put_path = self.receive_file_from_client(self.put_file_name)\r\n # we still have the old file's path (of the client, which the server can't access to it)\r\n # so we set the path inside the server as the new path of the file\r\n self.command[2] = self.new_put_path\r\n # we perform the put, and we store our object in the database (which is in the server)\r\n ack = self.server_put(self.command, self.op)\r\n # we send to the client information about the performed command put\r\n self.send_acknowledge(ack)\r\n # if the transfer mode is 1, then the server have access to the clients files\r\n # so we perform the put operation normally\r\n elif self.transfer_mode == \"1\":\r\n ack = self.server_put(self.command, self.op)\r\n self.send_acknowledge(ack)\r\n # if the command is get and transfer mode is 2\r\n if self.command[0] == \"get\":\r\n if self.transfer_mode == \"2\":\r\n # we store the path (where to store the object) given by client\r\n self.client_get_path = self.command[2]\r\n # we modify the path and we set it as the current directory (where the server is being executed)\r\n self.command[2] = os.getcwd()\r\n # perform the get operation inside the engine\r\n # we get the path where the object is stored in the server\r\n path_of_get_file_in_server = self.server_get(self.command, self.op)\r\n # if the object name is found and the file was created\r\n if path_of_get_file_in_server != \"Object not Found\":\r\n # let the client to know\r\n self.send_acknowledge(SEND_SERVER_TO_CLIENT)\r\n self.receive_response_from_client()\r\n # send that file to the client\r\n self.send_file_through_network(path_of_get_file_in_server)\r\n # if the object requested by the the client was not found by the engine\r\n else:\r\n self.send_acknowledge(\"Object not Found\")\r\n # perform get command normally if the storage is without transfer\r\n if self.transfer_mode == \"1\":\r\n ack = self.server_get(self.command, self.op)\r\n self.send_acknowledge(ack)\r\n # perform delete and list commands whatever the storage mode is\r\n if self.command[0] == \"delete\":\r\n ack = self.server_delete(self.command, self.op)\r\n self.send_acknowledge(ack)\r\n if self.command[0] == \"list\":\r\n ack = self.server_list(self.command, self.op)\r\n 
self.send_acknowledge(ack)\r\n\r\n # delete command\r\n def server_delete(self, received_command, operation):\r\n acknowledge = ''\r\n # test if the input contains 2 elements (the command and its first optional argument)\r\n # we do the same with all the operations and depending to each one\r\n if len(received_command[1:]) == 1:\r\n object_name = received_command[1]\r\n acknowledge = operation.delete(object_name)\r\n # test if the input contains 3 elements (the command and its two optional arguments)\r\n if len(received_command[1:]) == 2:\r\n object_name, version = received_command[1:]\r\n acknowledge = operation.delete(object_name, int(version))\r\n return acknowledge\r\n\r\n # get command\r\n def server_get(self, received_command, operation):\r\n if len(received_command[1:]) == 3:\r\n object_name, path, version = received_command[1:]\r\n acknowledge = operation.get(object_name, path, int(version))\r\n else:\r\n object_name, path = received_command[1:]\r\n acknowledge = operation.get(object_name, path)\r\n return acknowledge\r\n\r\n # put command\r\n def server_put(self, received_command, operation):\r\n acknowledge = ''\r\n # get the 2 necessary arguments (object_name and path)\r\n object_name, path = received_command[1:3]\r\n # if the storing is with transfer :\r\n # if the put command got 4 arguments from the user\r\n if len(received_command[1:]) == 4:\r\n # take the 2 last arguments\r\n max_versions, policy = received_command[3:]\r\n acknowledge = operation.put(object_name, path, int(max_versions), int(policy))\r\n # if the put command got 3 arguments\r\n if len(received_command[1:]) == 3:\r\n # take the third optional argument\r\n max_versions = received_command[3]\r\n acknowledge = operation.put(object_name, path, int(max_versions))\r\n if len(received_command[1:]) == 2:\r\n object_name, path = received_command[1:]\r\n acknowledge = operation.put(object_name, path)\r\n return acknowledge\r\n\r\n # listing\r\n def server_list(self, received_command, operation):\r\n if len(received_command[1:]) == 1:\r\n object_name = received_command[1]\r\n return operation.list(object_name)\r\n else:\r\n return operation.list()\r\n\r\n # send a message through network\r\n # takes the message as argument\r\n def send_acknowledge(self, acknowledge):\r\n # using pickle, we can transform anything (object, string, dict...) 
to a byte stream\r\n # we use dumps to serialize the message and transform it to a format where it can be easily reconstructed in the client\r\n acknowledge_to_send = pickle.dumps(acknowledge)\r\n # we insert with the serialized message its length,\r\n # followed by a large space alignment to the right that have the size of the header_size, then followed\r\n # by the message to send\r\n # so the client will know at the first reception the length of the message because its stored at the first\r\n # and then can quickly loads it\r\n acknowledge_to_send = bytes(f'{len(acknowledge_to_send):<{HEADER_SIZE}}', FORMAT) + acknowledge_to_send\r\n # send the message to the client\r\n self.client_socket.send(acknowledge_to_send)\r\n\r\n # to receive a message from the client\r\n def receive_response_from_client(self):\r\n receiving_response = True\r\n response_length = 0\r\n i = 0\r\n while receiving_response:\r\n # receive the first HEADER_SIZE bytes chunk of data\r\n # the first chunk contains surely the length of the message and spaces\r\n # example 84______________\r\n response_from_server = self.client_socket.recv(HEADER_SIZE)\r\n if i == 0 and response_from_server:\r\n # we convert the length of the message to int\r\n response_length = int(response_from_server)\r\n i += 1\r\n # we de-serialize and get the entire message by giving the full message length to the recv method\r\n full_response = pickle.loads(self.client_socket.recv(response_length))\r\n # we return the message\r\n return full_response\r\n\r\n # receive file (used in the case of get operation performed)\r\n def receive_file_from_client(self, file_name):\r\n # store the file got from the client in the current directory\r\n cwr = os.getcwd()\r\n receiving_response = True\r\n response_length = 0\r\n i = 0\r\n # pickle is powerful, so we can receive the file data as a list consisting of binary data elements\r\n while receiving_response:\r\n receive_file_from_client = self.client_socket.recv(HEADER_SIZE)\r\n if i == 0 and receive_file_from_client:\r\n response_length = int(receive_file_from_client)\r\n i += 1\r\n full_response = pickle.loads(self.client_socket.recv(response_length))\r\n receiving_response = False\r\n # open the file in write binary mode after receiving all the file data\r\n with open(os.path.join(str(cwr), file_name), \"wb\") as f:\r\n print('writing...')\r\n # for each binary data element in the list of that stores all files data\r\n for chunk in full_response:\r\n # write that data into the file\r\n f.write(chunk)\r\n print(f'File received to server and placed on {str(os.path.join(str(cwr), file_name))}')\r\n return str(os.path.join(str(cwr), file_name))\r\n\r\n # send file to the client\r\n def send_file_through_network(self,path):\r\n # initialize the list that stores the files data\r\n file_data = []\r\n # open the file in read binary mode\r\n with open(path, \"rb\") as f:\r\n print('reading file data ...')\r\n while True:\r\n # read 1024 bytes of file's binary data and store it as an element in the list\r\n binary_data_read = f.read(1024)\r\n file_data.append(binary_data_read)\r\n # if there is no more data to read exit\r\n if not binary_data_read:\r\n break\r\n # convert list to stream of bytes and serialize it\r\n file_data_to_send = pickle.dumps(file_data)\r\n file_data_to_send = bytes(f'{len(file_data_to_send):<{HEADER_SIZE}}', FORMAT) + file_data_to_send\r\n print('File data sent to client')\r\n # send the list\r\n self.client_socket.send(file_data_to_send)\r\n\r\nServer()\r\n"
},
{
"alpha_fraction": 0.47764530777931213,
"alphanum_fraction": 0.4925484359264374,
"avg_line_length": 34.24324417114258,
"blob_id": "33f729c18bf874d0de5be420856838369426c4e9",
"content_id": "c5fee0721a7e7b4e12b3922e982fe829a0c6b287",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2684,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 74,
"path": "/main.py",
"repo_name": "logarithm27/Object_Store_Versioning",
"src_encoding": "UTF-8",
"text": "from operations import *\r\nfrom menu import *\r\n'''\r\nTO TEST THE OPERATIONS ENGINE IN LOCAL SYSTEM, USE THIS FILE\r\nTO TEST THE ENGINE WITH CLIENT SERVER, RUN THE server.py then client.py \r\n'''\r\n\r\ndef run():\r\n op = Operations()\r\n m = Menu()\r\n input_command = m.printing()\r\n input_command = input_command.split(' ')\r\n print(len(input_command[1:]))\r\n # if put\r\n if m.val.__eq__(\"1\"):\r\n object_name, path = input_command[1:3]\r\n if len(input_command[1:]) == 4:\r\n max_versions, policy = input_command[3:]\r\n print(op.put(object_name, path, int(max_versions), int(policy)))\r\n if len(input_command[1:]) == 3:\r\n max_versions = input_command[3]\r\n print(op.put(object_name, path, int(max_versions)))\r\n if len(input_command[1:]) == 2:\r\n object_name, path = input_command[1:]\r\n print(op.put(object_name, path))\r\n # if get\r\n if m.val.__eq__(\"2\"):\r\n if len(input_command[1:]) == 3:\r\n object_name, path, version = input_command[1:]\r\n print(op.get(object_name, path, int(version)))\r\n else:\r\n object_name, path = input_command[1:]\r\n print(op.get(object_name, path))\r\n\r\n\r\n if m.val.__eq__(\"3\"):\r\n if len(input_command[1:]) == 1:\r\n object_name = input_command[1]\r\n print(object_name)\r\n print(op.delete(object_name))\r\n if len(input_command[1:]) == 2:\r\n object_name, version = input_command[1:]\r\n print(op.delete(object_name, int(version)))\r\n\r\n if m.val.__eq__(\"4\"):\r\n list_ = None\r\n if len(input_command[1:]) == 1:\r\n object_name = input_command[1]\r\n list_ = op.list(object_name)\r\n else:\r\n list_ = op.list()\r\n if list_ == \"Object not found\":\r\n print(\"Object not found\")\r\n else:\r\n for element in list_:\r\n # if we have retrieved versions of objects\r\n if len(element) > 1:\r\n version = element[0]\r\n # element[1] is the content name and element[2] is the path\r\n content = list(\r\n map(lambda x, y: os.path.join(x, y), element[2].split(\";\")[0:-1], element[1].split(\";\")[0:-1]))\r\n print(f\"Version {str(version)} contains :\")\r\n for c in content:\r\n print(\"\\t\" + c)\r\n # if we have retrieved objects\r\n else:\r\n print(f\"Object {element[0]}\")\r\n quit_or_not = m.quit()\r\n if quit_or_not:\r\n pass\r\n elif not quit_or_not:\r\n run()\r\n\r\nrun()\r\n\r\n"
},
{
"alpha_fraction": 0.5596190690994263,
"alphanum_fraction": 0.5638095140457153,
"avg_line_length": 53.87234115600586,
"blob_id": "d3feb99180ad831a9de9d82b8987115d27b4268e",
"content_id": "efa1d33bf57a55a69f8d8f23cc1831ed3868bb0b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5250,
"license_type": "permissive",
"max_line_length": 163,
"num_lines": 94,
"path": "/operations.py",
"repo_name": "logarithm27/Object_Store_Versioning",
"src_encoding": "UTF-8",
"text": "from database import *\r\nimport os\r\nimport ntpath\r\nimport platform\r\nimport posixpath\r\nimport macpath\r\n\r\nMAX_ver = 100\r\n\r\nclass Policy():\r\n Global = 1\r\n Dynamic = 2\r\n\r\nclass Operations :\r\n def __init__(self):\r\n self.db = Database()\r\n self.db.connect_db()\r\n self.remote_or_local = \"from_local\"\r\n self.transfer_mode = 1\r\n\r\n #Create object\r\n def put(self,object_name,path,max_obj=MAX_ver,policy=Policy.Global):\r\n path_to_content, content_name = [\"\",\"\"]\r\n # if we operate with client-server, we will not test if the path exists ( the client should do it)\r\n if (os.path.exists(path) and self.remote_or_local.__eq__(\"from_local\")) or self.remote_or_local.__eq__(\"from_server\"):\r\n # removing last slash or backslash so the content_name won't be empty in the db using rstrip\r\n # split the path into file or directory name and its path\r\n if platform.system().__eq__(\"Windows\"):\r\n path_to_content, content_name = ntpath.split(path.rstrip('/'))\r\n elif platform.system().__eq__(\"Linux\"):\r\n path_to_content, content_name = posixpath.split(path.rstrip('/'))\r\n elif platform.system().__eq__(\"Darwin\"):\r\n path_to_content, content_name = macpath.split(path.rstrip('/'))\r\n # the function takes the object name, the path of the file that contains the content,\r\n # the name of the content itself and the function that verifies if the objects exists already or not\r\n return self.db.create_object(object_name,self.db.conn,path_to_content,content_name,self.db.object_name_exists(object_name,self.db.conn),max_obj,policy)\r\n elif (not os.path.exists(path)) and self.remote_or_local.__eq__(\"from_local\"):\r\n return \"Wrong Path\"\r\n return None\r\n\r\n #Get an object\r\n def get(self,object_name,path, version=None):\r\n # the path shouldn't be a file because the get must store the object as a new file\r\n if (not os.path.exists(path) or os.path.isfile(path)) and self.remote_or_local.__eq__(\"from_local\"):\r\n return \"Wrong Path\"\r\n else :\r\n # change condition if we operate with client-server or in local machine\r\n condition = None\r\n if self.remote_or_local.__eq__(\"from_server\"):\r\n condition = os.path.isdir(path) or self.remote_or_local.__eq__(\"from_server\")\r\n elif self.remote_or_local.__eq__(\"from_local\"):\r\n condition = os.path.isdir(path)\r\n if condition:\r\n # if the object name exists\r\n if self.db.object_name_exists(object_name,self.db.conn):\r\n # if the version is not None and exists (by default the user will get the last version)\r\n if (get_data := self.db.get_version(object_name,self.db.conn,version)) is not None:\r\n # create object as new text file\r\n # get the last attribute of the Versions' table which is the name of the object\r\n # and set it as the name of the file the will be written\r\n crt_obj_as_file = open(os.path.join(path,(str(get_data[-1])+\".txt\")),\"w\")\r\n # get all attributes' data except the object name\r\n version_, content_names,content_paths = get_data[0:len(get_data) - 1]\r\n contents = list(map(lambda x, y: os.path.join(x,y), content_paths.split(\";\"), content_names.split(\";\")[0:-1]))\r\n # write into the object's file the data and the attributes of the data to be readable\r\n # use | as a separator instead of comma and removing parenthesis got from db fetched data\r\n #.format is used to write multiple lines\r\n crt_obj_as_file.write(\"{:<15}{:>15}\".format(\"Version\", \"Contents\\n\"))\r\n crt_obj_as_file.write(f\"{version_}\\n\")\r\n for content in contents:\r\n 
crt_obj_as_file.write(\"{:<20}{:>18}\".format(\"\",content + \"\\n\"))\r\n # close the file\r\n crt_obj_as_file.close()\r\n if self.transfer_mode == 2:\r\n return (os.path.join(path,(str(get_data[-1])+\".txt\")))\r\n # if transfer mode is without transfer\r\n if self.transfer_mode == 1:\r\n return \"The GET file is made and ready!\"\r\n return \"The GET file is made and ready!\"\r\n else:\r\n return \"Object not Found\"\r\n\r\n # Delete an object\r\n def delete(self,object_name, version=None):\r\n if self.db.object_name_exists(object_name, self.db.conn):\r\n return self.db.delete_obj(object_name,self.db.conn,version)\r\n else:\r\n return f\"Object {object_name} not found\"\r\n\r\n # listing objects\r\n # if the object name is specified by the user then it shows only objects\r\n # else, it will show all versions of the given object\r\n def list(self, object_name=None):\r\n return self.db.get_objects_versions(self.db.conn,object_name)"
},
{
"alpha_fraction": 0.7883556485176086,
"alphanum_fraction": 0.7934697270393372,
"avg_line_length": 93.14814758300781,
"blob_id": "46c618204b2eb05c1cb50f9c56c7bbab3edbe9f2",
"content_id": "a7ccdccc3823a9e368bf55cd2603b3b554475466",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2542,
"license_type": "permissive",
"max_line_length": 243,
"num_lines": 27,
"path": "/README.md",
"repo_name": "logarithm27/Object_Store_Versioning",
"src_encoding": "UTF-8",
"text": "# Home Made Object Versioning\nLike Amazon S3 Object Versioning, this Python program is able to create objects and making versions of it. \n# What is it ?\nThis python program implements an object storage system by which we can perform the following actions : \n1. PUT (creation of an object) : By giving the name of the object and a file's path that contains the data of the object.\n2. GET (requesting an object) : By mentioning the object's name followed by the path where we will store the retreived object.\n3. DELETE (for deleting an object) : By mentioning the object's name.\n4. LIST (to lists objects or versions of an object) : The user is able to see all the existing objects or all the object's versions if he mentioned the object's name.\n# Functionnalities of Object Versioning \n-The objects meta-data are stored in a SQLITE3 Database.\n-We can store multiple versions of the same object.\n-The PUT command of an existing object creates a new version of it.\n-By default, the GET command retreive the latest version of the object. Also, the user can choose to retreive a specific version of the object.\n-By default, the DELETE command achieve complete deletion of the whole object. The user can delete a specific version by mentioning this latter.\n# Client Server Architecture \nI have developed a Client Server Architecture that is able to run the system remotly through different machines (the user should set up the necessary configurations to the firewall in order to ensure the execution of the Server python script).\n# Client Server with transfer\nThe object's content can migrate through the network between the two endpoints. For example,in case of a PUT command, the client can read a file and transmit it to the server, and the latter can store the received file localy.\n# Client Server without transfer \nThe server have the access to the client's paths. He can read/write the client's files.\n# Object's Versions management Policies:\n1. Global Policy: the user can set a maximum number of versions to store for each created object. Any creation of a new version that exceeds the maximum number of versions should follow the deletion of the oldest existing version.\n2. Dynamic Policy : same as the the global policy, but a creation of any new version is preceded by deleting 25% of the oldest versions.\n3. Each Object have its Policy.\n# Prerequisites\nYou can use the program localy or remotely. You should install SQLITE3.\nBefore executing the Client Server interaction, make sure that SQLITE3 is installed besides the Server Side.\n"
},
{
"alpha_fraction": 0.5591772198677063,
"alphanum_fraction": 0.561313271522522,
"avg_line_length": 50.8953971862793,
"blob_id": "b3d2361c9ebe64948dce60b8634bc6aa30e044e2",
"content_id": "b8033c0b204687b24d6e7717f2c8e9b7713b1161",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12640,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 239,
"path": "/database.py",
"repo_name": "logarithm27/Object_Store_Versioning",
"src_encoding": "UTF-8",
"text": "import os\r\nimport sqlite3\r\nfrom sqlite3 import Error\r\n\r\n# global variable that creates a working directory where we can store the database file\r\ncurrent_working_directory = os.getcwd()\r\n# sqlite3 database file name\r\ndb_file_name = \"/drive.db\"\r\n\r\n\r\n# database creation and connection\r\nclass Database:\r\n def __init__(self):\r\n self.conn = None\r\n\r\n def connect_db(self):\r\n # if database file doesn't exist, create new one and create database from scratch then connect\r\n if not os.path.exists(db_file := str(current_working_directory) + db_file_name):\r\n print(db_file)\r\n open(db_file, 'w').close()\r\n self.conn = self.create_connection(db_file)\r\n self.create_db_tables(self.conn)\r\n\r\n # else, simply connect to db\r\n else:\r\n self.conn = self.create_connection(db_file)\r\n\r\n def create_connection(self, db_file):\r\n connection = None\r\n try:\r\n connection = sqlite3.connect(db_file)\r\n print(f\"connected with {sqlite3.version}\")\r\n return connection\r\n except Error as error:\r\n print(error)\r\n return connection\r\n\r\n # create necessary tables for our object storage inside the database\r\n def create_db_tables(self, connection):\r\n objects_table = \"\"\" CREATE TABLE IF NOT EXISTS Objects (\r\n o_name text NOT NULL PRIMARY KEY,\r\n Max_versions INTEGER,\r\n Policy INTEGER); \"\"\"\r\n\r\n # Version table contains the version(the number of the version auto incremented each time a new version comes)\r\n # content : the name of the content\r\n # content_path : the path of the file or directory that contains the object_data\r\n\r\n versions_table = \"\"\" CREATE TABLE IF NOT EXISTS Versions (\r\n ID integer PRIMARY KEY,\r\n version integer ,\r\n content_name text NOT NULL, \r\n content_path text NOT NULL); \"\"\"\r\n # Create foreign key constraint that references the Versions' o_name to the Objects' o_name\r\n object_versions_fk = \"\"\" ALTER TABLE Versions ADD COLUMN o_name text REFERENCES Objects(o_name); \"\"\"\r\n if connection is not None:\r\n try:\r\n c = connection.cursor()\r\n c.execute(objects_table)\r\n c.execute(versions_table)\r\n c.execute(object_versions_fk)\r\n print(\"database created\")\r\n except Error as error:\r\n print(error)\r\n\r\n # test if a certain object_name already exists\r\n def object_name_exists(self, object_name, c):\r\n cursor = c.cursor()\r\n search_for_object_name = \"\"\" SELECT * FROM Objects WHERE o_name=? \"\"\"\r\n cursor.execute(search_for_object_name, (object_name,))\r\n if (data := cursor.fetchone()) is not None:\r\n print(f\"The Object '{data[0]}' exists\")\r\n return True\r\n print(\"Object not found\")\r\n return False\r\n\r\n # get version of an object\r\n def get_version(self, object_name, c, version=None):\r\n cursor = c.cursor()\r\n # by default, version parameter is optional\r\n # if the version is not specified by the user, the latter will get the latest version of the object\r\n if version is None:\r\n get_last_version = \"\"\" SELECT version, content_name, content_path, o_name \r\n FROM Versions \r\n WHERE o_name=? and version = (SELECT MAX(version) from Versions)\"\"\"\r\n cursor.execute(get_last_version, (object_name,))\r\n fetched_data = cursor.fetchone()\r\n return fetched_data\r\n else:\r\n get_requested_version = \"\"\" SELECT version, content_name, content_path, o_name \r\n FROM Versions \r\n WHERE o_name=? 
and version =?\"\"\"\r\n cursor.execute(get_requested_version, (object_name, version,))\r\n fetched_data = cursor.fetchone()\r\n # if the version requested by the user don't exist, return nothing\r\n if fetched_data is None:\r\n print(f\"This version : {str(version)} don't exist\")\r\n return None\r\n else:\r\n return fetched_data\r\n\r\n # it creates an object in the database\r\n # the exists parameters is boolean that indicates whether the objects exists in the db or not\r\n def create_object(self, object_name, c, path, content_name, exists, MAX_Ver, policy):\r\n cursor = c.cursor()\r\n create_version = \"\"\" INSERT INTO Versions (version,content_name, content_path, o_name) \r\n VALUES(?,?,?,?)\"\"\"\r\n if not exists:\r\n create_new_object = \"\"\"INSERT INTO Objects Values (?,?,?)\"\"\"\r\n cursor.execute(create_new_object, (object_name, MAX_Ver, policy))\r\n # the object is new and have only one version (the first version)\r\n # add \";\" as a separator (in order to separate further contents names and paths on future versions)\r\n cursor.execute(create_version, (1, content_name+\";\", path+\";\", object_name))\r\n c.commit()\r\n return \"New Object created\"\r\n else:\r\n # get number of versions\r\n cursor.execute('''SELECT count(o_name) FROM Versions WHERE o_name=?''', (object_name,))\r\n number_of_versions = cursor.fetchone()[0]\r\n # get the max number of versions allowed to be stored that corresponds to the object\r\n cursor.execute('''SELECT Max_versions FROM Objects WHERE o_name=?''',(object_name,))\r\n max_versions = cursor.fetchone()[0]\r\n # if it exceeds the max number of versions set up by the program(by default) or by the user\r\n if number_of_versions >= max_versions:\r\n # we should now the policy used to manage the object\r\n cursor.execute('''SELECT policy FROM Objects WHERE o_name=?''', (object_name,))\r\n policy = cursor.fetchone()[0]\r\n # if the policy is global, we delete the oldest version\r\n if policy == 1:\r\n oldest_version = ''' Select min(version) from versions where o_name=?'''\r\n cursor.execute(oldest_version, (object_name,))\r\n oldest_version = cursor.fetchone()[0]\r\n self.delete_obj(object_name, c, oldest_version)\r\n # if its dynamic policy, whenever we add a new version, we delete 25% of the oldest versions\r\n elif policy == 2:\r\n delete_quarter = ''' DELETE FROM Versions \r\n WHERE version \r\n IN (\r\n SELECT version \r\n FROM Versions \r\n WHERE o_name=? 
\r\n ORDER BY version ASC LIMIT ?);'''\r\n quarter_of_versions = number_of_versions / 4\r\n cursor.execute(delete_quarter, (object_name, quarter_of_versions))\r\n c.commit()\r\n # get the number of previous latest version of object and adding one to it\r\n last_version = ''' Select max(version) from versions where o_name=?'''\r\n cursor.execute(last_version, (object_name,))\r\n last_version = cursor.fetchone()\r\n last_version = last_version[0] + 1\r\n # get the content names and content paths of the object from previous version\r\n # plus the current content name and content path added\r\n content_n, paths = self.add_or_replace_paths_for_new_version(object_name,self.conn,path,content_name)\r\n cursor.execute(create_version, (last_version, content_n, paths, object_name))\r\n c.commit()\r\n return \"New Version of the object created\"\r\n\r\n\r\n # the version parameter is optional\r\n def delete_obj(self, object_name, c, version=None):\r\n cursor = c.cursor()\r\n del_all_versions = \"\"\"Delete From Versions Where o_name=?\"\"\"\r\n del_obj = \"\"\"Delete From Objects Where o_name=?\"\"\"\r\n # the user can specify a version, and the latter could be the only version existing\r\n # if its the case we skip to else and\r\n # so we delete the version and the object from the Objects table\r\n cursor.execute('''SELECT count(o_name) from Versions where o_name =?''', (object_name,))\r\n # if version is specified by user and exists and there is more than 1 version\r\n v_exist = self.get_version(object_name, c, version)\r\n if (version is not None) and (v_exist) is not None and (cursor.fetchone()[0] > 1):\r\n del_version = \"\"\"DELETE FROM Versions WHERE o_name=? and version=?\"\"\"\r\n cursor.execute(del_version, (object_name, version))\r\n c.commit()\r\n return f\"Version {str(version)} deleted\"\r\n # if the version don't exist\r\n elif v_exist is None:\r\n return \"Version not found\"\r\n else:\r\n cursor.execute(del_all_versions, (object_name,))\r\n cursor.execute(del_obj, (object_name,))\r\n c.commit()\r\n return \"Object Deleted\"\r\n\r\n # listing objects or objects' versions\r\n # object name is optional\r\n def get_objects_versions(self, c, object_name=None):\r\n cursor = c.cursor()\r\n # if object name is not specified then list all existing object\r\n # else list all object's versions\r\n if object_name is not None:\r\n if self.object_name_exists(object_name, c):\r\n get_all_versions_of_objects = '''SELECT version,content_name,content_path FROM Versions where o_name=?'''\r\n cursor.execute(get_all_versions_of_objects, (object_name,))\r\n return cursor.fetchall()\r\n else :\r\n return \"Object not found\"\r\n else:\r\n get_all_objets = '''SELECT o_name FROM Objects'''\r\n cursor.execute(get_all_objets)\r\n return cursor.fetchall()\r\n\r\n # this function gets the latest version\r\n # and compares the content's name and path of the current version that is being created with the last version\r\n def add_or_replace_paths_for_new_version(self,object_name,c,path,content_name):\r\n cursor = c.cursor()\r\n # get content's name and path from the last version of the object\r\n last_version = '''SELECT content_name, content_path \r\n From Versions \r\n WHERE \r\n o_name=?\r\n AND \r\n version=\r\n (SELECT MAX(version) \r\n FROM Versions)'''\r\n cursor.execute(last_version, (object_name,))\r\n fetched_data = list(cursor.fetchone())\r\n # all contents are separated by semicolons, so we remove it\r\n # and each path/content name will be an element of a list\r\n # when we remove the semicolon, the 
last element of the array is empty, so we remove the last element\r\n content_names = fetched_data[0].split(\";\")[0:-1]\r\n content_paths = fetched_data[1].split(\";\")[0:-1]\r\n # for each element name in the latest version\r\n for index,c_name in enumerate(content_names):\r\n # if the content name given by the user is already existing(latest version)\r\n if content_name.lower().__eq__(c_name.lower()):\r\n # the name of the content remains the same, but the path change ( overwriting the content )\r\n content_paths[index]=path\r\n break\r\n # # if the content name given by the user is not existing\r\n if content_name not in content_names :\r\n # we add the new content name and path with the others from the latest version\r\n content_names.append(content_name)\r\n content_paths.append(path)\r\n # we convert lists to strings and we join them by making semicolons separators\r\n # in order to execute it with SQL query\r\n content_names = \";\".join(content_names)+\";\"\r\n content_paths = \";\".join(content_paths)+\";\"\r\n # we return content names and content paths that will be in the new version of the object\r\n return [content_names,content_paths]"
}
] | 6 |
supernelson/bs4 | https://github.com/supernelson/bs4 | d93c64badd449314284fa11989a3a8befc47613e | c4d63efad9fbd8d03ff8ea30cc9507b272b410fd | ad36a561c25ba80495e273b3a3abb3c6fc1c9c1d | refs/heads/master | 2022-11-17T18:09:59.952635 | 2020-07-13T16:24:37 | 2020-07-13T16:24:37 | 274,783,065 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5046296119689941,
"alphanum_fraction": 0.5185185074806213,
"avg_line_length": 16.280000686645508,
"blob_id": "057fd3ddd3563518fd60cbd4ead91460bc8f5590",
"content_id": "1db486e9651cbb2f249cc2f3464274c3ab12cda4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 432,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 25,
"path": "/yield_test.py",
"repo_name": "supernelson/bs4",
"src_encoding": "UTF-8",
"text": "#yield_test.py\n\ndef yield_test():\n print (\"hello world\")\n yield\n print (\"end\")\n \nyield_test()\n\ndef square_numbers(nums):\n for i in nums: \n yield (i*i)\n \n # for i in nums:\n # print (a)\n # if len(nums) <= len(nums):\n # print (next(i))\n\ndef old_squares(nums):\n for i in nums:\n print (i*i)\n\na = [1,6,8,0,2,3]\nsquare_numbers = (square_numbers(a))\nlist(square_numbers)\n"
},
{
"alpha_fraction": 0.6184210777282715,
"alphanum_fraction": 0.6381579041481018,
"avg_line_length": 9.066666603088379,
"blob_id": "e2cbc9ab0c3e43e17ec2790e21bb52db47195f22",
"content_id": "46732389e65453446565272063ffe7f769f72db9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 15,
"path": "/self.py",
"repo_name": "supernelson/bs4",
"src_encoding": "UTF-8",
"text": "#self.py\n\nclass Tweet:\n pass\n\na = Tweet()\n\na.message = '140 characters'\n\nprint (a)\n\nprint (Tweet.message)\n\nb = Tweet()\nb.message = \"different note\"\n\n"
}
] | 2 |
isakhawat/Computer-Vision-OpenCV- | https://github.com/isakhawat/Computer-Vision-OpenCV- | 773cea8bf098cb026b120c52b271a8ddff555ae5 | 7f9b056e16cb7458359f3deb3c6e5da0a4a8a70d | 5e3ae0fc407b190c562d4c47d04c39c69cdf5650 | refs/heads/master | 2020-12-31T09:32:20.047624 | 2020-02-28T14:24:01 | 2020-02-28T14:24:01 | 238,978,953 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7414403557777405,
"alphanum_fraction": 0.8063754439353943,
"avg_line_length": 64.07691955566406,
"blob_id": "59aa107c389dc2f6d1a86cc073ae0a65f5c99123",
"content_id": "e77364a6166827e1777c1a388b71a35c36697159",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 847,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 13,
"path": "/README.md",
"repo_name": "isakhawat/Computer-Vision-OpenCV-",
"src_encoding": "UTF-8",
"text": "# Computer-Vision-OpenCV-\n\n1. Image Manipulations & Processing : https://github.com/isakhawat/Computer-Vision-OpenCV-/blob/master/2.%20Image%20Manipulations%20%26%20Processing.ipynb\n\n2.Image Segmentation: https://github.com/isakhawat/Computer-Vision-OpenCV-/blob/master/3.%20Image%20Segmentation.ipynb\n\n3.Object Detection in OpenCV: https://github.com/isakhawat/Computer-Vision-OpenCV-/blob/master/4.%20Object%20Detection%20in%20OpenCV.ipynb\n\n4.real time face detiction: https://github.com/isakhawat/Computer-Vision-OpenCV-/blob/master/5.%20real%20time%20face%20detiction.ipynb\n\n5.real time face detiction 2: https://github.com/isakhawat/Computer-Vision-OpenCV-/blob/master/5.%20real%20time%20face%20detiction.py\n\n6. Handwritten Digit Recognition: https://github.com/isakhawat/Computer-Vision-OpenCV-/blob/master/7.%20digit%20recognition.ipynb\n\n"
},
{
"alpha_fraction": 0.6540524959564209,
"alphanum_fraction": 0.6856604218482971,
"avg_line_length": 30.067567825317383,
"blob_id": "af8381c504efb997a2c56e778f6f87f621217d69",
"content_id": "d7cdbfdcdc6ff4409a24f66bf98f447c431515b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6900,
"license_type": "no_license",
"max_line_length": 267,
"num_lines": 222,
"path": "/5. real time face detiction.py",
"repo_name": "isakhawat/Computer-Vision-OpenCV-",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport cv2\n\n# We point OpenCV's CascadeClassifier function to where our\n# classifier (XML file format) is stored\nface_classifier = cv2.CascadeClassifier('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\Haarcascades\\\\haarcascade_frontalface_default.xml')\n\n# Load our image then convert it to grayscale\nimage = cv2.imread('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\images\\\\Trump.jpg')\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Our classifier returns the ROI of the detected face as a tuple\n# It stores the top left coordinate and the bottom right coordiantes\nfaces = face_classifier.detectMultiScale(gray, 1.3, 5)\n\n# When no faces detected, face_classifier returns and empty tuple\nif faces is ():\n print(\"No faces found\")\n\n# We iterate through our faces array and draw a rectangle\n# over each face in faces\nfor (x,y,w,h) in faces:\n cv2.rectangle(image, (x,y), (x+w,y+h), (127,0,255), 2)\n cv2.imshow('Face Detection', image)\n cv2.waitKey(0)\n\ncv2.destroyAllWindows()\n\n\n# ### Let's combine face and eye detection\n\n# In[1]:\n\n\nimport numpy as np\nimport cv2\n\nface_classifier = cv2.CascadeClassifier('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\Haarcascades\\\\haarcascade_frontalface_default.xml')\neye_classifier = cv2.CascadeClassifier('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\Haarcascades\\\\haarcascade_eye.xml')\n\nimg = cv2.imread('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\images\\\\essan4.png')\n#img = cv2.imread('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\images\\\\Trump.jpg')\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\nfaces = face_classifier.detectMultiScale(gray, 1.3, 5)\n\n# When no faces detected, face_classifier returns and empty tuple\nif faces is ():\n print(\"No Face Found\")\n\nfor (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(127,0,255),2)\n cv2.imshow('img',img)\n cv2.waitKey(0)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n eyes = eye_classifier.detectMultiScale(roi_gray)\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(255,255,0),2)\n cv2.imshow('img',img)\n cv2.waitKey(0)\n\ncv2.destroyAllWindows()\n\n\n# ### Let's make a live face & eye detection, keeping the face inview at all times\n\n# In[1]:\n\n\nimport cv2\nimport numpy as np\n\nface_classifier = cv2.CascadeClassifier('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\Haarcascades\\\\haarcascade_frontalface_default.xml')\neye_classifier = cv2.CascadeClassifier('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\Haarcascades\\\\/haarcascade_eye.xml')\n\ndef face_detector(img, size=0.5):\n # Convert image to grayscale\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n faces = face_classifier.detectMultiScale(gray, 1.3, 5)\n if faces is ():\n return img\n\n for (x,y,w,h) in faces:\n x = x - 50\n w = w + 50\n y = y - 50\n h = h + 50\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n eyes = eye_classifier.detectMultiScale(roi_gray)\n\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,255),2)\n\n roi_color = cv2.flip(roi_color,1)\n return roi_color\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n\n ret, frame = cap.read()\n cv2.imshow('Our Face Extractor', face_detector(frame))\n if cv2.waitKey(1) == 13: #13 is the Enter Key\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n\n# ### Tuning Cascade Classifiers\n#\n# *ourClassifier*.**detectMultiScale**(input image, **Scale Factor** , **Min Neighbors**)\n#\n# - **Scale Factor**\n# Specifies how much we reduce the image size each 
time we scale. E.g. in face detection we typically use 1.3. This means we reduce the image by 30% each time it’s scaled. Smaller values, like 1.05 will take longer to compute, but will increase the rate of detection.\n#\n#\n#\n# - **Min Neighbors**\n# Specifies the number of neighbors each potential window should have in order to consider it a positive detection. Typically set between 3-6.\n# It acts as sensitivity setting, low values will sometimes detect multiples faces over a single face. High values will ensure less false positives, but you may miss some faces.\n#\n\n# ## 2. Mini Project # 6 - Car & Pedestrian Detection\n#\n# **NOTE**\n# - If no video loads after running code, you may need to copy your *opencv_ffmpeg.dll*\n# - From: *C:\\opencv2413\\opencv\\sources\\3rdparty\\ffmpeg*\n# - To: Where your python is installed e.g. *C:\\Anaconda2\\* \\\n# - Once it's copied you'll need to rename the file according to the version of OpenCV you're using.\n# - e.g. if you're using OpenCV 2.4.13 then rename the file as:\n# - **opencv_ffmpeg2413_64.dll** or opencv_ffmpeg2413.dll (if you're using an X86 machine)\n# - **opencv_ffmpeg310_64.dll** or opencv_ffmpeg310.dll (if you're using an X86 machine)\n#\n# To find out where you python.exe is installed, just run these two lines of code:\n\n# In[1]:\n\n\nimport sys\nprint(sys.executable)\n\n\n# In[2]:\n\n\nimport cv2\nimport numpy as np\n\n# Create our body classifier\nbody_classifier = cv2.CascadeClassifier('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\Haarcascades\\\\haarcascade_fullbody.xml')\n\n# Initiate video capture for video file\ncap = cv2.VideoCapture('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\images\\\\walking.avi')\n\n# Loop once video is successfully loaded\nwhile cap.isOpened():\n\n # Read first frame\n ret, frame = cap.read()\n frame = cv2.resize(frame, None,fx=0.5, fy=0.5, interpolation = cv2.INTER_LINEAR)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # Pass frame to our body classifier\n bodies = body_classifier.detectMultiScale(gray, 1.2, 3)\n\n # Extract bounding boxes for any bodies identified\n for (x,y,w,h) in bodies:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)\n cv2.imshow('Pedestrians', frame)\n\n if cv2.waitKey(1) == 13: #13 is the Enter Key\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n\n# ### Car Detection\n\n# In[ ]:\n\n\nimport cv2\nimport time\nimport numpy as np\n\n# Create our body classifier\ncar_classifier = cv2.CascadeClassifier('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\Haarcascades\\\\haarcascade_car.xml')\n\n# Initiate video capture for video file\ncap = cv2.VideoCapture('C:\\\\Users\\\\Sabakat\\\\Desktop\\\\images\\\\cars.avi')\n\n\n# Loop once video is successfully loaded\nwhile cap.isOpened():\n time.sleep(.05)\n # Read first frame\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Pass frame to our car classifier\n cars = car_classifier.detectMultiScale(gray, 1.4, 2)\n\n # Extract bounding boxes for any bodies identified\n for (x,y,w,h) in cars:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)\n cv2.imshow('Cars', frame)\n\n if cv2.waitKey(1) == 13: #13 is the Enter Key\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n\n# - **Full Body / Pedestrian Classifier ** - https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_fullbody.xml\n# - **Car Classifier ** - http://www.codeforge.com/read/241845/cars3.xml__html\n#\n"
}
] | 2 |
treshgame/text_editor | https://github.com/treshgame/text_editor | 54976182259152b4b6488a4be283ebb9a4220aaf | 83f27621e0965be40cf2cd8157fc3393292f743e | d235fd87713709fe6f00bd88ad5e2122f301bc7c | refs/heads/main | 2023-01-24T05:42:39.639195 | 2020-11-11T13:36:33 | 2020-11-11T13:36:33 | 311,979,499 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6755877137184143,
"alphanum_fraction": 0.6802893280982971,
"avg_line_length": 36.971832275390625,
"blob_id": "180785643387946d2ffb878af6d53a0fccf5a1aa",
"content_id": "6fd8c1e0f2b5931a132116fef902b7f6cf7fa4cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3346,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 71,
"path": "/first_tkinter.py",
"repo_name": "treshgame/text_editor",
"src_encoding": "UTF-8",
"text": "import tkinter\r\nimport codecs\r\nfrom tkinter import *\r\nfrom tkinter.filedialog import askopenfile, asksaveasfile #Функции открыть и созанить как\r\nfrom tkinter.messagebox import showerror #Показ всех ошиббок\r\nfrom tkinter import messagebox #Уводемления приложения\r\nfrom settings import *\r\n\r\nclass Text_editor():\r\n def __init__(self):\r\n self.file_name = tkinter.NONE\r\n \r\n def new_file(self):\r\n self.file_name = 'No name'\r\n text.delete('1.0', tkinter.END)\r\n def open_file(self):\r\n inp = askopenfile(mode='r')\r\n if inp is None:\r\n return \r\n data = inp.read()\r\n text.delete('1.0', tkinter.END)\r\n text.insert('1.0', data)\r\n\r\n def save_file(self):\r\n data = text.get('1.0', tkinter.END)\r\n output = open(self.file_name, 'w', encoding='utf-8')\r\n output.write(data)\r\n output.close()\r\n def save_as_file(self):\r\n output = asksaveasfile(mode='w', defaultextension='txt')\r\n data = text.get('1.0', tkinter.END)\r\n try:\r\n output.write(data.rstrip())\r\n except Exception:\r\n showerror(title='Ошибка', message='Ошибка при сохранении ')\r\n def get_info(self):\r\n messagebox.showinfo('Справка', APP_INFO)\r\n\r\n\r\n\r\n\r\napp = tkinter.Tk() #Создать окно приложения\r\napp.title(APP_NAME) #Задаёт название приложения, берёт из файла settings\r\napp.minsize(width=WIDTH, height=HEIGHT) #Задаёт минимальный размер окна приложения\r\napp.maxsize(width=WIDTH, height=HEIGHT) #Задаёт максимальное значение окна приложения\r\n\r\ntext = tkinter.Text(app, width=WIDTH-50, height=HEIGHT, wrap='word') #Создаётся переменная для окна текста, привязывается к окну приложения, задаётся её размер\r\nscroll = Scrollbar(app, orient=VERTICAL, command=text.yview) #Создаётся скролл\r\nscroll.pack(side='right', fill='y') #Задаётся сторона его размещения и направление передвижения\r\ntext.configure(yscrollcommand=scroll.set) #устанавливается скрол текста\r\ntext.pack() #Текст размещается в окне\r\n\r\neditor = Text_editor()\r\n\r\nmenuBar = tkinter.Menu(app) #Создаем меню\r\napp_menu = tkinter.Menu(menuBar) #Создаёт подпундкты для пункта 'файл'\r\napp_menu.add_command(label = 'Новый файл', command=editor.new_file)\r\napp_menu.add_command(label = 'Открыть', command=editor.open_file)\r\napp_menu.add_command(label = 'Сохранить', command=editor.save_file)\r\napp_menu.add_command(label = 'Сохранить как', command=editor.save_as_file)\r\n\r\n\r\nmenuBar.add_cascade(label='Файл', menu=app_menu) #Создаём пункты основного меню\r\nmenuBar.add_cascade(label='Справка', command=editor.get_info)\r\nmenuBar.add_cascade(label='выход', command = app.quit)\r\n\r\napp.config(menu=menuBar)\r\n\r\n\r\n\r\napp.mainloop() #Делает так, чтобы окно работало, пока не закрыть его"
},
{
"alpha_fraction": 0.7714285850524902,
"alphanum_fraction": 0.7714285850524902,
"avg_line_length": 16.5,
"blob_id": "844b0a4e975e5d7c4e6572f2056d5b0cc7e01443",
"content_id": "1fd092b5446a5860ac4e7b785549799d1c6bcb3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 35,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 2,
"path": "/README.md",
"repo_name": "treshgame/text_editor",
"src_encoding": "UTF-8",
"text": "# text_editor\nMy first text editor\n"
},
{
"alpha_fraction": 0.6496350169181824,
"alphanum_fraction": 0.6934306621551514,
"avg_line_length": 21.16666603088379,
"blob_id": "36354edbedf2a9c5fc4affb28952d29e710e5974",
"content_id": "7616eaa3ed341318dd157275a0b49b782d31ac70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 190,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 6,
"path": "/settings.py",
"repo_name": "treshgame/text_editor",
"src_encoding": "UTF-8",
"text": "APP_NAME = 'WhyYouWrite'\r\n\r\nWIDTH = 950\r\nHEIGHT = 600\r\n\r\nAPP_INFO = \"Это файл, который показывает, что у меня есть тяга к новым знаниям \""
}
] | 3 |
Abhinav-Git19/FiniteStateMachine | https://github.com/Abhinav-Git19/FiniteStateMachine | c4d44f6edee46415a8fb9afa6cfd3501ba60be24 | 7d2a96aa41cfbb8cde4837ac23e3fc453d3d23b0 | 7df4f59f6af287ca8a7856651b4565ccd0b02030 | refs/heads/main | 2023-02-15T11:44:57.105387 | 2021-01-13T16:23:37 | 2021-01-13T16:23:37 | 329,363,248 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.668571412563324,
"alphanum_fraction": 0.668571412563324,
"avg_line_length": 24.14285659790039,
"blob_id": "b8a447ab2c53c4d489ff9e98b9ed756692cc3519",
"content_id": "3195f5c12e30724e94ba32156a5d99b25dd006e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 175,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 7,
"path": "/models/intermeidate_state.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "from models.state import State\n\nclass IntermediateState(State):\n\n def __init__(self,name):\n super(IntermediateState, self).__init__(name)\n self.transitions=[]"
},
{
"alpha_fraction": 0.6197183132171631,
"alphanum_fraction": 0.6197183132171631,
"avg_line_length": 23,
"blob_id": "b8faa78d83cb8dc5dc36fe842a039248fc3ce87c",
"content_id": "3184423c51004235352a439378328783f4c42e66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 71,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 3,
"path": "/models/current_state.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "class CurrentState:\n def __init__(self):\n self.cur_state=None"
},
{
"alpha_fraction": 0.6397515535354614,
"alphanum_fraction": 0.6397515535354614,
"avg_line_length": 22.14285659790039,
"blob_id": "454aff6e33eb9588eb9009251705e676ad815d25",
"content_id": "1d9bd927427968ce48c189e282561a4783023266",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 161,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 7,
"path": "/models/start_state.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "from models.state import State\n\nclass StartState(State):\n\n def __init__(self,name):\n super(StartState, self).__init__(name)\n self.transitions=[]"
},
{
"alpha_fraction": 0.6373056769371033,
"alphanum_fraction": 0.6424870491027832,
"avg_line_length": 18.399999618530273,
"blob_id": "efff92f4e1023a39ce57ee86b20ff8877a1f88e1",
"content_id": "cf2e5efaf534a0782f14fef657cd1b99fb6ec620",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 193,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 10,
"path": "/models/transition.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "import uuid\n\nfrom models.state import State\n\n\nclass Transition:\n def __init__(self,name,state):\n self.id=uuid.uuid1().hex\n self.name=name\n self.next_state : State=state"
},
{
"alpha_fraction": 0.6531791687011719,
"alphanum_fraction": 0.6531791687011719,
"avg_line_length": 28,
"blob_id": "4888f3bea41afa32655bfe39f96ec5491007340c",
"content_id": "7fa136b3f587d5fa2b20749774ee90c070bf71fe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 173,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 6,
"path": "/models/end_state.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "from models.state import State\n\nclass EndState(State):\n def __init__(self,name):\n super(EndState, self).__init__(name)\n # End state will have no transitions"
},
{
"alpha_fraction": 0.6222222447395325,
"alphanum_fraction": 0.6233918070793152,
"avg_line_length": 30.703702926635742,
"blob_id": "d128949985d067492257975c537af63c7a1aacc4",
"content_id": "62433d3ccd349de5800e04e4ade1f4d5a2708ba5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 855,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 27,
"path": "/fsm.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "from services.fsm_service import FSMService\nfrom services.state_trans_service import StateTransService\ndef main():\n state_trans_service = StateTransService()\n fsm_service = FSMService()\n\n fsm_commands =['begin_fsm','next_state','enable_notification_by_state','enable_all_notifications','curent_state']\n state_commands =['add_start_state','add_state','end_state','add_transition']\n\n while True:\n cmdargs = input('\\nEnter Command\\n')\n if cmdargs=='EXIT':\n print('Exiting FSM...')\n exit()\n\n base_cmd = cmdargs.split()[0]\n if base_cmd in fsm_commands:\n getattr(fsm_service,base_cmd)(cmdargs)\n elif base_cmd in state_commands:\n getattr(state_trans_service,base_cmd)(cmdargs)\n else:\n print('Invalid Command')\n\n\n\nif __name__ == '__main__':\n main()"
},
{
"alpha_fraction": 0.7719298005104065,
"alphanum_fraction": 0.7816764116287231,
"avg_line_length": 33.13333511352539,
"blob_id": "6f37611937695d6813a6f7ef9e3e82b42ec3184d",
"content_id": "58b9710e5b35a053a4ca2e7f2b109cae66fb8cf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 513,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 15,
"path": "/README.md",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "## Overview\nThis app intends to simulate Finite state machine\nwith following capabilities\n* Add new state\n* Add start state\n* Add ending state\n* Add transition\n* Beginging an FSM\n* Transition New state\n* Enabling notification so when new state is reached , user gets notified\n\n## References\nSome good reads on FSM:\n* [Brilliant.org FSM](https://brilliant.org/wiki/finite-state-machines/)\n* [CodeMentor.io](https://www.codementor.io/@arpitbhayani/building-finite-state-machines-with-python-coroutines-15nk03eh9l)\n\n"
},
{
"alpha_fraction": 0.6186841130256653,
"alphanum_fraction": 0.6215921640396118,
"avg_line_length": 28.516128540039062,
"blob_id": "2c93ee0ba553dc8c71c0bf66830e1a390500b762",
"content_id": "d6bbb35f0cb8ad46bc656e8f92356fac8bde4c6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2751,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 93,
"path": "/services/state_trans_service.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "from typing import List\n\nfrom models.end_state import EndState\nfrom models.start_state import StartState\nfrom models.state import State\nfrom models.transition import Transition\nfrom repository.state_repository import StateRepository\nfrom models.intermeidate_state import IntermediateState\n\n\nclass StateTransService:\n state_repository = StateRepository()\n\n\n\n def add_start_state(self,cmdargs):\n cmdlist = cmdargs.split()\n if len(cmdlist)!=2:\n print('Invalid Command')\n return\n\n name = cmdlist[1]\n state_list : List[State] = self.state_repository.get_all_states()\n\n for state in state_list:\n if isinstance(state,StartState):\n print('Start State already present cannot add multiple start state')\n return\n\n start_state = StartState(name)\n self.state_repository.add_state(start_state)\n\n print('Start State added')\n\n\n\n\n def add_state(self, cmdargs):\n cmdlist = cmdargs.split()\n if len(cmdlist) != 2:\n print('Invalid Add State Command')\n return\n\n name: str = cmdlist[1]\n state_obj = self.state_repository.get_state_by_name(name)\n if state_obj is not None:\n print('State already present')\n return\n\n new_state = IntermediateState(name)\n self.state_repository.add_state(new_state)\n print('Added State {}'.format(new_state.id))\n\n\n def end_state(self,cmdargs):\n cmdlist = cmdargs.split()\n if len(cmdlist) != 2:\n print('Invalid End State Command')\n return\n name = cmdlist[1]\n\n state_obj = self.state_repository.get_state_by_name(name)\n if state_obj is not None:\n print('State already present')\n return\n\n end_state = EndState(name)\n self.state_repository.add_state(end_state)\n print('End State {} Added'.format(end_state.id))\n\n def add_transition(self,cmdargs):\n cmdlist = cmdargs.split()\n if len(cmdlist) != 4:\n print('Invalid Transition State Command')\n return\n\n trans_name,first_state_name,next_state_name = cmdlist[1:]\n\n first_state= self.state_repository.get_state_by_name(first_state_name)\n second_state = self.state_repository.get_state_by_name(next_state_name)\n\n if first_state is None or second_state is None:\n print('Invalid Transition')\n return\n if isinstance(first_state,EndState):\n print('First state cannot be end state')\n return\n\n transition = Transition(trans_name,second_state)\n first_state.transitions.append(transition)\n\n\n print('Transtion {} added'.format(transition))\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5855262875556946,
"alphanum_fraction": 0.5921052694320679,
"avg_line_length": 20.714284896850586,
"blob_id": "fffe7a70e3d1f1707e454e60f089f7495e87a8c6",
"content_id": "a902cd6ad007d428052836da1a737aa651e05507",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 7,
"path": "/models/state.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "import uuid\nclass State:\n\n def __init__(self,name):\n self.id=uuid.uuid1().hex\n self.name=name\n self.notification : bool =False\n"
},
{
"alpha_fraction": 0.6464174389839172,
"alphanum_fraction": 0.6464174389839172,
"avg_line_length": 25.75,
"blob_id": "bf2feb58b444b8fe3f8d3dd25ef6ab017289be17",
"content_id": "d6627a4a604ebd0e9396f1c9b2e27c11705684a0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 642,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 24,
"path": "/repository/state_repository.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "from typing import Dict, List\n\nfrom models.start_state import StartState\nfrom models.state import State\n\n\nclass StateRepository:\n state_repo: Dict[str, State] = {}\n\n def get_state_by_name(self, name) -> State:\n return self.state_repo.setdefault(name, None)\n\n def add_state(self, state_obj: State):\n self.state_repo[state_obj.name] = state_obj\n\n def get_all_states(self) -> List[State]:\n return list(self.state_repo.values())\n\n def get_start_state(self) ->StartState:\n\n for key,val in self.state_repo.items():\n if isinstance(val,StartState):\n return val\n return None\n"
},
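The lookup fix above (`dict.get` rather than `dict.setdefault(name, None)`) deserves a quick demonstration, since the original bug is subtle: `setdefault` writes its default into the dict on every miss. A small runnable check:

```python
# Demonstrates why setdefault(name, None) is the wrong lookup primitive:
# it inserts the default into the dict, while get() leaves it untouched.
repo = {}

repo.get('missing')               # returns None, repo is still {}
assert repo == {}

repo.setdefault('missing', None)  # returns None, but ALSO writes the key
assert repo == {'missing': None}

# After a few failed lookups, list(repo.values()) is littered with None,
# which is exactly what would break get_all_states() without the fix.
```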
{
"alpha_fraction": 0.5955519080162048,
"alphanum_fraction": 0.5984349250793457,
"avg_line_length": 31.689189910888672,
"blob_id": "a07a0f1b3373fefc7183ea5900860a7db40355b6",
"content_id": "8c3f9f7b2a9da983f3c40200204b80375fc6efdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2428,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 74,
"path": "/services/fsm_service.py",
"repo_name": "Abhinav-Git19/FiniteStateMachine",
"src_encoding": "UTF-8",
"text": "from models.current_state import CurrentState\nfrom models.end_state import EndState\nfrom repository.state_repository import StateRepository\n\n\n\nclass FSMService:\n\n state_repository = StateRepository()\n current_state = CurrentState()\n\n def begin_fsm(self,cmdargs):\n cmdlist = cmdargs.split()\n if len(cmdlist) != 1:\n print('invalid begin command')\n return\n\n start_state =self.state_repository.get_start_state()\n if start_state is None:\n print('StartState Not Specified')\n return\n self.current_state.cur_state=start_state\n\n def curent_state(self,cmdargs):\n cmdlist = cmdargs.split()\n if len(cmdlist)!=1:\n print('Invalid current state command')\n return\n\n if self.current_state.cur_state is None:\n print('FSM Not begun yet!')\n return\n print('Current state {}'.format(self.current_state.cur_state))\n\n def next_state(self,cmdargs):\n cmdlist = cmdargs.split()\n if len(cmdlist)!=2:\n print('invalid next_state command')\n return\n transition_name = cmdlist[1]\n\n for trans in self.current_state.cur_state.transitions:\n if trans.name==transition_name:\n self.current_state.cur_state=trans.next_state\n if isinstance(self.current_state.cur_state, EndState):\n print('End State {} reached'.format(self.current_state.cur_state.name))\n\n if self.current_state.cur_state.notification:\n print('State Changed to {}'.format(self.current_state.cur_state.name))\n return\n\n print('Transition not found')\n\n def enable_notification_by_state(self,cmdargs):\n cmdlist = cmdargs.split()\n if len(cmdlist) != 2:\n print('invalid notification command')\n return\n state_name = cmdlist[1]\n state = self.state_repository.get_state_by_name(state_name)\n if state is None:\n print('No such state')\n return\n state.notification=True\n\n def enable_all_notifications(self,cmdargs):\n cmdlist = cmdargs.split()\n if len(cmdlist) != 1:\n print('invalid all_notification command')\n return\n\n state_list = self.state_repository.get_all_states()\n for state in state_list:\n state.notification=True\n\n\n\n\n\n\n\n\n\n"
}
] | 11 |
iamkamleshrangi/geoaddress | https://github.com/iamkamleshrangi/geoaddress | 08ec8a3d29cd42d3dcf219756fe405766f8344a3 | d20e0e22669f5c06e15cccef9e152ed5a3aacdbb | 4b394b72dd893cb0e5a625b7d4c508865ba308c3 | refs/heads/master | 2022-02-06T12:41:35.324654 | 2022-01-21T04:31:03 | 2022-01-21T04:31:03 | 204,328,839 | 0 | 0 | null | 2019-08-25T17:42:03 | 2020-01-14T05:56:27 | 2022-01-21T04:31:03 | Python | [
{
"alpha_fraction": 0.5426312685012817,
"alphanum_fraction": 0.5753811597824097,
"avg_line_length": 48.13888931274414,
"blob_id": "2a3188138655f483d17b8ae7d8b0a9cc32cfdaf9",
"content_id": "7e361a69b5c7785641aab3bf1533e9528ff1aaee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1771,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 36,
"path": "/geoaddress/geoaddress.py",
"repo_name": "iamkamleshrangi/geoaddress",
"src_encoding": "UTF-8",
"text": "import requests\nimport json\n\ndef decode_html(string):\n decoded = ['>', '<', '\"', '&', '\\'','{', '}']\n encoded = ['>', '<', '"', '&', ''', '{', '}']\n for e, d in zip(encoded, decoded):\n string = string.replace(e, d)\n for e, d in zip(encoded[::-1], decoded[::-1]):\n string = string.replace(e, d)\n return string\n\ndef geoaddress(query, country='', proxies=None):\n map_url = 'https://www.lyft.com/api/geocode?address={}+{}'\n headers = { 'Content-Type': 'text/html; charset=utf-8',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none','Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive','Content-Encoding': 'gzip' }\n\n parsed = {}\n query = query.replace(' ','+')\n map_url = map_url.format(query, country)\n response = requests.get(map_url, headers=headers)\n if response.status_code == 200:\n try:\n record = json.loads(response.content)\n return {'display_address': record['display_address'],\n 'latitude': record.get('lat',''), 'longitude': record.get('lng',''),\n 'place_id':record.get('place_id',''), 'place_type': record.get('place_type',''),\n 'formated_address': record.get('routable_address'), 'error_flag': False }\n except Exception as e:\n return {'error_flag': True, 'Error': 'Interface Error with {}'.format(e)}\n else:\n return {'error_flag': True, 'Error': 'Interface Error with http code {}'.format(response.status_code)}\n\n\n"
}
] | 1 |
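The two-pass entity decoding in `decode_html` above is worth a standalone illustration. This snippet re-implements the same idea (it is not an import from the package) to show why the reversed second pass is needed for double-encoded input:

```python
# Standalone illustration of decode_html's two-pass strategy.
decoded = ['>', '<', '"', '&', "'", '{', '}']
encoded = ['&gt;', '&lt;', '&quot;', '&amp;', '&#39;', '&#123;', '&#125;']

def decode_twice(s):
    for e, d in zip(encoded, decoded):
        s = s.replace(e, d)
    for e, d in zip(encoded[::-1], decoded[::-1]):
        s = s.replace(e, d)
    return s

# Double-encoded input: '&amp;lt;' contains no literal '&lt;' until the
# first pass rewrites '&amp;' to '&'; the second pass then finishes the job.
assert decode_twice('&amp;lt;b&amp;gt;') == '<b>'
assert decode_twice('&lt;b&gt;') == '<b>'
```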
Frank2015a/rc | https://github.com/Frank2015a/rc | 47d83acd5af023c259fc215786b27defe43d74c1 | de245f7a41246cb54f311496ebe6f726627920df | 03bafd45c604d919920821936beee4d7a2775511 | refs/heads/master | 2020-03-19T02:41:48.263409 | 2017-07-28T04:25:28 | 2018-05-27T13:29:16 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7203390002250671,
"alphanum_fraction": 0.7203390002250671,
"avg_line_length": 38.33333206176758,
"blob_id": "9fd9f759581964e1f3b1ef512f4971f4777b6b6c",
"content_id": "4884b9b848789a05323a7e39d0e560bb54efacbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 472,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 12,
"path": "/rc/ipython_import.py",
"repo_name": "Frank2015a/rc",
"src_encoding": "UTF-8",
"text": "import numpy as np, collections, sys, types;\nimport matplotlib as mpl;\n# mpl.use('nbagg')\n# %matplotlib inline # `%matplotlib` to restore\n# import matplotlib.pyplot as plt;\n# import pandas as pd; DataFrame = pd.DataFrame; Series = pd.Series;\n\n# tmp, ploting related\n# %matplotlib inline # TODO fix it, not work [TerminalIPythonApp] WARNING | Unknown error in handling IPythonApp.exec_files:\n\n# tmp hacks\ng = lambda s: {k:v for k,v in mpl.rcParams.items() if s in k}\n"
},
{
"alpha_fraction": 0.5844866037368774,
"alphanum_fraction": 0.6020833253860474,
"avg_line_length": 37.45493698120117,
"blob_id": "a9b4fe1505a1046eef1ef11af586875a324433a0",
"content_id": "6621f8e4f2df71530eb7eabeee116ffb1aecfd55",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 26910,
"license_type": "no_license",
"max_line_length": 260,
"num_lines": 699,
"path": "/.bashrc",
"repo_name": "Frank2015a/rc",
"src_encoding": "UTF-8",
"text": "# custFile for bash, sourced by ~/.bashrc\n# . ~/notes/.bashrc # append this line to ~/.bashrc to make sourced an interactive bash is invoked.\n# check also ~/.bash_profile\n\n# set vi mode editing\nset -o vi\n# insert-mode key bindings\n#+ bind <Esc-.> to yank-last-arg, Nth last if type Nth time.\n#+ for zsh, bindkey \"\\e.\" insert-last-word\n#+ in zsh, bindkey -M viins 'kj' vi-cmd-mode\n#+ bind 'kj' to return to vi-cmd-mode when in insert mode\n# \"key, see \" bind \" inputrc\n\n# \"bind, see \" key \" inputrc\nbind '\"\\C-p\":history-search-backward'\nbind '\"\\ep\":history-search-backward'\nbind '\"\\e.\":yank-last-arg'\nbind '\"Tab\":complete'\nbind '\"\\C-n\":menu-complete'\nbind '\"\\C-l\":menu-complete-backward'\nbind '\"\\ew\":unix-filename-rubout'\nbind '\"\\eh\":unix-filename-rubout'\n# bind '\"\\C-u\":unix-line-discard'\nbind '\"\\C-a\":beginning-of-line'\nbind '\"\\C-e\":end-of-line'\nbind '\"\\C-w\":backward-kill-word'\nbind '\"\\C-k\":kill-line'\nbind '\"\\C-o\":undo'\nbind '\"\\C-o\":revert-line'\nbind '\"\\C-f\":forward-char'\nbind '\"\\C-b\":backward-char'\nbind '\"\\ef\":forward-word'\nbind '\"\\eb\":backward-word'\n# keybindings are now defined in .inputrc\nstty -ixon # You will need to do stty -ixon before Ctrl-s will work. – Dennis Williamson Nov 15 '10 at 23:51 http://unix.stackexchange.com/questions/4079/put-history-command-onto-command-line-without-executing-it#comment4618_4086\n# -- eg, !! last command , !-3 last third command, !222 222th command in .bash_history\n# Another small one: Alt+# comments out the current line and moves it into the history buffer. https://unix.stackexchange.com/a/78845/202329\n\n# \"inputrc, \"readline setting, add below lines to, eg, ~/.inputrc , if not been included yet.\n# set show-mode-in-prompt on\n# set show-all-if-ambiguous on\n# set completion-ignore-case on\n# set bell-style none\n\n# \"completion, see \" history\nshopt -s nocaseglob # ~/.inputrc文件中添加: set completion-ignore-case on\ncomplete -c h # https://unix.stackexchange.com/a/345205/202329\ncomplete -c v # version\ncomplete -c w # alias w='whatis'\ncomplete -c wh # alias wh='command -v'\n_my_complete_ai_apt_install_alias() {\n # see file apt apt-get dpkg git in /usr/share/bash-completion/completions\n # a working example very similar to below for 'apt install' alias. Bash下实现alias补全 http://adam8157.info/blog/2010/05/bash-alias-completion/\n # git alias example https://stackoverflow.com/questions/342969/how-do-i-get-bash-completion-to-work-with-aliases/1793178\n # a general solution https://github.com/cykerway/complete-alias https://unix.stackexchange.com/a/332522/202329\n local cur;\n cur=$(_get_cword);\n COMPREPLY=( $( apt-cache --no-generate pkgnames \"$cur\" 2> /dev/null ) )\n return 0\n}\ncomplete -F _my_complete_ai_apt_install_alias $default ai show vl # both for apt\n# todo: complete for alias apt not work, error completion: function _apt not found.\n# alias apt='sudo apt'\n# complete -F _apt apt\n\n# \"history setting ~/.bash_history\n# use `^[^#].*\\_$\\n\\_^[^#]` to search two consecutive line that are not comments in bash history file.\n## and search `^\\s*$` for blank lines, to remove blank entries.\nshopt -s globstar # pattern \"**\" used in pathname expansion context will match all files and zero or more directories and subdirectories.\nshopt -s histappend\nshopt -s histverify # If you have the histverify option set (shopt -s histverify), you will always have the opportunity to edit the result of history substitutions. 
http://unix.stackexchange.com/a/4086\n# HISTFILE # could set to a synced folder\nHISTFILESIZE=-1\nHISTSIZE=-1\nHISTCONTROL=ignoreboth\t# ignoredups and ignorespace (line begin with space are not stored)\nHISTIGNORE='?:??:history'\nHISTTIMEFORMAT='%F %T '\t# Record timestamps, use `date -d @${timestamp} '+%D-%T'` (depend on your timezone)\nshopt -s cmdhist\t# attempt to save all lines of a multiple-line command in the same history entry\t\nPROMPT_COMMAND='history -a'\t# Store history immediately\n#+ execute last command\nalias r='fc -s'\nalias bP='bind -P |vim -i NONE \"+setl bt=nofile |setf text\" -c \"nnoremap q :q<Enter>\" -'\n\nanyjobs() { [[ \"$1\" != 0 ]] && echo \"$1\"; } # https://unix.stackexchange.com/a/446149/202329\n# prompt setting\nPS1='\\e[1;34m$(pp=\"$PWD/\" q=${pp/#\"$HOME/\"/} p=${q%?};((${#p}>19))&&echo \"${p::9}…${p:(-9)}\"||echo \"$p\") \\A $(anyjobs \\j)\\$ \\e[m' # none if $HOME, ${t%?} to remove last char of string t. \n# PS1='\\e[1;34m$(p=${PWD/#\"$HOME/\"/};((${#p}>19))&&echo \"${p::9}…${p:(-9)}\"||echo \"$p\") \\A $(anyjobs \\j)\\$ \\e[m' # full pathname if $HOME\n# PS1='\\e[1;34m [$(p=${PWD/#\"$HOME\"/\\047~\\047};((${#p}>30))&&echo \"${p::10}…${p:(-19)}\"||echo \"\\w\")]$(anyjobs \\j)\\$ \\e[m'\n# PS1='\\e[1;34m /\\W \\t \\j\\$ \\e[m'\n # https://askubuntu.com/questions/17723/trim-the-terminal-command-prompt-working-directory\n\nfunction settitle() { echo -ne '\\e]0;'\"${1--bash}\"'\\a'; }\n\n# \"pager, \"less, see \" info \" help\nexport PAGER=\"less\"\n# All three pager programs, more, less and lv, support passing parameters via separate environment variables. These variables are named LESS, MORE and LV, respectively. \nexport LESS=\"-isRM\"\n# from http://www.refining-linux.org/archives/3/Configuring-your-console-pager/\n\n# \"edit, see \" vim\nalias vi=vim\n# alias vf='vim -i ~/.viminfof' # use vf for ft\nfunction vf() {\n depth=${1:-1}\n if (($# >= 2)); then\n vim $(find -type f -maxdepth $depth -iname \"*${2}\"); else\n vim $(find -type f -maxdepth $depth);\n fi\n}\nalias v-='vim -c \"set path=$PWD\" -c \"set bt=nofile\" -c \"nn q :q<CR>\" --cmd \"let g:ctrlp_cache_dir=\\\"~/swo/.cache/ctrlp/v-\\\"\" -'\nalias v--='vim -c \"set path=$PWD\" -c \"set bt=nofile\" -c \"nn q :q<CR>\" --cmd \"let g:ctrlp_cache_dir=\\\"~/swo/.cache/ctrlp/v-\\\"\" '\nalias vN='vim -i NONE -c \"set path+=$PWD\" -c \"setf text\" -c \"setl bt=nofile\" -c \"nnoremap q :q<Enter>\" --cmd \"let g:loaded_ctrlp = 1\"'\nalias vNN='vim -N -u NONE -c \"set nocp| nn gt <C-^>|nn <C-l> :bn<CR>| nn <C-h> :bp<CR>| nn ; :|cno ; <C-e><C-u><C-h><Esc><Esc><Esc>| nnoremap q :q<Enter>\"'\nalias vnn=vNN\nalias vq='vim -i NONE -c \"set path+=$PWD\" -c \"nnoremap q :q<Enter>\"'\nalias npp='d:/programfiles/nppbin/notepad++.exe'\nalias vimnoctrlp='vim --cmd \"let g:loaded_ctrlp = 1\"'\n\nalias vimi='vim -i ~/.tmpiviminfo'\nalias v.='vN .'\n# \"vim, see also \" edit \" cpp\n# function v* { if (( $# == 0 )); then vi *; else vi *.$1; fi }\nfunction vv {\n if (( $# == 0 )); then\n # vimnoctrlp *;\n find -L -maxdepth 1 -type f -exec vim --cmd \"let g:loaded_ctrlp = 1\" {} +\n elif (( $# == 2 )); then\n vimnoctrlp *.$1 *.$2;\n else # use only first arg as extension.\n vimnoctrlp *.$1;\n fi\n}\nalias v*=vv\nalias v8=vv\nalias xvi='vimnoctrlp -c \"sil argdo %!xxd\" -c \"sil argdo set bt=nofile\"'\n\n# \"alias\n#alias vim='d:/pkg/dt/vim/vim.exe'\n# alias mysql='/c/Program\\ Files/MySQL/MySQL\\ Server\\ 5.7/bin/mysql.exe'\n\n# \"dirs, see \" cd \" unix.scp:/^.dirs/\nalias d='dirs -v'\nalias pd='pushd'\n\n# CDPATH, 
similar to PATH\n# \"cd, see \" dirs \" info\nfunction abspath() { (cd ${1:-.} && pwd); }\nalias c-='cd ~-'\nalias c.='cd ..'\nalias c..='cd ../..'\n# pust this to '~/.bash_profile' to make it global?\n# mkdir -p list_of_dir && cd list_of_dir[-1]. currently only first word (contain non-space solely) was used.\nfunction mcd() { # mkdir -p list_of_dir && cd list_of_dir[-1]. currently only first word (contain non-space solely) was used.\n if [ \"$#\" -ne 1 ]; then\n echo 'Too many or to few arguments. only one is expected.\n exited.';\n\t\t# echo 'Only **one** arg was expected.';\n return 1;\n elif ! cd $1 &>/dev/null; then\n mkdir -p $1 && cd $1;\n # cd $1 &>/dev/null ||\n # (mkdir -p $1; cd $1);\n # Note that '||' and '&&' in bash are of same precedence.\n # also cd inside '()' never change current directory, since '()' creates subshell.\n # mkdir -p \"$1\" && cd \"$1\";\n fi\n}\n\nalias rm='rm -i'\nalias rm-f='rm -f'\nalias mv='mv -i'\nalias cp='cp -i'\nfunction cp- { if (( $# == 0 )); then echo '**one** arg, please.'; else cp ${OLDPWD}/$1 .; fi }\n\n# \"info, see \" ll \" ed \" help\nalias wh='command -v' # use shell builtin hash, command -v, type -P. not external command which.\nfunction wh() { command -v \"$@\"; }\nfunction h() { \"$*\" --help || \"$*\" -h || help \"$*\"; }\n# function pe() { str=${1^^}; printev ${str}; }\nfunction pe() { str=${1^^}; echo ${!str}; }\nalias str=strings\n\n# \"ls, see \" ll \" cd \" dirs \" info \" edit \" python\n # \" ag \" lang\n\n# \"ll, see \" cd \" dirs \" info\n# alias ll='ls -ahl --color=auto'\n# NOTE: if use nested alias such as `alias llld='ls -d */'`, there will be two slash. https://stackoverflow.com/a/40314363/3625404\n_LS_PRINT_OPTION=' --color=auto --show-control-chars'\nalias ls='\\ls -F --color=auto --show-control-chars'\nalias ll='ls -ahl'\nalias ls.='ls ..'\nalias ld=\"\\ls -d $_LS_PRINT_OPTION */\" # directory\n# function ld() { ls -d $_LS_PRINT_OPTION \"${1}*/\"; } # todo: '$1./' or \"\".\nalias lda=\"\\ls -d $_LS_PRINT_OPTION .*/ */\" # directory, include hidden ones\nalias ldc=\"\\ls -1 -d $_LS_PRINT_OPTION .*/ */\" # directory, in one column\nfunction lf_macro() { # qeatzy's answer https://unix.stackexchange.com/questions/329994/alias-and-functions\n local CMD=${1:-ls} DIR=${2:-.};\n if [ ${CMD} != ls -a ${CMD} != ll ]; then return 1; fi\n bash -c \"$CMD\" '$(find $DIR -maxdepth 1 -type f | cut -c3-)';\n}\nfunction lsf() { lf_macro ls \"$1\"; }\nfunction lf() {\t# show all files, no directories\n\tlocal DIR=\"${@:-.}\"\n\tif [ $DIR = \".\" ]; then\n\t\tll $(find $DIR -maxdepth 1 -type f | cut -c3-);\n\tfi\n}\n# alias lf='lf_macro ll'\n# alias lf='ll $(find -maxdepth 1 -type f)'\n# alias lsf='ls $(find -maxdepth 1 -type f)'\nalias l.=\"\\ls -dF $_LS_PRINT_OPTION .[!.]*\"\nalias l..='ls ..'\nalias llt='ll -t |vN -'\n# alias llt='ll -t |sed -r \"s/(.*) ([^ ]+)$/\\2 \\1/\" |vN -'\nalias llh='ll |head'\nalias llth='ll -t |head'\nalias la='ls -A'\nalias l-='ls ~-'\nalias lld=\"\\ls -ahl $_LS_PRINT_OPTION -d */\" # directory\nalias lS='ls -S'\nalias llS='ll -S'\nalias lls='ll -S'\nalias llsh='ll -S|head'\nalias llsv='ll -S |v-'\nfunction llv () { ll \"$@\" |v-; }\nalias lsh='ls ~'\nalias llh='ll ~'\nalias lsv='la |v-'\nalias laf='la ~'\nalias lR='ls -R'\n# alias cc='clear'\n\n# \"du\nalias du='\\du -h'\nalias dus='\\du -sh'\nalias df='\\df -h'\n\n# \"perl, see \" lang\n# alias pd='perldoc'\nalias pdv='perldoc -v'\nalias pdq='perldoc -q'\nalias pdf='perldoc -f'\n\n# \"lang, see \"\" ls\n# \" python \" R\n\n# \"python, see \"\" lang \" 
cygwin\n# alias pip='python -m pip' # bad for venv, could use wrong pip\nalias py=python\nalias py3=python3\n\n# \"R, see \"\" lang\nalias R='/usr/bin/R --quiet'\n\n# \"cpp, see also \" lang \" edit \" git\nalias vc='vim -i ~/.viminfoc *.{hpp,h,cpp}' \n# alias vc='vim -i ~/.viminfoc *.cpp *.h' \nalias lsc='ls *.{h,cpp}'\n\nalias lfc='find -type f|wc'\nfunction llc() { find |wc; find -type f|wc; }\nfunction llc() { echo $(find |wc) \"\t\" $(find -type f|wc); }\n# \"dir, \"tree, see also \" explorer \" find \" fd(find)\n# bash test if is a directory is git repo. https://stackoverflow.com/questions/2180270/\n# git rev-parse --is-inside-work-tree # works for subdirectories too\n# impl logic of git 'rev-parse'. inside .git dir, 2 folders: objects refs 1 file HEAD. https://stackoverflow.com/a/27452421/3625404\n# alias treev='tree | v-'\nfunction treev() { tree \"$@\" |v-; }\n# alias findv='find | v-'\nfunction findv() {\n if [[ $- != *i* ]]; then\n find $1 -type f;\n else\n find $1 -type f | v-;\n fi\n}\n\n# \" explorer\nalias explorer='cygstart .' # Open Windows Explorer to the current working directory from Cygwin\nalias udiff='vimdiff $HOME/notes/code/git/utility.h utility.h'\n# alias vc='vim -i ~/.viminfoc' # vc for work/project related\nalias vif='vim -i ~/.viminfof' # currently fangtian\nalias vn='vim -i ~/.viminfon -c \"pu_\" -c \"set bt=nofile\" --cmd \"let g:ctrlp_cache_dir=\\\"~/swo/.cache/ctrlp/vn\\\"\" ' # use vn for general notes\nalias vo='vim -i ~/.viminfoo +\" cd ~/tmp\"' # use vo for general notes\nalias vp='vim -i ~/.viminfop' # use vp for python\nalias vj='vim -i ~/.viminfoj -S ~/.vim.j.session' # use vj for java\nfunction vk() { set -o history && set -o histexpand; vim -i ~/.viminfok $($(history -p !!)); }\n# both vk and vkk has a problem, not work for successive ones.\nfunction vkk() { set -o history && set -o histexpand; eval $(history -p !!) 
|v-; }\nalias vb='vim -i ~/.viminfob' # use vp for bash\nalias vbb='vq ~/notes/.bashrc' # use vp for bash\n# \"java\nalias lj='ls *.java'\nalias lc='ls *.class'\nalias rmj='rm -f *.class'\n# alias vimn='vim -u NONE -N'\nalias hs=history\n# alias foo='cd /usr; ls; cd -' # no space before/after equal sign.\nalias ln='ln -n'\nalias lns='ln -s'\nalias px='ps x'\nalias ipp='ipython --quick --no-banner --no-confirm-exit -c \"import numpy as np, pandas as pd; pd.options.display.max_rows = 12; np.set_printoptions(precision=4, suppress=True)\" -i'\n# compile C with here-doc, from file:///E:/bks/ndal/C/21st_Century_C_C_Tips_from_the_New_School_[2E,2015][Ben_Klemens](Book4You).pdf\ngo_libs=\"-lm\"\ngo_flags=\"-g -Wall -include /cygdrive/e/notes/allheads.h -O3\"\nalias go_c=\"c99 -xc - $go_libs $go_flags\"\nalias grep-i='grep -i' \nalias mtmux='tmux start-server;sleep 0.3;tmux new-window notes;tmux new-window java;tmux new-window tmp;tmux new-window fmanager;tmux new-window swo;tmux new-window bks;'\n# \"diff\nalias vd=vimdiff\nvdd () { vimdiff $1 ../$1; }\nvd- () { vimdiff $1 ${OLDPWD}/$1; }\n# bdiff diff files in different branch\n# \"git\nalias g-='git checkout -' # co to alternate branch\nalias gb='git branch'\nalias gilog='git log'\n# alias gls='git ls-tree -r --name-only' # from Git: 1.List all files in a branch, 2.compare files from different branch http://stackoverflow.com/a/1910822/3625404\nfunction gls { if (($# == 0)); then name=$(git rev-parse --abbrev-ref HEAD); else name=$1; fi; echo branch $name; git ls-tree -r --name-only $name | grep -v \"util\\|ignore\\|[mM]akefile\\|\\.mk\"; }\nfunction gll { for branch in adt bi comb dev fc graph lc master math mx of pac str tree trim; do printf \"\\nbranch $branch \\n\"; git ls-tree -r --name-only $branch | grep -v \"util\\|ignore\\|[mM]akefile\\|\\.mk\"; done; }\nalias gl='git status'\nalias gist='git status'\nalias g='git status'\nalias gis='git status'\nalias gcl='(git status) && make clean'\n# alias go='git checkout' # use function instead, reuse zero arg form to 'git branch'.\nfunction go {\n if (($# == 0)); then\n git branch;\n else\n git checkout \"$@\";\n return $?\n fi\n}\n# alias gi='git' # use function instead, reuse zero arg form to 'git log'.\nfunction gi {\n if (($# == 0)); then\n git log;\n else\n git \"$@\";\n return $?\n fi\n}\n\n# \"emacs\nalias em='emacsclient -c \"$@\" -a \" vim\"'\nalias emq='emacs -nw -q' # terminal capture keystroke 'C-c', fail to exit with 'C-x C-c'. 
use 'M-x kill-emacs' instead.\n\n# \"env, see also \" variables\nexport EDITOR=vim\n# \"variables, see also \" env\nfunction e { tmp=${1^^}; echo ${!tmp}; } # indirect expansion, ${!var} , http://stackoverflow.com/a/14204692/3625404\nalias uv='unset -v'\nfunction uuv { unset -v ${@^^}; }\n# eg, `e ldlibs`, `e cflags`, `e path`.\n# \"echo, see also \" env\n# \"print, see also \" echo\n\n# \"func\n\nfunction fj {\n # vim -i NONE -c \"set path+=$PWD\" -c \"setf text\" -c \"setl bt=nofile\" -c \"nnoremap q :q<Enter>\" <(ag \"$*\") ;\n # vim <(ag \"$*\") ;\n # cat <(ag \"$*\") ;\n # ag \"$*\" | vim -i NONE -c \"set path+=$PWD\" -c \"setf text\" -c \"setl bt=nofile\" -c \"nnoremap q :q<Enter>\" - ;\n # ag \"$*\" | vim -i NONE -c \"set path+=$PWD\" -c \"setf text\" -c \"setl bt=nofile\" -c \"nnoremap q :q<Enter>\" - ;\n # vim - < $(ag \"$*\")\n echo ag \"$*\"\n}\n\n# \"grep(search,\"ack), see \" find\nif hash ag &>/dev/null; then alias grep=ag; fi\n# todo: bag for ag \"\\b$1\\b\" ??\n\n# \"find, see \" grep \" dir(tree)\n# \" fd(find utils)\n\nfunction ff() { if (( $# == 0 )); then printf \"usage:\\n\\t ff arg <==> find | grep 'arg'\\n\"; else find | grep $@; fi }\nfunction fd2() { find -maxdepth 2 -type d; }\nfunction fd3() { find -maxdepth 3 -type d; }\nalias findd='find -type d'\nalias findf='find -type f' # fails for 'findf *'\n# function findf() {\n# \"fd(find), see \" find \" tree(dir)\n# test speed on linux kernel source tree, could write your own version in C.\nfunction cnd() { # count directories' entry. -- or better name ffcnt? \n# https://unix.stackexchange.com/questions/90106/whats-the-most-resource-efficient-way-to-count-how-many-files-are-in-a-director\n # ues rsync instead of find + wc. `rsync --stats -ax --dry-run . /vvvvvvvv` https://stackoverflow.com/a/34941137/3625404\n # or using c, here is a rust version: https://github.com/the8472/ffcnt https://stackoverflow.com/a/41903547/3625404\n # local cnt_total=0;\n declare cnt; # make `cnt` local\n declare cnt_total=0;\n for d in \"$(find -maxdepth 1 -type d)\"; do\n if [[ \"$d\" != . ]]; then\n cnt=$(find \"$d\" |wc -l);\n # cnt_total=$(( $cnt_total + $cnt ));\n ((cnt_total+=cnt))\n # cnt_total=`expr $cnt_total + $cnt`;\n # cnt_total=$[cnt_total + cnt];\n printf \"$d:\\t$cnt\\n\";\n fi;\n done;\n printf \"total:\\t$cnt_total\\n\";\n}\nfunction cndd() {\n declare cnt; # make `cnt` local\n declare cnt_total=0;\n for d in \"$(find -maxdepth 1 -type d)\"; do\n if [[ \"$d\" != . ]]; then\n cnt=$(find \"$d\" -type d |wc -l);\n ((cnt_total+=cnt))\n printf \"$d:\\t$cnt\\n\";\n fi;\n done;\n printf \"total:\\t$cnt_total\\n\";\n}\nfunction cndf() {\n declare cnt; # make `cnt` local\n declare cnt_total=0;\n for d in \"$(find -maxdepth 1 -type d)\"; do\n if [[ \"$d\" != . 
]]; then\n cnt=$(find \"$d\" -type f |wc -l);\n ((cnt_total+=cnt))\n printf \"$d:\\t$cnt\\n\";\n fi;\n done;\n printf \"total:\\t$cnt_total\\n\";\n}\nfunction cnda() { find \"$@\"|wc; }\nalias fcnd=cndf\nalias dcnd=cndd\n\n# \"ag\nfunction defag() { # find python definition\n ag '^ *def '\"$1\";\n}\nfunction sdefag() { # find python definition, use Gf_search() in .vimrc\n ag '^ *def '\"$1\" |sed 's/.*def \\(.*\\)(.*$/'\"\\0\\\\/def \\1\\\\\\\\>\\\\/\"'/';\n}\nfunction sbdefag() { # find python definition, search + word boundary, see sdefag.\n ag '^ *def '\"$1\"'\\b' |sed 's/.*def \\(.*\\)(.*$/'\"\\0\\\\/def \\1\\\\\\\\>\\\\/\"'/';\n}\nalias agdef=defag\nalias psd=sdefag\nfunction psd() { sdefag \"$@\"; } # for non-interactive shell\n\nfunction llag() {\n ll|ag \"$@\";\n}\n\nfunction fag() { # find then ag, ignore some directories and file.\n # find . -not \\( -path r -prune \\) -not \\( -path code -prune \\) -iname '*'\"$*\"'*'; # exclude directories, not work.\n find . -not \\( -path ./r -prune \\) -not \\( -path ./code -prune \\) -iname '*'\"$*\"'*'; # exclude directories. see \" find in lx.scp\n # find|ag \"$*\";\n}\nfunction fag2 {\n find . -maxdepth 2 -iname \"$@\";\n}\n\nfunction dag() { # find directory that contain pattern\n find . -not \\( -path ./r -prune \\) -not \\( -path ./code -prune \\) -type d -iname '*'\"$*\"'*';\n}\n\nfunction bag() {\n # also lbag, rbag?\n if (( $# == 1 )) ; then\n ag \"\\b$1\\b\";\n elif (( $# < 1 )) ; then\n :\n else\n ag \"$*\";\n fi\n}\n\nfunction j {\nif [[ \"$@\" == java ]]; then\n echo command is java.\n \"$@\" -help 2>&1 |v-;\n swapfile\nelse\n \"$@\" --help 2>&1 |v-;\nfi\n}\n\n# \"help, \"man, see \" info\nalias ha='help -d' # hd conflict on ubuntu, change to ha.\nalias w='whatis'\nalias a='apropos'\nalias less='less -Mi'\nalias le='less'\nalias lps='ps aux |less'\n# alias info='info --vi-keys' # use ~/.infokey rc/.infokey instead\nalias cm='cppman'\nalias f.='nautilus . &'\n\n# \"TODO\n# make 'ld' -- 'ls -d' accept args. use 'find path -maxdepth 1 -type d'?\n# a better alternative of 'find something | grep [optional |v-]'\n \nfunction h() {\n # bug: 'h grep' hangs and waiting for input. -- 'grep -help' hangs in non-interactive bash.\n# \"TODO, add --help option, add -option to not open vi or specify pager, add debug switch to silent or not-silent stderr. possibly use getopts?\n# \"caution: this function should only used interactively because there is security hole.\n# eg, any command is run verbatim, which could be very dangerous when used\n# non-interactively.\nif (( $# == 0 )); then\n echo \"error: too few arguments. which command's help file do you want to see?\" &&\n return 1; # or as default, invoke help of builtin, or man bash?\n # \"$*\" --help;\n # if \"$*\" --help; then\n # \"$*\" --help |v'+color peaksea';\n # seoul256\nelse\n # 1. space in argument(s)\n # for git, 'git commit -h' for usage and option, 'git commit --help' for man git-commit\n if [[ \"$@\" =~ \" \" ]]; then \"$@\" --help;\n return $?;\n fi\n # bash builtin, 'function case time select [ { [[' are keyword, 'builtin test' is builtin.\n # compgen -kb\n local TYPE=\"$(type -t $@)\"; \n if [ \"$TYPE\" = builtin -o \"$TYPE\" = keyword ]; then help \"$@\";\n return $?;\n fi\n local ERRNO=\"\"; # a fix for unbound variable when 'set -euo pipefail'.\n outfile=~/.tmphelp\n # todo: some command display help and exit with exit code 1. 
eg, showkey, eject.\n if \"$@\" -help &> $outfile; then # for vim, 'vim -h' is prefered than 'vim --help'\n : # null command\n elif \"$@\" --help &> $outfile; then # most case. eg, 'vim --help'\n # echo help file retrieved successfully. via suffix '--help';\n :\n elif \"$@\" -h &> $outfile; then # some rare case, eg, 'java -help'.\n # some command always return 1, even for valid -help option, eg, more, java, jdb.\n :\n else\n # echo else\n ERRNO=1;\n fi\n if [[ $- != *i* ]]; then\n { cat $outfile; return; } # non-interactive shell, cat instead of vim.\n # elif [ \"\" != $ERRNO ]; then\n # echo COMMAND FAILED. help file not found.;\n else\n # vi $outfile '+color peaksea' -c 'setlocal noswapfile' -c 'setlocal bt=nofile';\n vi $outfile -c 'setlocal noswapfile' -c 'setlocal bt=nofile'; # in case color peaksea not exist\n fi\n return $ERRNO;\n # echo \"error: too many arguments. 1 is expected.\";\nfi\n}\n\nfunction hh() { help $1 | less ; }\n\nfunction f {\n # ERRNO=1;\n if [[ -n $ERRNO ]]; then\n echo length nonzero.;\n else\n echo length zero.;\n fi\n}\n\nfunction v() {\n# finished: add apt package search if not command, eg, libc6\nif (( $# == 0 )); then\n echo \"error: too few arguments. 1 is expected.\";\n return 1;\nelif (( $# == 1 )); then\n # if \"$@\" --version 2>/dev/null || \"$@\" -version 2>/dev/null; then\n if \"$@\" --version || \"$@\" -version; then # eg, python\n # echo version info retrieved successfully.;\n :\n else\n apt-cache show \"$@\" | grep \"^Package|^Version\";\n # echo COMMAND FAILED. version info not found.;\n # return 2;\n fi\nelse\n echo \"error: too many arguments. 1 is expected.\";\n return 3;\nfi\n}\n\nfunction hj { \"$@\" --help | less ; }\n\n# \"man\n# alias man='\\man -P vi\\ +\"color\\ peaksea\"\\ -c\\ \"setf\\ text\"\\ -c\\ \"setl\\ bt=nofile\"\\ -'\n# vi +\"color peaksea\" -c \"setf text\" -c \"setl bt=nofile\" -\nif [[ $(uname -o) = Cygwin ]]; then\n# alias R='/cygdrive/d/pkg/dt/R-3.4.3/bin/R.exe --no-save'\nexport MANPAGER='vim --cmd \"let g:loaded_ctrlp = 1\" -c \"color default\" -c \"%! col -b\" -c \"sil file $MAN_PN\" -c \"set bt=nofile ft=man nomod nolist ignorecase\" -c \"sil! %s/\\d\\{1,2\\}m//g\" -c \"sil! /^SYNOPSIS\" -' # https://murukesh.me/2015/08/28/vim-for-man.html\n\nelse\nexport MANPAGER='vim --cmd \"let g:loaded_ctrlp = 1\" -c \"%! 
col -b\" -c \"sil file $MAN_PN\" -c \"set bt=nofile ft=man nomod nolist ignorecase\" -' # https://murukesh.me/2015/08/28/vim-for-man.html\nexport MANPAGER='bash -c \" vim --cmd \\\"let g:loaded_ctrlp = 1\\\" -c \\\"sil file $MAN_PN\\\" -c \\\"set bt=nofile ft=man nomod nolist nospell\\\" </dev/tty <(col -b)\"' # http://vi.stackexchange.com/a/4687/10254\nfi\n# export MANPAGER='vim </dev/tty <(col -b)'\nfunction mg { man $1 | grep $2 | less ; } \n\npe () { # print env, convert to upper first.\n str=${1^^}\n printenv ${str}\n}\n\nfunction crun { make $1 && ./$1; } # make and run for simple c program\n\n# \"ffld, \"download\nffld=\"~/Dowloads/ffld/\"\n\n# note that effect of so could be different from source and .\nso () { source $1 | less; }\n\n# tmux has-session -t development ||\n# tmux new -s dev\n# tmux attach -t dev\ntmux-new() {\n if [[ -n $TMUX ]]; then\n tmux switch-client -t \"$(TMUX= tmux -S \"${TMUX%,*,*}\" new-session -dP \"$@\")\"\n else\n tmux new-session -s \"$@\"\n fi\n}\n# tmux-new dev\n\n# TMUX= tmux new-session -d -s dev\n# tmux attach -t dev\n\n# If not running interactively, do not do anything\n# [[ $- != *i* ]] && return\n# [[ -z \"$TMUX\" ]] && exec tmux\n# [[ -z \"$TMUX\" ]] && source /home/qeatzy/notes/tmux_startup\n\n# tmux start-server\n# if [[ -z \"$TMUX\" ]]\n# then\n# exec tmux attach -d -t default\n# fi\n\n# \"platform dependent, http://stackoverflow.com/questions/394230/detect-the-os-from-a-bash-script\n# https://gist.github.com/davejamesmiller/1965683\n# elif [[ \"$OSTYPE\" == \"darwin\"* ]]; then\nif [[ \"$OSTYPE\" == \"cygwin\" ]]; then\n alias firefox='/cygdrive/d/pkg/dt/firefox/firefox.exe'\n alias cmd='cygstart c:/windows/system32/cmd'\n alias cpy='/cygdrive/d/pkg/dt/Anaconda2/python' # for plotting on windows, c for conda\n alias icpy='/cygdrive/d/pkg/dt/Anaconda2/Scripts/ipython' # for plotting on windows, c for conda\n# You can run a batch file from a Cygwin shell directly,... it might be simpler to run cmd /c 'foo.bat \"quoted arguments\"'. 
https://superuser.com/a/189094/487198\nelif [[ \"$OSTYPE\" == \"linux-gnu\" ]]; then\n alias em='emacsclient -nw -c \"$@\" -a \" vim\"'\nfi\n\n# \"apt, \"pkg\n# alias ai='sudo pacman -S'\n# alias sagi='sudo apt-get install'\nif type -P apt-cyg &>/dev/null\nthen\n alias ai='apt-cyg install'\n alias ala='apt-cyg listall'\n alias al='apt-cyg list'\n # alias apt='apt-cyg'\n function apt-la { apt-cyg listall \".*${1}.*\"; }\n function aptla { apt-cyg listall \".*${1}.*\"; }\n function apt-l { apt-cyg list \".*${1}.*\"; }\n function aptl { apt-cyg list \".*${1}.*\"; }\n function apt { apt-cyg \"$@\"; }\n alias asa='apt-cyg searchall'\nelif type -P pacman &>/dev/null; then\n alias ai='sudo pacman -S';\n alias pc=pacman\n alias apt=pacman\n alias s='pacman -Ss'\nelif type -P apt-get &>/dev/null; then\n alias ai='sudo apt install';\n function s() { apt-cache search --names-only \"$@\" |v-- -c 'sort' -c 'nn <buffer> <F8> :!sudo apt-get install <cWORD><CR>' -; }\n function ss() { apt-cache search \"$@\" |v-- -c 'sort' -; }\n function show() {\n outfile=~/.tmphelp\n if apt-cache show \"$@\" > $outfile && [ $(wc -l $outfile |cut -d\" \" -f1) -gt 2 ] ; then # second test for virtual package, eg ctags\n :;\n else\n apt-cache search --names-only \"$@\" > $outfile;\n fi\n if [[ $- != *i* ]]; then \n cat $outfile;\n else\n cat $outfile | v-;\n fi\n }\nfi\n\nhash vim ps\n\n#example of bashrc file\n#https://github.com/hashrocket/dotmatrix/blob/master/.bashrc\n# .bashrc, see also .bvimbrc, which is used within vim.\n\n# .bashrc, see .zshrc .bvimbrc\n"
},
{
"alpha_fraction": 0.6426844000816345,
"alphanum_fraction": 0.6638452410697937,
"avg_line_length": 36.568180084228516,
"blob_id": "a286354f4178d92ac29ead0276008fed5fcdc554",
"content_id": "71b442816abf44fe7b17e57fb00e05d431c2f660",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1654,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 44,
"path": "/rc/ipython_function.py",
"repo_name": "Frank2015a/rc",
"src_encoding": "UTF-8",
"text": "import types\n# if not isinstance(arg, types.StringTypes):\n# \"iterable but not string\" http://stackoverflow.com/a/1055378/3625404\n\n# \"todo, see also \" ls\n# use pager -- vim -- to display output of expression, similar but not solved case, How to use Pipe in ipython http://stackoverflow.com/questions/5740835/how-to-use-pipe-in-ipython\n\n# \"ls, see also \" todo\n\ndef lmap(func, *iterables):\n return list(map(func, *iterables))\n\ndef pe(arg): # print each item if iterable, http://stackoverflow.com/a/1952481/3625404\n # TODO if no arg, print last output\n if isinstance(arg, collections.Iterable) and not isinstance(arg, types.StringTypes):\n for x in arg:\n print(x)\n else:\n print(arg)\n\ndef itype(arg): # print type of iterable of object\n if isinstance(arg, collections.Iterable) and not isinstance(arg, types.StringTypes):\n for x in arg:\n print(type(x))\n else:\n print(type(arg))\n\ndef ishape(arg): # print shape of iterable of numpy array-like object, eg, pandas.DataFrame\n if hasattr(arg, 'shape'):\n print(arg.shape)\n elif isinstance(arg, collections.Iterable) and not isinstance(arg, types.StringTypes):\n for x in arg:\n print(x.shape)\n else:\n raise TypeError(\"object has no 'shape'\")\n\ndef dtypes(arg): # print dtypes of iterable of numpy array-like object, eg, pandas.DataFrame\n if hasattr(arg, 'dtypes'):\n print(arg.dtypes)\n elif isinstance(arg, collections.Iterable) and not isinstance(arg, types.StringTypes):\n for x in arg:\n print(x.dtypes)\n else:\n raise TypeError(\"object has no 'dtypes'\")\n\n"
}
] | 3 |
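The "iterable but not a string" guard used throughout `ipython_function.py` is a classic Python idiom; the repo's version is Python 2 flavored (`collections.Iterable`, `types.StringTypes`). As a sketch, the Python 3 equivalent replaces those with `collections.abc.Iterable` and `(str, bytes)`:

```python
# Python 3 version of the "iterable but not string" check used above.
from collections.abc import Iterable

def is_nonstring_iterable(arg):
    # str and bytes are iterable, but we almost never want to walk them
    # character by character when "print each item" is the intent.
    return isinstance(arg, Iterable) and not isinstance(arg, (str, bytes))

assert is_nonstring_iterable([1, 2, 3])
assert is_nonstring_iterable({'a': 1})
assert not is_nonstring_iterable('hello')
assert not is_nonstring_iterable(42)
```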
robotriot/smart_reminders | https://github.com/robotriot/smart_reminders | c7e7b60329fa154a12bab568e499edb87c65463e | 87700445cdabe0b469ee6379010cc75cb11ddebf | bb4af49fab83551594dcb41ac7b7ff96a9dddfd0 | refs/heads/master | 2022-12-05T13:38:22.536356 | 2020-08-24T17:11:53 | 2020-08-24T17:11:53 | 287,790,123 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.580325722694397,
"alphanum_fraction": 0.5830618739128113,
"avg_line_length": 32.2251091003418,
"blob_id": "7d176ca8b4b4e951c44ec4a7b92eb1c84496f088",
"content_id": "807c19bfaf7498ad45dc332e94b124cc0cde7d80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7675,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 231,
"path": "/custom_components/smart_reminders/__init__.py",
"repo_name": "robotriot/smart_reminders",
"src_encoding": "UTF-8",
"text": "from datetime import datetime, timedelta\nimport logging\nimport re\nimport traceback\n\nfrom homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.helpers.entity_component import EntityComponent\nfrom homeassistant.util import Throttle\nimport psycopg2\n\nfrom .const import DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\nMIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)\n\nATTR_TITLE = 'title'\nATTR_DESC = 'description'\nATTR_DUE = 'due_date'\nATTR_PRIORITY = 'priority'\n\n\nCONF_DATABASE = 'database'\n\nCONST_LEADING_ENTITY_NAME = 'reminder_'\n\n\ndef setup(hass, config):\n \"\"\"Set up is called when Home Assistant is loading our component.\"\"\"\n\n conf = config[DOMAIN]\n hass.data[DOMAIN] = {}\n reminders = SmartReminders(hass, conf)\n\n # Return boolean to indicate that initialization was successfully.\n return True\n\n\nclass SmartReminders:\n \"\"\"Main Smart Reminders Service\"\"\"\n\n def __init__(self, hass, config):\n self.hass = hass\n self.conf = config\n self.db = SmartReminderDB(config)\n\n items = []\n try:\n self.items = self.db.get_all_reminders()\n except:\n self.items = []\n\n self.component = EntityComponent(_LOGGER, DOMAIN, hass)\n entities = []\n\n if self.items:\n for item in self.items:\n ent = SmartReminderItem(hass, item, self.db)\n entities.append(ent)\n if entities:\n self.component.add_entities(entities)\n\n hass.services.register(DOMAIN, \"add_task\", self.handle_add_task)\n hass.services.register(DOMAIN, \"complete_task\", self.handle_complete_task)\n hass.services.register(DOMAIN, \"delete_task\", self.handle_delete_task)\n\n async def handle_add_task(self, call):\n \"\"\"Handle the service call.\"\"\"\n await self.add_task(call.data)\n\n async def handle_delete_task(self, call):\n try:\n entity_id = call.data.get('id')\n ent = self.component.get_entity(entity_id)\n idx = ent._id\n await self.db.delete_reminder(idx)\n await self.component.async_remove_entity(entity_id)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n async def handle_complete_task(self, call):\n \"\"\"Handle completing the task and removing it from entities\"\"\"\n try:\n entity_id = call.data.get('id')\n ent = self.component.get_entity(entity_id)\n idx = ent._id\n self.db.complete_reminder(idx)\n if ent.is_repeatable:\n derp = {ent._repeat_type: ent._repeat_number}\n due_date = ent._original_due_date + timedelta(**derp)\n data = {\n ATTR_TITLE: ent._title,\n \"user\": ent._username,\n ATTR_DUE: due_date,\n \"repeat_type\": ent._repeat_type,\n \"repeat_number\": ent._repeat_number,\n \"repeatable\": True\n }\n await self.add_task(data)\n await self.component.async_remove_entity(entity_id)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n async def add_task(self, data):\n try:\n new_item = await self.db.add_reminder(data)\n ent = SmartReminderItem(self.hass, new_item, self.db)\n await self.component.async_add_entities([ent])\n except Exception as e:\n logging.error(traceback.format_exc())\n\n\nclass SmartReminderItem(Entity):\n \"\"\"An individual Smart Reminder\"\"\"\n\n def __init__(self, hass, data, db):\n self.hass = hass\n\n self._title = data[0]\n self._due = data[1]\n self._priority = data[2]\n self._completed = data[3]\n self._id = data[4]\n self._username = data[5]\n self._ignore_count = data[6] if data[6] is not None else 0\n self._repeat_type = data[7]\n self._repeat_number = data[8]\n self._original_due_date = data[9]\n self._db = db\n 
self._overdue = self.is_overdue()\n\n def is_overdue(self, _overdue=False):\n now = datetime.now()\n overdue = _overdue\n if now >= self._due and not _overdue:\n overdue = True\n message = f\"{self._username}, I'm reminding you to {self._title}\"\n self.hass.services.call(\"tts\", \"google_translate_say\", {\n 'entity_id': 'all',\n 'message': message\n })\n new_time = datetime.now() + timedelta(hours=1)\n self._db.set_due_time(self._id, new_time, self._ignore_count)\n self._due = new_time\n elif now <= self._due and _overdue:\n overdue = False\n return overdue\n\n @ property\n def is_repeatable(self):\n return self._repeat_type is not None and self._repeat_number > 0\n\n @ property\n def name(self):\n return f\"{CONST_LEADING_ENTITY_NAME}{self._id}\"\n\n @ property\n def state_attributes(self):\n \"\"\"Returns the name of the reminder\"\"\"\n return {\n \"title\": self._title,\n \"due\": self._due,\n \"completed\": self._completed,\n \"user\": self._username,\n \"ignore_count\": self._ignore_count,\n \"repeatable\": self.is_repeatable,\n \"repeats\": f\"Repeats every {self._repeat_number}{self._repeat_type}\",\n }\n\n @ property\n def state(self):\n return self._overdue\n\n def update(self):\n try:\n self._overdue = self.is_overdue(self._overdue)\n except Exception as e:\n logging.error(traceback.format_exc())\n\n\nclass SmartReminderDB:\n \"\"\"Interface with Postgresql\"\"\"\n\n def __init__(self, config):\n self.psql = psycopg2.connect(\n user=config.get(CONF_USERNAME),\n password=config.get(CONF_PASSWORD),\n host=config.get(CONF_HOST),\n port=config.get(CONF_PORT),\n database=config.get(CONF_DATABASE))\n\n def get_all_reminders(self):\n cursor = self.psql.cursor()\n cursor.execute(\"\"\"select * from reminders where completed = false\"\"\")\n items = cursor.fetchall()\n cursor.close()\n return items\n\n async def add_reminder(self, data):\n cursor = self.psql.cursor()\n is_repeatable = data.get('repeatable')\n repeat_type = data.get('repeat_type') if is_repeatable else ''\n repeat_number = data.get('repeat_number') if is_repeatable else 0\n cursor.execute(\"\"\"INSERT INTO reminders (title, due_date, username, repeat_type, repeat_number, original_due_date) VALUES (%s, %s, %s, %s, %s, %s) RETURNING *\"\"\",\n (data.get(ATTR_TITLE), data.get(ATTR_DUE), data.get('user'), repeat_type, repeat_number, data.get(ATTR_DUE)))\n item = cursor.fetchone()\n self.psql.commit()\n cursor.close()\n\n return item\n\n def complete_reminder(self, idx):\n cursor = self.psql.cursor()\n cursor.execute(\"\"\"UPDATE reminders SET completed=true WHERE id=%s\"\"\", [idx])\n self.psql.commit()\n cursor.close()\n\n async def delete_reminder(self, idx):\n cursor = self.psql.cursor()\n cursor.execute(\"\"\"DELETE FROM reminders WHERE id=%s\"\"\", [idx])\n self.psql.commit()\n cursor.close()\n return True\n\n def set_due_time(self, idx, due_date, ct=0):\n cursor = self.psql.cursor()\n new_count = ct + 1\n cursor.execute(\"\"\"UPDATE reminders SET due_date=%s, ignore_count=%s WHERE id=%s\"\"\", [due_date, new_count, idx])\n self.psql.commit()\n cursor.close()\n"
},
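The repeat logic in `handle_complete_task` above builds a `timedelta` from data by keyword-expanding a one-entry dict (`{ent._repeat_type: ent._repeat_number}`). A small runnable sketch of that trick; the `next_due` helper is illustrative, not part of the integration:

```python
# Building a timedelta dynamically from (unit, amount) pairs, as the
# repeat logic above does with {repeat_type: repeat_number}.
from datetime import datetime, timedelta

def next_due(due, repeat_type, repeat_number):
    # repeat_type must be a valid timedelta keyword: 'days', 'weeks',
    # 'hours', 'minutes', 'seconds', ...
    return due + timedelta(**{repeat_type: repeat_number})

due = datetime(2020, 8, 24, 9, 0)
print(next_due(due, 'days', 7))    # 2020-08-31 09:00:00
print(next_due(due, 'hours', 12))  # 2020-08-24 21:00:00
```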
{
"alpha_fraction": 0.6486486196517944,
"alphanum_fraction": 0.6486486196517944,
"avg_line_length": 17.5,
"blob_id": "809dd586252ff00c782fcda5573bc5649f51bdb6",
"content_id": "11f7be80d4c3526fb99cc2ded52553294b69d162",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 37,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/custom_components/smart_reminders/const.py",
"repo_name": "robotriot/smart_reminders",
"src_encoding": "UTF-8",
"text": "DOMAIN = \"smart_reminders\"\nDB = \"db\"\n"
},
{
"alpha_fraction": 0.7777777910232544,
"alphanum_fraction": 0.7777777910232544,
"avg_line_length": 17,
"blob_id": "df23704952a401274aefb1eb72541394877412fd",
"content_id": "dadad0ba7771343af77c76ceb825bc3315aacab7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 72,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 4,
"path": "/README.md",
"repo_name": "robotriot/smart_reminders",
"src_encoding": "UTF-8",
"text": "# Smart Reminders for Home Assistant\n\n## Installation\n# smart_reminders\n"
}
] | 3 |
AKkkAha/API_WITH_EXCEL | https://github.com/AKkkAha/API_WITH_EXCEL | 4ce5e381fc5b508ccf322513563978dfb459a3f2 | 483bdd077e57469494b0af2f442bf752b9c8ae4b | ad5b1928547dab58a7b40c2b93ff13f5c047704f | refs/heads/master | 2020-05-30T04:27:16.392925 | 2019-08-20T03:04:31 | 2019-08-20T03:04:31 | 189,538,008 | 0 | 0 | null | 2019-05-31T06:13:05 | 2019-06-24T02:51:07 | 2019-06-24T03:04:19 | Python | [
{
"alpha_fraction": 0.532517671585083,
"alphanum_fraction": 0.535341203212738,
"avg_line_length": 37.357398986816406,
"blob_id": "7e2c06df6c7f822122e6ceb7585e52e519ba0f27",
"content_id": "7afb4984ecc926f7a9456858338e3008127cf0dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10853,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 277,
"path": "/Execute.py",
"repo_name": "AKkkAha/API_WITH_EXCEL",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\nimport xlrd\nimport os\nimport json\nimport glob\nimport config\nimport re\nimport HTTP_API\nimport time\nfrom parse_dict import *\nimport logger\nimport sys\nfrom HTML import *\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\npre_case_list = []\npre_recv = None\npre_var = config.custom_var\ntitledict = {}\nlogr = None\nlogl = None\n\n\ndef exec_test(times=1):\n global pre_case_list, pre_recv, pre_var, titledict, logr, logl\n #add by zx---begin\n # global my_token\n # my_token = None\n # add by zx---end\n filename = glob.glob(sys.path[0] + os.sep + '*.xls*')[0]\n wb = xlrd.open_workbook(filename)\n for num in range(times):\n for testsheet in config.test_module.keys():\n pre_case_list = []\n pre_recv = None\n pre_var = config.custom_var\n titledict = {}\n logr = logger.rstcls.initial(testsheet + \"_result\")\n logl = logger.logcls(testsheet)\n logl.log(\"Case module : \" + testsheet)\n logl.log(\"Test Round : Round \" + str(num + 1))\n logr.log(\"Case module : \" + testsheet)\n logr.log(\"Test Round : Round \" + str(num + 1))\n table = wb.sheet_by_name(testsheet)\n caselist = get_case(config.test_module[testsheet], table)\n for case_num in caselist:\n api_run(table, int(case_num))\n logl.log(\"Round \" + str(num + 1) + \" finished\")\n logr.log(\"Round \" + str(num + 1) + \" finished\")\n\n\ndef get_title_index(title_list):\n global titledict\n for title in title_list:\n titledict[title.encode(\"utf-8\")] = title_list.index(title)\n return titledict\n\n\ndef get_case(sheet_list, table):\n case_list = []\n if sheet_list:\n for case_srl in sheet_list:\n if type(case_srl) is int:\n case_list.append(case_srl)\n else:\n extent = case_srl.split('-')\n for i in range(int(extent[0]), int(extent[1])+1):\n case_list.append(i)\n else:\n case_list = [i + 1 for i in range(table.nrows)]\n return case_list\n\ndef make_headers_json(header_str):\n \"\"\"\n :param header_str: 从chrome控制台直接复制出来的headers外面用单引号\n :return: json_str\n \"\"\"\n if header_str:\n headers_li = header_str.split('\\n')\n header_ch = []\n for each in headers_li:\n each = each.replace(' ', '', 1)\n each_li = each.split(':', 1)\n each_li[0] = \"\\'\" + each_li[0] + \"\\':\"\n each_li[1] = \"\\'\" + each_li[1] + \"\\',\\n\"\n each_str = ''.join(each_li)\n header_ch.append(each_str)\n all_str = ''.join(header_ch)\n headers = json.dumps(eval('{' + all_str[:-3] + '}'))\n else:\n headers = '{\"Content-Type\": \"application/x-www-form-urlencoded\", \"fronttype\": \"scp-admin-ui\"}'\n\n return headers\n\ndef api_run(table, case_num):\n print \"run case \" + str(case_num)\n global pre_case_list, pre_recv, pre_var, logr, logl\n global titledict\n global my_token\n if not titledict:\n titledict = get_title_index(table.row_values(0))\n caseinfo = table.row_values(case_num)\n url_addr = caseinfo[titledict[\"URL_ADDR\"]]\n url_addr = deal_var(url_addr, caseinfo, table)\n url = caseinfo[titledict[\"域名IP及端口\"]] + url_addr\n msg = caseinfo[titledict[\"REQUEST_MESSAGE\"]]\n try:\n msg_loads = json.loads(msg)\n except:\n msg_loads = None\n if type(msg_loads) is dict:\n msg = deal_var(str(msg), caseinfo, table)\n else:\n msg = deal_var(str(msg), caseinfo, table)\n http_test = HTTP_API.HTTP_Cls(table.name)\n # add by zx---begin\n # if my_token:\n # http_test.headers[\"authorization\"] = my_token\n # print(\"header is set by token={}\".format(http_test.headers))\n # if case_num in (4,5):\n # http_test.headers[\"Content-Type\"] = \"application/json;charset=UTF-8\"\n # print(\"header is set by case {0}={1}\".format(case_num, 
http_test.headers))\n # add by zx---end\n headers = caseinfo[titledict[\"HEADERS\"]]\n headers = make_headers_json(headers.encode('utf-8'))\n headers = json.loads(deal_var(headers, caseinfo, table))\n if caseinfo[titledict[\"请求方法\"]].upper() == \"GET\":\n recv_msg = http_test.get_msg(url, msg, headers)\n else:\n recv_msg = http_test.post_msg(url, msg, headers)\n #add by zx---begin\n # if \"token\" in recv_msg:\n # dict_tmp = eval(recv_msg)\n # if \"data\" in dict_tmp and \"token\" in dict_tmp[\"data\"]:\n # my_token = dict_tmp[\"data\"][\"token\"]\n # print(\"my_token set to:{}\".format(my_token))\n #add by zx---end\n if int(case_num) not in pre_case_list:\n pre_case_list.append(int(case_num))\n check_flag = check_result(recv_msg, caseinfo)\n if check_flag is None:\n print \"用例 PASS %s\" % caseinfo[titledict[\"用例标题\"]]\n logr.log(\"用例 PASS %s %s\" % (table.name, caseinfo[titledict[\"用例标题\"]]))\n logl.debug(\"用例 PASS %s\" % caseinfo[titledict[\"用例标题\"]])\n else:\n print \"用例 FAIL %s fail_result: %s\" % (caseinfo[titledict[\"用例标题\"]], str(check_flag))\n logr.log(\"用例 FAIL %s %s fail_result: %s\" % (table.name, caseinfo[titledict[\"用例标题\"]], str(check_flag)))\n logl.debug(\"用例 FAIL %s fail_result: %s\" % (caseinfo[titledict[\"用例标题\"]], str(check_flag)))\n # print \"check_failed: \" + str(check_flag)\n try:\n recv_msg = json.loads(recv_msg)\n pre_recv = recv_msg\n except:\n pass\n if caseinfo[titledict[\"REMAIN_PARAM\"]]:\n remain_param_list = caseinfo[titledict[\"REMAIN_PARAM\"]].split()\n for remain_param in remain_param_list:\n remain_param = remain_param.strip()\n remain_value = find_from_dict(remain_param, recv_msg)\n pre_var[remain_param+'_'+str(case_num)] = remain_value\n print \"pre_var\"\n print pre_var\n print \"pre_case_list\"\n print pre_case_list\n return recv_msg\n\n\n# def deal_var_dict(msg, msg_loads, caseinfo, table):\n# global pre_recv\n# var_list = re.findall(r'\".*?\":\\s+?\"\\${.*?}\"', msg)\n# key_list = []\n# value_list = []\n# if var_list:\n# for item in var_list:\n# value_list.append(item.split(\"${\")[-1].strip('}\"'))\n# key_list.append(item.split(\"${\")[0].strip('\"').strip().strip(':').strip('\"'))\n# if caseinfo[titledict[\"前置条件\"]]: # 表格内多个前置条件用空格隔开\n# for pre_case in str(caseinfo[titledict[\"前置条件\"]]).split():\n# pre_case = int(float(pre_case))\n# if pre_case in pre_case_list:\n# pass\n# else:\n# pre_case_list.append(pre_case)\n# pre_recv = api_run(table, pre_case)\n# for pre_condition in value_list:\n# if pre_condition not in pre_var.keys():\n# # pre_var[pre_condition] = Check(pre_condition, msg_loads)\n# pre_var[pre_condition] = eval(\"pre_recv\" + search_dict(pre_condition, pre_recv))\n# for var in value_list:\n# var_key = key_list[value_list.index(var)]\n# if var == \"timestamp\":\n# exec (\"msg_loads\" + search_dict(var_key, msg_loads) + \"=\" + time.time())\n# else:\n# exec (\"msg_loads\" + search_dict(var_key, msg_loads) + \"='\" + str(pre_var[var]) + \"'\")\n# else:\n# if caseinfo[titledict[\"前置条件\"]]:\n# for pre_case in str(caseinfo[titledict[\"前置条件\"]]).split():\n# pre_case = int(float(pre_case))\n# if pre_case in pre_case_list:\n# pass\n# else:\n# pre_case_list.append(pre_case)\n# pre_recv = api_run(table, pre_case)\n# return msg_loads\n\n\ndef deal_var(msg, caseinfo, table):\n global pre_recv\n var_list = re.findall(r'\\${(.*?)}', msg)\n if var_list:\n if caseinfo[titledict[\"前置条件\"]]:\n for pre_case in str(caseinfo[titledict[\"前置条件\"]]).split():\n pre_case = int(float(pre_case))\n if pre_case in pre_case_list:\n pass\n else:\n pre_recv = 
api_run(table, pre_case)\n for pre_condition in var_list:\n if pre_condition not in pre_var.keys():\n # pre_var[pre_condition] = Check(pre_condition, msg_loads)\n pre_var[pre_condition] = eval(\"pre_recv\" + search_dict(pre_condition, pre_recv))\n for var in var_list:\n msg = msg.replace('${' + str(var) + '}', str(pre_var[var]))\n else:\n if caseinfo[titledict[\"前置条件\"]]:\n for pre_case in str(caseinfo[titledict[\"前置条件\"]]).split():\n pre_case = int(float(pre_case))\n print(\"pre_case={}\".format(pre_case))\n print(\"pre_case_list={}\".format(pre_case_list))\n if pre_case in pre_case_list:\n pass\n else:\n # pre_case_list.append(pre_case)\n pre_recv = api_run(table, pre_case)\n print(\"pre_recv={}\".format(pre_recv))\n return str(msg)\n\n\ndef check_result(recv_msg, caseinfo):\n try:\n recv_msg = json.loads(recv_msg)\n exp_code = caseinfo[titledict[\"EXPECTED_CODE\"]]\n if exp_code:\n get_code = find_from_dict(\"code\", recv_msg)\n if exp_code != get_code:\n return \"code = \" + str(get_code)\n if caseinfo[titledict[\"EXPECTED_RESULTS\"]]:\n try:\n result_dict = json.loads(caseinfo[titledict[\"EXPECTED_RESULTS\"]])\n miss_list = compare_dict(result_dict, recv_msg)\n if miss_list:\n return miss_list # 返回缺少的值\n except:\n result = json.loads(caseinfo[titledict[\"EXPECTED_RESULTS\"]])\n if recv_msg != result:\n return recv_msg\n except:\n if caseinfo[titledict[\"EXPECTED_RESULTS\"]]:\n try:\n json.loads(caseinfo[titledict[\"EXPECTED_RESULTS\"]])\n return recv_msg\n except:\n result = str(caseinfo[titledict[\"EXPECTED_RESULTS\"]])\n if recv_msg != result:\n return recv_msg\n return None\n\n\nif __name__ == \"__main__\":\n exec_test()\n obj = Html(logger.now + \"result\")\n loglist = []\n for module in config.test_module:\n logfile = obj.find_new_file(os.path.join(os.getcwd(), \"log\", module))\n loglist.append(logfile)\n obj.parse_logfile(loglist)\n"
},
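`make_headers_json` above turns a raw header block copied from the Chrome console into JSON by string surgery plus `eval`. A safer sketch of the same idea builds the dict directly; `headers_to_dict` is a hypothetical helper for illustration, not part of the project:

```python
# Eval-free variant of the headers-block-to-dict conversion performed by
# make_headers_json: split on newlines, then on the first colon.
import json

def headers_to_dict(header_str):
    headers = {}
    for line in header_str.strip().splitlines():
        key, _, value = line.partition(':')
        headers[key.strip()] = value.strip()
    return headers

raw = """Content-Type: application/x-www-form-urlencoded
fronttype: scp-admin-ui"""
print(json.dumps(headers_to_dict(raw)))
# {"Content-Type": "application/x-www-form-urlencoded", "fronttype": "scp-admin-ui"}
```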
{
"alpha_fraction": 0.5402278304100037,
"alphanum_fraction": 0.5570078492164612,
"avg_line_length": 49.54910659790039,
"blob_id": "9f2e4a24420f862c578e0914a7d447629c06f2ea",
"content_id": "f28b1edad83bf0d76fd78bf963e3c71ff0c9ee53",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11453,
"license_type": "no_license",
"max_line_length": 481,
"num_lines": 224,
"path": "/HTML.py",
"repo_name": "AKkkAha/API_WITH_EXCEL",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\nimport os\nimport sys\nimport re\nimport collections\nimport shutil\nfrom os.path import join\nimport time\nfrom pyh import *\nimport cPickle as pickle\nimport config\nfrom platform import system\n# from ExcelAccessor import *\nreload(sys)\nsys.setdefaultencoding('utf8')\nsys.path.append('log')\nRECORD_TITLE = '测试'\n\nclass Html:\n def __init__(self, recordFileName):\n #print type(recordFileName)\n self.recordFileName = recordFileName\n timestr = time.strftime('%Y-%m-%d %H:%M')\n self.page = PyH(timestr+RECORD_TITLE)\n self.page << h1(timestr+RECORD_TITLE, align='center')\n self.page.addJS('mode.js')\n self.page.addCSS(\"http://libs.baidu.com/bootstrap/3.0.3/css/bootstrap.min.css\")\n self.page.addJS('http://libs.baidu.com/jquery/2.0.0/jquery.min.js')\n self.page.addJS('http://libs.baidu.com/bootstrap/3.0.3/js/bootstrap.min.js')\n self.page.addCSS('mode.css')\n self.versionNumber=0\n self.passcase_num, self.failcase_num = 0, 0\n self.r = self.page << div(cl=\"panel panel-default\")\n self.r << div(h2(\"测试概况\", align='left'),cl=\"panel - heading\")\n self.r = self.r << div(cl=\"panel-body\")\n self.t = table(cl=\"table\",body={\"width\": \"80%\", \"margin\": \"auto\"})\n self.t << tr(td('执行轮数',align=\"right\",width=\"5%\")+td('测试用例组名称',align=\"left\",width=\"35%\")+td('测试总数',align=\"left\",width=\"10%\")+td('成功',align=\"left\",width=\"10%\")+td('失败',align=\"left\",width=\"10%\")+td('执行记录',align=\"left\",width=\"10%\"),id=\"header_row\")\n self.write_page()\n self.page << self.t\n #print type(self.page)\n\n def write_page(self):\n f = open(\"cpickle.db\", \"wb\")\n pickle.dump(self.t, f)\n f.close()\n\n def read_page(self):\n f = open(\"cpickle.db\", \"rb\")\n self.t = pickle.load(f)\n\n def add_result(self, resultlist):\n self.r << p(\"总共执行用例: %d | 通过:%d | 失败(测试预期结果不对): %d\" %(resultlist[0]+resultlist[1],resultlist[0],resultlist[1]))\n self.r << p(\"用例通过率:{:.2f}%\".format(float(resultlist[0])/float((resultlist[0]+resultlist[1]))*100))\n\n def add_table(self, name, passValue, failValue):\n #self.r=self.page << table(caption=\"chen\",border=\"1\",cl=\"table1\",cellpadding=\"0\",cellspacing=\"0\",align='center',width=1200)\n self.i = self.page << table(cl=\"table table-bordered\",body={\"width\": \"80%\", \"margin\": \"auto\"})\n #self.r << colgroup()\n #self.r<<colgroup(col(align=\"left\",width=\"50%\")+col(align=\"right\",width=\"10%\")+col(align=\"right\",width=\"10%\")+col(align=\"right\",width=\"10%\")+col(align=\"right\",width=\"10%\"))\n if failValue:\n self.i << tr(td(\"\",align=\"right\",width=\"5%\") + td(name,align=\"left\",width=\"35%\") + td(str(passValue+failValue),align=\"left\",width=\"10%\") + td(str(passValue),align=\"left\",width=\"10%\") + td(str(failValue),align=\"left\",width=\"10%\") + td(a(\"查看详情\",href=\"javascript:void(0)\",onclick=\"showClassDetail(this.parentNode.parentNode)\"),align=\"left\",width=\"10%\"),cl=\"testclass failClass\")\n else:\n self.i << tr(td(\"\",align=\"right\",width=\"5%\") + td(name, align=\"left\", width=\"35%\") + td(str(passValue+failValue),align=\"left\",width=\"10%\") + td(str(passValue),align=\"left\",width=\"10%\") + td(str(failValue),align=\"left\",width=\"10%\") + td(a(\"查看详情\",href=\"javascript:void(0)\",onclick=\"showClassDetail(this.parentNode.parentNode)\"),align=\"left\",width=\"10%\"),cl=\"testclass passClass\")\n\n def add_tr(self, isPass, name, round, row_list):\n self.read_page()\n if isPass == \"FAIL\":\n self.i << tr(td(round) + td(name, cl=\"failCase\") + td(a(\"FAIL\", cl=\"popup_link\", 
onfocus=\"this.blur();\", href=\"javascript:showTestDetail('div_caseRun.%s')\" % (name))+div(div(a(\"[x]\",onfocus=\"this.blur();\",onclick=\"document.getElementById('div_caseRun.%s').style.display = 'none'\" %(name)),style=\"text-align: right; color:red; cursor:pointer;\")+p(row_list), id=\"div_caseRun.%s\" %(name), cl='popup_window',style=\"display: none;\")), cl=\"testcase\", id=\"caseRun.%s\" %(name))\n else:\n self.i << tr(td(round) + td(name, cl=\"passCase\") + td(a(\"PASS\", cl=\"popup_link\", onfocus=\"this.blur();\", href=\"javascript:showTestDetail('div_caseRun.%s')\" % (name))+div(div(a(\"[x]\",onfocus=\"this.blur();\",onclick=\"document.getElementById('div_caseRun.%s').style.display = 'none'\" %(name)),style=\"text-align: right; color:red; cursor:pointer;\")+p(row_list), id=\"div_caseRun.%s\" %(name), cl='popup_window',style=\"display: none;\")), cl=\"testcase\", id=\"caseRun.%s\" %(name))\n #self.page << self.r\n\n def createhtml(self):\n #self.page<<self.t\n print \"html path:[%s]\" % (self.record_path + self.recordFileName+'.html')\n self.recordFileName = self.recordFileName.decode('gbk').encode('utf-8')+\".html\" if system() == 'Linux' else self.recordFileName + \".html\"\n print self.record_path+self.recordFileName\n self.page.printOut(self.record_path+self.recordFileName)\n self.page = None\n self.page = PyH(RECORD_TITLE)\n self.page << h1(RECORD_TITLE, align='center')\n\n def timespace(self,start,end):\n day_s=start.split(\"-\")[0]\n time_s=start.split(\"-\")[1]\n\n day_e=end.split(\"-\")[0]\n time_e=end.split(\"-\")[1]\n\n #print day_s,time_s,day_e,time_e\n [hour_s, minu_s, sec_s] = time_s.split('_')\n [hour_e, minu_e, sec_e] = time_e.split('_')\n if day_s == day_e:\n spacetime=int(hour_e)*3600+int(minu_e)*60+int(sec_e)-(int(hour_s)*3600+int(minu_s)*60+int(sec_s))\n # print spacetime\n else:\n day_s=day_s.split('-')[-1]\n day_e = day_e.split('-')[-1]\n spacetime = int(hour_e) * 3600 + int(minu_e) * 60 + int(sec_e) + 3600*24 - (int(hour_s) * 3600 + int(minu_s) * 60 + int(sec_s))\n return spacetime\n\n def parse_entitylog(self,entitylog):\n\n result = 1 if entitylog.find(\":Pass\") != -1 else 0\n if result:\n self.passcase_num += 1\n else:\n self.failcase_num += 1\n index = entitylog.find(\"this case spend\")\n index_end = entitylog[index+17:].find('s')\n time = '%.3f' % (float(entitylog[index+17:][:index_end]))\n time = str(float(time)*1000)+\"毫秒\"\n #print 'time=',time.decode('gbk').encode('utf-8')\n return result, time\n\n def add_cssjs_to_html(self):\n htmlfile = self.record_path + self.recordFileName\n jsfile = 'mode.js'\n cssfile = 'mode.css'\n csstext = open(cssfile, 'r').read()\n jstext = open(jsfile, 'r').read()\n #print type(jstext)\n #print type(csstext)\n line = True\n #with open(htmlfile, \"r\", encoding=\"utf-8\") as f1, open(\"%s.bk\" % htmlfile, \"w\", encoding=\"utf-8\") as f2:\n with open(htmlfile, \"r\") as f1, open(\"%s.bak\" % htmlfile, \"w\") as f2:\n f2.write(r'<meta http-equiv=\"Content-Type\" content=\"text/html;charset=utf-8\">')\n for line in f1.readlines():\n if 'src=\"mode.js\"' in line:\n line = line.replace('src=\"mode.js\"', '')\n line = line.replace('type=\"text/javascript\">', 'type=\"text/javascript\">'+jstext)\n #print line\n elif 'href=\"mode.css\"' in line:\n line = '<style type=\"text/css\" media=\"screen\">' + csstext + '</style>'\n #print line\n f2.write(line)\n f1.close()\n f2.close()\n os.remove(htmlfile)\n os.rename(\"%s.bak\" % htmlfile, htmlfile)\n\n def parse_logfile(self, logcol):\n __FORMAT = '%Y-%m-%d %H:%M:%S'\n casedict 
= collections.OrderedDict()\n result_dict = {}\n for logfile in logcol:\n print logfile\n round = []\n postmsg, recvmsg = \"\", \"\"\n caseflag, postflag, recvflag = 0, 0, 0\n entitydict = None\n symbol = os.sep\n self.record_path = os.path.join(sys.path[0], \"log\") + symbol\n casemodule = logfile.split(symbol)[-2]\n with open(logfile) as lf:\n for line in lf:\n if casemodule in casedict:\n entitydict = casedict[casemodule]\n else:\n entitydict = collections.OrderedDict()\n if \"Test Round\" in line:\n round.append(line.strip('\\r\\n').split(':')[-1])\n if \"post to\" in line or \"get from\" in line:\n postmsg = line.strip('\\r\\n').split('- INFO -')[-1]\n postflag = 1\n elif \"recv :\" in line:\n recvmsg = line.strip('\\r\\n').split('- INFO -')[-1]\n elif \"DEBUG\" in line and postflag == 1:\n conc = line.strip('\\r\\n').split(\"- DEBUG -\")[-1]\n result, casename = tuple(conc.split()[1:3])\n logdetail = postmsg + '<p>' + recvmsg + '<p>' + conc\n entitylist = [round[-1], result, logdetail]\n entitydict[casename + round[-1]] = entitylist\n casedict[casemodule] = entitydict\n pass_num = 0\n fail_num = 0\n for casename, entitylist in casedict[casemodule].items():\n if entitylist[1] == \"PASS\":\n pass_num += 1\n self.passcase_num += 1\n else:\n fail_num += 1\n self.failcase_num += 1\n result = 1 if fail_num == 0 else 0\n result_dict[casemodule] = {\"result\": result, \"info\": [pass_num, fail_num]}\n self.add_result([self.passcase_num, self.failcase_num])\n for resultcase, resultitem in result_dict.items():\n if not resultitem[\"result\"]:\n self.add_table(resultcase, resultitem[\"info\"][0], resultitem[\"info\"][1])\n for entitynameitem, entityrstitem in casedict[resultcase].items():\n if \"FAIL\" not in entityrstitem:\n self.add_tr(entityrstitem[1], entitynameitem, entityrstitem[0], entityrstitem[2])\n for entitynameitem, entityrstitem in casedict[resultcase].items():\n if \"FAIL\" in entityrstitem:\n self.add_tr(entityrstitem[1], entitynameitem, entityrstitem[0], entityrstitem[2])\n for resultcase, resultitem in result_dict.items():\n if resultitem[\"result\"]:\n self.add_table(resultcase, resultitem[\"info\"][0], resultitem[\"info\"][1])\n for entitynameitem, entityrstitem in casedict[resultcase].items():\n self.add_tr(entityrstitem[1], entitynameitem, entityrstitem[0], entityrstitem[2])\n self.createhtml()\n self.add_cssjs_to_html()\n\n\n\n def find_new_file(self, dir):\n file_lists = os.listdir(dir)\n file_lists.sort(key=lambda fn: os.path.getmtime(dir + \"\\\\\" + fn)\n if not os.path.isdir(dir + \"\\\\\" + fn) else 0)\n file = os.path.join(dir, file_lists[-1])\n return file\n\n\ndef make_html():\n obj = Html(\"result\")\n loglist = []\n for module in config.test_module:\n logfile = obj.find_new_file(os.path.join(sys.path[0], \"log\", module))\n loglist.append(logfile)\n obj.parse_logfile(loglist)\n\n\nif __name__ == \"__main__\":\n make_html()\n"
},
{
"alpha_fraction": 0.5302042365074158,
"alphanum_fraction": 0.5362885594367981,
"avg_line_length": 44.117645263671875,
"blob_id": "9b2e28a5eaf95ec8b4a4288fc1f334095b525768",
"content_id": "4c60c067e08472a1bd9013e49c1ef702d2009417",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2305,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 51,
"path": "/HTTP_API.py",
"repo_name": "AKkkAha/API_WITH_EXCEL",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport requests\nimport json\nimport logger\n# import time\n\n\nclass HTTP_Cls(object):\n def __init__(self, arg):\n self.log = logger.logcls(arg)\n self.r = None\n # application/json;charset=UTF-8\n # application/x-www-form-urlencoded\n # self.headers = {'Content-Type': 'application/x-www-form-urlencoded', 'authorization': 'eyJhbGciOiJSUzUxMiJ9.eyJzdWIiOiJhZG1pbiIsImV4cCI6MTU5Mjg3NzI5NX0.NaFI2zH5ESVdvGJgrNzE63qCWxeWY3ZGnVQJmf7alZjpvPhnITrRFNTv4E6riWLhcWQwHwf_v_p891b1OqM9BcXf3KTkVemDRGPjVxC8zxjiyRc6fEV1ZJ2_aVuHVd2bEzU3wBAiNkLUaEu-DmLsIfPczBPrGJiQ1tT504IgIkA', 'fronttype': 'scp-admin-ui'}\n # 登陆 data='username=admin&password=YWRtaW4%3D'\n\n def post_msg(self, url, post_data=\"\", headers=None):\n self.r = requests.post(url=url, data=post_data.encode(\"utf-8\"), headers=headers)\n print \"------ post to %s ------: data = %s, headers = %s\" % (url, json.dumps(post_data), headers)\n self.log.log(\"post to %s : json_data = %s, headers = %s\" % (url, json.dumps(post_data), headers))\n if len(self.r.text) < 2000:\n print \"-------- recv ---------: %s\" % self.r.text\n else:\n print \"-------- recv ---------: %s\" % \"get messege successfully but it's too long to show you !\"\n self.log.log(\"recv : %s\" % self.r.text)\n # try:\n # return self.r.json(), self.r.headers\n # except Exception as e:\n # return e, self.r.headers\n return self.r.text\n\n def get_msg(self, url, param=None, headers=None):\n self.r = requests.get(url=url, params=param, headers=headers)\n print \"------- get from %s ------: param = %s, headers = %s\" % (url, json.dumps(param), headers)\n # self.r = requests.get(url=url, params=param, headers=headers)\n # print \"get from %s ------: param = %s\" % (url, json.dumps(param))\n self.log.log(\"get from %s : param = %s, headers = %s\" % (url, json.dumps(param), headers))\n if len(self.r.text) < 2000:\n print \"-------- recv ---------: %s\" % self.r.text\n else:\n print \"-------- recv ---------: %s\" % \"get messege successfully but it's too long to show you !\"\n self.log.log(\"recv : %s\" % self.r.text)\n return self.r.text\n # try:\n # return self.r.json(), self.r.headers\n # except Exception as e:\n # if e is ValueError:\n # return {\"code\": 200}, self.r.headers\n # else:\n # return e, self.r.headers\n"
},
{
"alpha_fraction": 0.5537325739860535,
"alphanum_fraction": 0.5582444667816162,
"avg_line_length": 27.68235206604004,
"blob_id": "e9fa41b62f7427b6eb4dbbc50b23358c8ba3c132",
"content_id": "f0683f31bdade7751c503142d826d779578ffa85",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2438,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 85,
"path": "/logger.py",
"repo_name": "AKkkAha/API_WITH_EXCEL",
"src_encoding": "UTF-8",
"text": "import logging\nimport config\nimport time\nimport os\n\n\nnow = time.strftime(\"%Y-%m-%d-%H_%M_%S\", time.localtime(time.time()))\n\n\nclass logcls:\n # _singleton = None\n # logger = None\n\n def __init__(self, arg):\n # now = time.strftime(\"%Y-%m-%d-%H_%M_%S\", time.localtime(time.time()))\n filename = \"log\\\\\"+str(arg)+\"\\\\\"+now + r\"_log.txt\"\n file_dir = os.path.split(filename)[0]\n if not os.path.isdir(file_dir):\n os.makedirs(file_dir)\n # if self._singleton is None:\n # self._singleton = logcls(arg)\n logger = logging.getLogger(arg)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n handler = logging.FileHandler(filename)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n self.logger = logger\n # return self._singleton\n\n def log(self, msg):\n #if logcls._singleton is None:\n #logcls._singleton = logcls()\n self.logger.info(str(msg) + '\\n')\n # logger.debug(msg)\n # logger.warning(msg)\n # logger.info(msg)\n\n def debug(self, msg):\n self.logger.debug(str(msg) + '\\n')\n\n\n\n\nclass rstcls:\n _singleton = None\n logger1 = None\n\n def __init__(self):\n pass\n\n @staticmethod\n def initial(arg):\n\n filename1 = \"log\\\\\"+now + r\"_result.txt\"\n if 'log' not in os.listdir(os.getcwd()):\n try:\n os.mkdir('log')\n except:\n pass\n if rstcls._singleton is None:\n rstcls._singleton = rstcls()\n logger1 = logging.getLogger(str(arg))\n\n if not logger1.handlers:\n logger1.setLevel(level=logging.INFO)\n handler = logging.FileHandler(filename1)\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger1.addHandler(handler)\n rstcls.logger1 = logger1\n return rstcls._singleton\n\n\n @staticmethod\n def log(msg):\n #if logcls._singleton is None:\n #logcls._singleton = logcls()\n rstcls.logger1.info(str(msg))\n # logger.debug(msg)\n # logger.warning(msg)\n # logger.info(msg)\n"
},
{
"alpha_fraction": 0.501089334487915,
"alphanum_fraction": 0.5179738402366638,
"avg_line_length": 32.38181686401367,
"blob_id": "86f59b32acf3119cefea4b14f06e2f8caddd49fb",
"content_id": "1b1ba9b58aa806d449d200c1b5e86bffb2e08679",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1882,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 55,
"path": "/parse_dict.py",
"repo_name": "AKkkAha/API_WITH_EXCEL",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\n\n\ndef search_dict(target, temp_dict): # 返回target在temp_dict中的索引\n value = \"\"\n if target in temp_dict.keys():\n value = \"['\" + str(target) + \"']\"\n else:\n for key in temp_dict.keys():\n if type(temp_dict[key]) is list and temp_dict[key]:\n temp_dict[key] = temp_dict[key][0]\n if type(temp_dict[key]) is dict:\n value = \"['\" + str(key) + \"']\" + search_dict(target, temp_dict[key])\n return value\n\n# 从输入的key = target, 获取temp_dict中对应值\ndef find_from_dict(target, temp_dict): # 返回target在temp_dict中的值\n value = \"\"\n try:\n if target in temp_dict.keys():\n value = temp_dict[target]\n else:\n for key in temp_dict.keys():\n if type(temp_dict[key]) is list and temp_dict[key]:\n temp_dict[key] = temp_dict[key][0]\n if type(temp_dict[key]) is dict:\n value = find_from_dict(target, temp_dict[key])\n except:\n value = str(temp_dict)\n return value\n\n\ndef compare_dict(dict1, dict2):\n miss_list = []\n for key, value in dict1.items():\n if key in dict2.keys():\n if type(dict2[key]) is list and dict2[key]:\n dict2[key] = dict2[key][0]\n if type(dict2[key]) is dict:\n if type(dict1[key]) is dict:\n miss_list += compare_dict(dict1[key], dict2[key])\n else:\n miss_list.append(key)\n return miss_list\n\n# target = \"b\"\n# mydict = {'a': 1, 'b': {\"xy\": 4, \"xx\": 8}, 'c': 3}\n# path = search_dict(target, mydict)\n# print \"mydict\" + path + \"=24\"\n# exec(\"mydict\" + path + \"=24\")\n# print mydict\n# target = \"xx\"\n# test_dict = {'a': 1, 'b': 2, 'c': {'d': 4, 'e': 5}, 'f': {'g': 6, 'xx': 7, 'h': 8}}\n# value = find_from_dict(target, test_dict)\n# print value\n"
},
{
"alpha_fraction": 0.5502958297729492,
"alphanum_fraction": 0.5917159914970398,
"avg_line_length": 14.363636016845703,
"blob_id": "fa231e1757b94145eb9660c9ca40a2270a73a23b",
"content_id": "3ff59bcae69c839893021825f639d09e4c39b7db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 283,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 11,
"path": "/config.py",
"repo_name": "AKkkAha/API_WITH_EXCEL",
"src_encoding": "UTF-8",
"text": "# -*- coding:utf-8 -*-\ntest_module = {\n \"testcase\": [\"3-13\"], # 要执行的用例sheet和具体用例,单个用例用整形表示如 1 ,多个连续用例用字符串表示如 \"2-6\" ,都储存在列表钟\n\n}\n\n\n# 预设的参数,字典格式表示。\ncustom_var = {\n\n}\n"
}
] | 6 |
Jackson200114/AID1905 | https://github.com/Jackson200114/AID1905 | 3077091b435e71bae49085cc8e43cf89c8753759 | 2ef48225a02f8791be477d775ff03abe6701402b | 7692fe3ab0ee0655dc9a9ceb2ebb814c4e2181b7 | refs/heads/master | 2020-06-21T20:01:43.221707 | 2019-07-18T12:00:48 | 2019-07-18T12:00:48 | 197,541,543 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5704917907714844,
"alphanum_fraction": 0.5803278684616089,
"avg_line_length": 18.677419662475586,
"blob_id": "9e88146d2390f80c2e2884804065ae9df2205ce8",
"content_id": "92f2677827238d8a1601dcd369cb76b406fef139",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 846,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 31,
"path": "/read_db.py",
"repo_name": "Jackson200114/AID1905",
"src_encoding": "UTF-8",
"text": "\"\"\"\n pymysql 操作数据库基本流程演示\n\"\"\"\nimport pymysql\n\n# 连接数据库\ndb = pymysql.connect(host=\"localhost\",\n port=3306,\n user=\"root\",\n password=\"123456\",\n database=\"stu\",\n charset=\"utf8\")\n\n# 获取游标(操作数据库,执行sql语句)\ncur = db.cursor()\n\n# 获取数据库数据\nsql = \"select * from class where gender='w';\"\ncur.execute(sql) # 执行正确后cur调用函数获取结果\n\n# #获取一个查询结果\n# one_row=cur.fetchone()\n# print(one_row)#获取的是一个元祖,每一个元祖是一条信息的基本内容\n\n# 获取多个查询结果\nmany_row = cur.fetchmany(2)\nprint(many_row) # 获取的是一个元祖,每一个元素是一条元祖构成的信息\n\n# 关闭数据库\ncur.close()\ndb.close()\n"
}
] | 1 |
yrttiahoS/py_gazedat | https://github.com/yrttiahoS/py_gazedat | a53e0886787ff910bd76dea66f3ebfec9bb68a09 | 5c042d79e831b23d519cafc33e6d964911045b8d | 04b18e7deca8531745197d3598a0d4889360cc90 | refs/heads/master | 2021-06-05T14:20:05.538673 | 2021-05-07T12:18:30 | 2021-05-07T12:18:30 | 91,787,968 | 0 | 0 | null | 2017-05-19T09:09:11 | 2017-05-19T09:44:45 | 2017-05-26T09:19:17 | Python | [
{
"alpha_fraction": 0.5339681506156921,
"alphanum_fraction": 0.5567083358764648,
"avg_line_length": 18.21839141845703,
"blob_id": "0d1b6d88e8479e622caa0e1d6d1799985710206e",
"content_id": "dd60dbdc35525fb3624f27754e0f29cd3e3c32a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7036,
"license_type": "permissive",
"max_line_length": 148,
"num_lines": 348,
"path": "/py_test.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "from datetime import datetime, date\r\n\r\nimport os\r\n\r\nimport csv\r\n\r\nimport sys\r\n\r\n#import math\r\n\r\nfrom collections import OrderedDict\r\n\r\nimport routine # a script with functions\r\n\r\nimport numpy as np\r\n\r\nfrom my_classes import GazeReader\r\n\r\ninput_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\7mo,trec2\"\r\n#input_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing 7mo,trec2\"\r\n#print(os.path.isdir(input_folder))\r\n#os.mkdir(\"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\50-years visit\\\\\")\r\n\r\nallFIlenames =os.listdir(input_folder)\r\nprint(len(allFIlenames))\r\nfileNames = [fileName for fileName in os.listdir(input_folder) if fileName.endswith(\".gazedata\")]\r\nprint(len(fileNames))\r\n\r\na = \"stringi1\"\r\nb = \"stringi2\"\r\nif a == b:\r\n print(\"samat\")\r\nelse: \r\n print(\"eri\")\r\n\r\n##folder_path = os.path.split(\"C:/lasayr/file.txt\")\r\n##folder_tail = folder_path[1] \r\n##print(folder_tail)\r\n##\r\n##i = folder_tail.find('.')\r\n##print(folder_tail[0:i])\r\n##print(folder_tail[i])\r\n##\r\n##a=1\r\n##while a <= 3:\r\n## print(a)\r\n## a += 1\r\n## \r\n##\r\n##\r\n##\r\n##\r\n##with open(os.path.join(os.getcwd(), \"tiste_file.txt\"), \"wt\", newline = \"\\n\") as outputfile:\r\n## writer = csv.writer(outputfile, delimiter=\"\\t\")\r\n## writer.writerow( [\"guuguu\", \"geegee\"] )\r\n## writer.writerow( [\"guuguu 2\",] )\r\n##\r\n\r\n\r\n\r\n#args_pro = 'D:\\\\lasayr\\\\Aaltonen\\\\mi', '353_4.gazedata', None\r\n \r\n# make new GazeReader object for reading and processing input file\r\n##f_processor = GazeReader(args_pro) #40 is optional limit for rows\r\n##print(f_processor.get_filename())\r\n##hh = f_processor.get_headers()\r\n##\r\n##print(hh)\r\n##\r\n##hh_num=[]\r\n##tets=[]\r\n##print(len(tets))\r\n##\r\n##for i in range(10):\r\n## print(\"duck\")\r\n\r\n##for header in hh:\r\n## print(header)\r\n## hh_num.append(routine.string_or_number(header))\r\n## tets.append(isinstance(hh_num[-1], str)) #[-1] is the last element...!\r\n##\r\n##\r\n##\r\n##\r\n##print(\"is strin:\" + str(tets))#isinstance(hh[0], str))\r\n##\r\n##print(\"all headers are string:\" + str(all(tets)))\r\n##a = [1, 2, 3]\r\n##\r\n##print(a)\r\n##print(np.percentile(a , 50))\r\n##\r\n##\r\n##start_time = date.today()\r\n##print(start_time )\r\n\r\n#returns the elapsed milliseconds since the start of the program\r\n##def millis(start_time):\r\n## dtn = datetime.now()\r\n## print(dtn)\r\n## dt = dtn - start_time\r\n##\r\n## print(dt)\r\n## mus =(dt.days * 24 *60 *60 + dt.seconds) * 1000 + dt.microseconds \r\n## ms = mus / 1000\r\n## s = mus / (1000*1000/1)\r\n## minutes = mus /(1000*1000*60)\r\n## \r\n## return ms, s, minutes\r\n##\r\n##for i in range(1,10^9990):\r\n## 10^i\r\n##\r\n##print( str(millis(start_time)))\r\n#from my_classes import MyClass\r\n\r\n#print(sys.version)\r\n\r\n\r\n#foo = routine.string_or_number('neutral2.bmp')\r\n\r\n\r\n#stim = ['control.bmp', 'neutral2.bmp', 'control.bmp', 'neutral2.bmp']\r\n\r\n##stim = ['control.bmp', 1, 'control.bmp', 'neutral2.bmp',\r\n## 'control.bmp', 'neutral2.bmp', 'control.bmp', 'neutral2.bmp',\r\n## 'control.bmp', 'aaneutral2.bmp']\r\n##\r\n##\r\n##print(sorted(list(stim)))\r\n##print()\r\n\r\n\r\n##\r\n##ab = [\"a\", 1, \"c\", \"c\", \"a\"]\r\n##bb= 
[]\r\n##for el in ab:\r\n## if isinstance(el, str): bb.append(1)\r\n## else: bb.append(0)\r\n##\r\n## \r\n##print(all(bb))\r\n##\r\n\r\n#print(any (isinstance(ab,str)))\r\n\r\n#print(isinstance(foo, str))\r\n\r\n#headers = rderedDict(\"a\": None, \"b\": None, \"c\": None)\r\n#headers = [\"a\", \"b\", \"c\", \"c\", \"a\"]\r\n#headers2 = [\"Q\", \"W\"]\r\n##print(sorted(headers))\r\n##print(set(headers))\r\n##\r\n##od = OrderedDict.fromkeys(headers)\r\n##\r\n##fod = \"a\" in od.keys()\r\n##\r\n##print (fod)\r\n\r\n\r\n##print(\"aa\" == \"ab\")\r\n##\r\n##od = OrderedDict()\r\n##od['a'] = [1,2,3]\r\n##od['b'] = None\r\n##print(od['a'][len(od['a'])-1])\r\n##od['a'].append(2)\r\n##\r\n##\r\n##print(not od['a'])\r\n##\r\n##aa = list([1,1])\r\n##for el in [2,3]: aa.append(el)\r\n###aa.append([2,3])\r\n##\r\n##a = [10,10]\r\n###aa.append(10,10)\r\n##print(aa)\r\n##\r\n##print(max(aa))\r\n##print(len(aa))\r\n##print(isinstance(1,str))\r\n\r\n\r\n\r\n#print(os.getcwd() )\r\n##\r\n##\r\n##aa = not isinstance(headers[0], str)\r\n##print(aa)\r\n\r\n##\r\n##a = {'numbero': [1,2], \"wordolo\": \"nuppi\"}\r\n###a = {'numberot': 1, 2, 3, 4, \"wordolot\": \"nuppi\", \"nappi\", \"noppi\"}\r\n#intti = float('a')\r\n##print( min(a['numbero']) )\r\n##\r\n\r\n\r\n##\r\n##print(range(0,10))\r\n##\r\n##a = []\r\n##\r\n##for i in range(9):\r\n## a.append(i)\r\n## print(str(a))\r\n\r\n\r\n#from os.path import join, getsize\r\n\r\n##for root, dirs, files in os.walk('C:\\\\Users\\\\'):\r\n## if 'testing 7mo,trec2' in root: \r\n## print(root, \" \", end=\" FOUND! \")\r\n## print(\"\")\r\n\r\n\r\n##\r\n##\r\n##def funny(argue):\r\n## if not argue:\r\n## print(\"yell more\")\r\n## else:\r\n## print(\"i agrue\")\r\n##\r\n##funny(\"s\")\r\n##\r\n##funny(None)\r\n##\r\n##\r\n##default_input_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing 7mo,trec2\"\r\n##print(os.path.isdir(default_input_folder))\r\n\r\n#C:\\\\Users\\\\\r\n\r\n\r\n\r\n#x = MyClass(\"gillo\")\r\n\r\n#print(x.f())\r\n\r\n#print(x.__doc__)\r\n#round(k*3.14)\r\n#\r\n#y = []\r\n#k = 25\r\n#for i, item in enumerate(range(k+1)):\r\n# print (str(i))\r\n## yx = (i/k)*math.pi\r\n# y.append (math.sin((i/k)*2*math.pi)) #*math.pi\r\n# \r\n\r\n\r\n#print(x.f())\r\n#print(x.get_i())\r\n\r\n\r\n##input_file = \"headers_tre_5mo_to_7mo.txt\"\r\n##input_file = \"headers_txt_data.txt\"\r\n##\r\n##input_file_delimiter = \"\\t\"\r\n##\r\n##aa = [\"q\",\"w\",\"e\",\"r\"]\r\n##\r\n##aa = [aa, aa, aa]\r\n\r\n#print(list(enumerate(aa)))\r\n\r\n##bb = zip( range(20000,20001,1), aa)\r\n#print(list(range(3)))\r\n#print(list(bb))\r\n\r\n##for index, element in bb:\r\n## print(element)\r\n\r\n#a = routine.wonder((\"amo\", \"rati\", \"her\"))\r\n\r\n##d = dict()\r\n##od = OrderedDict()\r\n##\r\n##od[\"yks\"] = 1\r\n##od[\"kaks\"] = 2\r\n##od[\"kolme\"] = 3\r\n##\r\n##d[\"yks\"] = 1\r\n##d[\"kaks\"] = 2\r\n##d[\"kolme\"] = 3\r\n\r\n#print(od)\r\n# print(d)\r\n\r\n\r\n##if (\"a\" is \"a\"):\r\n## print(\"a is a\")\r\n##\r\n##b = \"abc\"\r\n\r\n\r\n#print(\"aaaa\" + chr(9) + \"aaaa\") #tab\r\n#print(\"aaaa\" + chr(13) + \"aaaa\") #CR is a bytecode for carriage return (from the days of typewriters)\r\n#print(\"aaaa\" + chr(10) + \"aaaa\") #LF similarly, for line feed\r\n\r\n\r\n# scan through files in a directory\r\n\r\n# diritems = os.listdir(os.getcwd())\r\n\r\n\r\n#table for header pairs\r\n\r\n# maptable = {}#dict([('KEY','value')])\r\n\r\n\r\n#read dictionary key-value pairs from 
file\r\n\r\n#maptable = routine.get_headers(os.getcwd(), input_file)\r\n#print (maptable)\r\n#print (len(maptable.keys()))\r\n#print (maptable.keys())\r\n##print (maptable.values())\r\n\r\n#testing a function in imported code\r\n\r\n#routine.miracle(5) \r\n\r\n\r\n#list_my = [1, 2, 3, 4]\r\n\r\n#print(len(list_my))\r\n\r\n\r\n##def file_handle(file):\r\n## print (file)\r\n## print (file.split(\".\"))\r\n##\r\n##for filenum, file in enumerate(diritems):\r\n## file_handle(file)\r\n##\r\n##\r\n##for i, a in enumerate([\"uu\",\"jee\"]):\r\n## print(i)\r\n## print(a)\r\n##\r\n##\r\n##\r\n##print (\"Directory contains \"\r\n##+ str(len(diritems)) + \" files.\")\r\n"
},
{
"alpha_fraction": 0.6776271462440491,
"alphanum_fraction": 0.6888135671615601,
"avg_line_length": 41.17647171020508,
"blob_id": "8c1e8ebbb0de396d2c6ac5bedaee32d871a140b4",
"content_id": "fdf9ac45a52c93fc88249bd59b9616eaed4ec9d0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2950,
"license_type": "permissive",
"max_line_length": 225,
"num_lines": 68,
"path": "/standardizeGazedata_0.2.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "##Script for reading, modifying and re-writing gazedata\r\n##*this will have some algorithms from\r\n##gazedata2gazedata_sy3.py and the like, but should use classes in my_classes.py\r\n##*use DataFolder class to access all gazefiles in folder\r\n##\t**write new method for writing new data into folder\r\n##\t\t*method will loop through files, i.e., GazeReader objects\r\n##\t\t*method will read (gettinf from GazeReader), \r\n##\t\t\t change (by accessing separate textfile),\r\n##\t\t\t\t and write headers to new file (use writer object?)\r\n##\t\t*method will read, change, and write datarow one-by-one into new file,\r\n##\t\t\tsame logic as with headers, changing might be more tricky?\r\n##\t\t*i.e., GazeReader will never have to give up entire data!\r\n\r\n## first things 1st\r\n\r\nimport os\r\nimport routine\r\nfrom my_classes import DataFolder\r\nfrom my_classes import HeaderReader\r\n\r\n#read header conversion map \r\nfolder = \"C:/Users/lasayr/Documents/GitHub/py_gazedat\"\r\nfile = \"header map 3D.txt\"\r\nhr = HeaderReader(folder, file)\r\n\r\n\r\n### Set folder and data for header conversion map\r\n##folder = \"C:/Users/infant/Documents/GitHub/py_gazedat\"\r\n##fileModelHM = \"header map.txt\"\r\n##fileModelCur = \"??????.txt\"\r\n##\r\n### Read old-new conversion map, for headers\r\n##hmModel = routine.get_headers(folder,fileModelHM)\r\n##hmCurrent = routine.get_headers(folder,fileModelCur)\r\n\r\n\r\n#vals = list(hm.values()) #list(d.values())\r\n#vals.remove('OBSOLETE')\r\n#print(vals)\r\n\r\nhome = 'C:\\\\Users\\\\lasayr\\\\Documents\\\\'\r\n\r\n## then do some business\r\n\r\n# Source folder:\r\n#input_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\7mo,trec2\"\r\n#input_folder = home + \"D\\\\Aaltonen\\\\ct\\\\ct_6mo\"\r\n#input_folder = home + \"D\\\\Aaltonen\\\\ct\\\\ct_18mo_temp\"\r\ninput_folder = home + \"D\\\\Aaltonen\\\\ct\\\\ct_48mo\"\r\n#input_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\mi\"\r\n#input_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\24mo,trec2\"\r\ninFolderUnique = os.path.split(input_folder)[1]\r\n\r\n#output_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\TREc2_7mo_std TESTING\" #output_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\ct\\\\6mo_TESTING\"#output_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\mi\\\\testing\"#output_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\24mo\\\\testing\"\r\n#output_folder = input_folder + \"\\\\testing\"\r\noutput_folder = home + \"D\\\\Aaltonen\\\\\" + inFolderUnique + \"_std\"\r\n\r\n# Init DataFolder\r\ndata_folder = DataFolder(input_folder, map_header = hr, date_limit = \"01 Jan 00\",\r\n limit_files = (0,None), limit_rows = None)#, fileModelCur)#, limit_files = (0,3))#, limit_rows = 20, limit_files = (1,3)) \r\n\r\n# Print header map, conversion table\r\ndata_folder.print_header_map()\r\n\r\nprint(\"\\nFiles selected: \" + str(data_folder.get_filelist()))\r\n# Change output folder, default is: C:\\Users\\Public\\Documents\\Tampereen yliopisto\\Eye tracker\\TRE Cohort 2\\gazeAnalysisLib analyses\\testing data\r\n#data_folder.set_output_folder(output_folder)\r\ndata_folder.rewrite_data(output_folder) \r\n \r\n"
},
{
"alpha_fraction": 0.5342648029327393,
"alphanum_fraction": 0.5434131622314453,
"avg_line_length": 26.05855941772461,
"blob_id": "6890bb4781abc1ea29e8b4fd5b6afcf999aaa942",
"content_id": "aa51af80532b01badcea009dca466ce0e7814526",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6012,
"license_type": "permissive",
"max_line_length": 147,
"num_lines": 222,
"path": "/gazedata2gazedata_sy2 - copy.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\rimport csv\r\rimport routine\r\rfrom collections import OrderedDict\r\r##\r\r# This Jussi's script converts eyetracking data in txt-format to gazedata-format\r\r# It also converts X- and Y- coordinates to relative values for screen size.\r\r# Input folder needs to be relative to the script location in the folder tree.\r\r# In this case the folder where this script is located needs to have a folder\r\r# named \"files_to_change\" where the files are located.\r\rinput_folder = folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing 7mo,trec2\"\r#input_folder = folder = \"C:\\Users\\Public\\Documents\\Tampereen yliopisto\\Eye tracker\\TRE Cohort 2\\gazeAnalysisLib analyses\\7mo,trec2\"\r\routput_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing data\"\r\rfile_ext = \".gazedata\" #input file extension, .txt\r\routput_file_ext = \".gazedata\"\r\rinput_file_delimiter = \"\\t\"\r\routput_file_delimiter = input_file_delimiter\r\r##null_values = [\".\", \"\"] # two possible kinds values for missing samples\r##\r##null_values_new = \"-999999\" # \r\rscreen_x_dim = 1920.0 #screen width, adjust for differnt sites? \r\rscreen_y_dim = 1020.0 #screen height, adjust for differnt sites?\r\rheaders_folder = os.getcwd() #path for headers inputfile\r\rheaders_inputfile = \"headers_tre_5mo_to_7mo.txt\"\r\r##\r\r#map for one type of \"gazedata\" (or txt) headers, values may not apply to all gazedata \r\rmap_header = routine.get_headers(headers_folder, headers_inputfile)\r\r# list files in a directory\r\rdiritems = os.listdir(input_folder)\r\r##\r#subroutine for processing one file\r\r##def file_process(file, maptable):\r##\r## print (\" Filename matches with the specified file_ext -> processing..\")\r##\r## #self.liststore_exp.append([file])\r##\r## input_file = file\r##\r##\r## #re-initialize file-specific vars\r## \r## newrows = [] #processed data, process in function\r##\r## indexed_maptable = {} #processin mpad\r##\r## imkeys = [] #processing dict_keys\r##\r##\r##\r## # input file reading\r##\r## with open(os.path.join(input_folder, input_file), \"rt\") as inputfile:\r##\r## reader = csv.reader(inputfile, delimiter = input_file_delimiter)\r## \r## # grab header information, into a list\r##\r## headers = next(reader) #reader.__next__() \r## \r##\r## # calculate list index numbers for map-keys\r##\r## indexed_maptable = OrderedDict()\r##\r## for key in maptable:\r## #print(\"key: \" + key)\r## #print(\"index of header: \" + str(headers.index(key)))\r##\r## indexed_maptable[key] = headers.index(key)\r## \r##\r## # loop file rows and cols, \r##\r## imkeys = indexed_maptable.keys()\r## \r## for r, row in enumerate(reader):\r## \r## newrow = []\r##\r## for k, key in enumerate(imkeys):\r##\r## ncol = k #indexed_maptable[key]\r## if r < 20: break#print(ncol)\r## \r## #If loop goes out of index, print cell\r##\r## try:\r##\r## foo = row[ncol]\r##\r## except(IndexError): \r##\r## foo = newrows[r-1]\r##\r## foo = foo[k]\r##\r#### print(\"key: \" + key)\r#### print(\"index of header: \" + str(headers.index(key)))\r#### print (\"foo: \" + str(foo))\r#### print (\"row: \" + str(r) + \" col: \" + str(ncol))\r#### print( str(len(newrows)))\r##\r##\r## # take away the null-values if they exist \r##\r## if foo not in null_values: #if row[ncol] not in null_values:\r##\r## if key in ['LEFT_GAZE_X', 'RIGHT_GAZE_X']:\r##\r## 
newrow.append(float(foo) / 1920.0) #newrow.append(float(row[ncol]) / 1920.0)\r##\r## elif key in ['LEFT_GAZE_Y', 'RIGHT_GAZE_Y']:\r##\r## newrow.append(float(foo) / 1020.0) #newrow.append(float(row[ncol]) / 1020.0)\r##\r## else:\r##\r## newrow.append(foo) #newrow.append(row[ncol])\r##\r## else:\r##\r## newrow.append(null_values_new)\r##\r## newrows.append(newrow)\r##\r## return newrows, list(imkeys)\r## \r##\r## \r## \r##\r\r## [subcode]\r# \"main-like\" code \r\rprint (\"Directory contains \" + str(len(diritems)) + \" files.\")\r\rfor filenum, file in zip(range(2716), diritems):\r\r#for filenum, file in enumerate(diritems): #diritems\r\r #print (\"Checking file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\r if file.endswith(file_ext):\r\r print (\"Process file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\r print(file)\r\r\r\r #read in data, process, and strore in newrows\r args_pro = input_folder, file, map_header\r \r newrows, newheaders = routine.file_process(args_pro)\r\r #print (str(newrows))\r\r\r print(\"newrows length: \" + str(len(newrows)))\r\r # output file formation\r\r # resolve the output file name\r\r input_filename_parts = file.split(\".\") #input_file.split(\".\")\r\r output_file = input_filename_parts[0] + output_file_ext\r\r\r\r # open file\r\r with open(os.path.join(output_folder, output_file), \"wt\") as outputfile:\r\r writer = csv.writer(outputfile, delimiter=output_file_delimiter)\r\r\r # write header row\r\r writer.writerow(newheaders) \r\r\r # write datarows\r\r for newrow in newrows:\r\r writer.writerow(newrow)\r\r print (\" File processed.\")\r\r# else:\r\r #print (\" Filename did not match the file_ext -> did nothing.\")\r\r\r\r\r\r"
},
{
"alpha_fraction": 0.6972140669822693,
"alphanum_fraction": 0.7250733375549316,
"avg_line_length": 37.882354736328125,
"blob_id": "39eaf7d7ce25f759cd68c9bb554c17fc2bf49acd",
"content_id": "2b705817dae14391d2e760ebe7d248ceaa7a62de",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1364,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 34,
"path": "/check_dataOO_0.1.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\nimport csv\r\nfrom my_classes import DataFolder\r\nfrom datetime import datetime\r\n\r\n# script for writing headers and data stats from different gazedata to file(s)\r\n\r\n# path to data testing eg., 7mo,trec2\r\n\r\ntre5mo_old = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\5mo,trec2\"\r\ntre7mo_testin = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing 7mo,trec2\"\r\ntre7mo_old = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\7mo,trec2\"\r\nmi = (\"D:\\\\lasayr\\\\Aaltonen\\\\mi\") #+ folder_name)\r\n\r\nfolder_path = tre7mo_old\r\n\r\n# create new DataFolder to be inspected/processed, limit_files = (19,20) or (0, None)\r\ndata_folder = DataFolder(folder_path, limit_rows = 100, limit_files = (2600,2600+10))\r\n\r\n# target output to current working directory, cwd\r\ndata_folder.set_output_folder(os.getcwd())\r\n\r\n##\r\n\r\n# list statistics from differt files to output \"log\"\r\nstart_time = datetime.now()\r\n#percentiles parameter for numerical stats\r\npercentiles = (0.1,99.9)\r\ndata_folder.write_stats_to_file(percentiles)\r\nprint(datetime.now() - start_time) #print time elapsed\r\n\r\nheaders = data_folder.get_headers()\r\nfor header in headers:\r\n print(header + \": \" + str(data_folder.get_stats(header)))\r\n \r\n\r\n"
},
{
"alpha_fraction": 0.6248348951339722,
"alphanum_fraction": 0.6314398646354675,
"avg_line_length": 24.395973205566406,
"blob_id": "03265ea3f1575bf6d5ab22b0861675dd0e1156fb",
"content_id": "639e3720aef324211e0aaffb799e1b10480d72d6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3785,
"license_type": "permissive",
"max_line_length": 159,
"num_lines": 149,
"path": "/gazedataOO_0.2.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\rimport csv\r\rimport routine\r\r#from collections import OrderedDict\r\rfrom itertools import islice\r\rfrom my_classes import GazeReader\r\r# This Jussi's script converts eyetracking data in txt-format to gazedata-format\r\r# It also converts X- and Y- coordinates to relative values for screen size.\r\r# Input folder needs to be relative to the script location in the folder tree.\r\r# In this case the computer(!) where this script is located needs to have a folder\r\r# named 'testing 7mo,trec2' where the files are located.\r\r# VERSION IS 1st TO ADOPT ObjectOriented programming of our gazedata scripts\r\r\r\r\r\rinput_folder = 'testing 7mo,trec2' #\"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing 7mo,trec2\"\r#input_folder = folder = \"C:\\Users\\Public\\Documents\\Tampereen yliopisto\\Eye tracker\\TRE Cohort 2\\gazeAnalysisLib analyses\\7mo,trec2\"\r\rn_files = 1 # set limit for files to be processed, None if no limit desired\r\routput_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing data\"\r\ros.makedirs(output_folder,exist_ok=True)\r\rfile_ext = \".gazedata\" #input file extension, .txt\r\routput_file_ext = \".gazedata\"\r\rinput_file_delimiter = \"\\t\"\r\routput_file_delimiter = input_file_delimiter\r\rheaders_folder = os.getcwd() #path for headers inputfile\r\rheaders_inputfile = \"headers_tre_5mo_to_7mo.txt\"\r\r\r#map for one type of \"gazedata\" (or txt) headers, values may not apply to all gazedata \r\rmap_header = routine.get_headers(headers_folder, headers_inputfile)\r\r\r# find directory by \"walking\" through the system\r\rfor root, dirs, files in os.walk('C:\\\\Users\\\\'):\r \r if input_folder in root: \r \r print(root, \" \", end=\" FOUND! 
\")\r \r print(\"\")\r \r input_folder = root\r \rprint (input_folder)\r\r\r##\r \r# list files in a directory, \r\rdiritems = os.listdir(input_folder)\r\rprint (\"Directory contains \" + str(len(diritems)) + \" files.\")\r\r\r#loop through files, limit loop by isslice(items, start, stop), can be None\r\rfor filenum, file in islice(enumerate(diritems), 0, n_files): \r\r #print (\"Checking file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\r if file.endswith(file_ext):\r\r print (\"Process file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\r print(file)\r\r\r #read in data, process, and strore in newrows\r\r args_pro = input_folder, file, map_header\r \r \r # make new GazeReader object for reading and processing input file \r \r f_processor = GazeReader(args_pro, 40) #40 is optional limit for rows\r \r f_processor.set_row_limit(40) # limit rows, good for debugging\r \r print(\"Newrows length: \" + str(f_processor.get_row_count()))\r\r \r # output file formation\r\r # resolve the output file name\r\r input_filename_parts = file.split(\".\") #input_file.split(\".\")\r\r output_file = input_filename_parts[0] + output_file_ext\r\r\r\r # open output file\r\r with open(os.path.join(output_folder, output_file), \"wt\") as outputfile:\r\r writer = csv.writer(outputfile, delimiter=output_file_delimiter)\r\r\r # write header row\r\r writer.writerow(f_processor.get_headers()) \r\r\r # write datarows, until there are none left\r\r newrow = True\r\r while newrow:\r \r newrow = f_processor.get_new_row()\r \r if (newrow):\r \r writer.writerow(newrow)\r \r \r\r print (\" File processed.\")\r \r \r\r# else:\r\r #print (\" Filename did not match the file_ext -> did nothing.\")\r\r"
},
{
"alpha_fraction": 0.5761858820915222,
"alphanum_fraction": 0.5845111608505249,
"avg_line_length": 22.907407760620117,
"blob_id": "aa39f138dcd076019a5ac3e32a1c7c7840becff1",
"content_id": "067af58ccd11b71545bd39f6c386b43bc199a6e5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5165,
"license_type": "permissive",
"max_line_length": 139,
"num_lines": 216,
"path": "/gazedata2gazedata_sy.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\rimport csv\r\rimport routine\r\r# This Jussi's script converts eyetracking data in txt-format to gazedata-format\r\r# It also converts X- and Y- coordinates to relative values for screen size.\r\r# Input folder needs to be relative to the script location in the folder tree.\r\r# In this case the folder where this script is located needs to have a folder\r\r# named \"files_to_change\" where the files are located.\r\rinput_folder = folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\7mo,trec2\"\r#input_folder = folder = \"C:\\Users\\Public\\Documents\\Tampereen yliopisto\\Eye tracker\\TRE Cohort 2\\gazeAnalysisLib analyses\\7mo,trec2\"\r\routput_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing data\"\r\rending = \".gazedata\" #input file extension, .txt\r\routput_file_ending = \".gazedata\"\r\rinput_file_delimiter = \"\\t\"\r\rnull_values = [\".\", \"\"] # two possible kinds values for missing samples\r\rreplace_null_values = \"-999999\" # \r\rscreen_x_dim = 1920.0 #screen width, adjust for differnt sites? \r\rscreen_y_dim = 1020.0 #screen height, adjust for differnt sites?\r\rheaders_folder = os.getcwd() #path for headers inputfile\r\rheaders_inputfile = \"headers_tre_5mo_to_7mo.txt\"\r\r\r\r#map for one type of \"gazedata\" (or txt) headers, values may not apply to all gazedata \r\rmaptable = routine.get_headers(headers_folder, headers_inputfile)\r\r\r#subroutine for processing one file\r\rdef file_process(file):\r\r print (\" Filename matches with the specified ending -> processing..\")\r\r #self.liststore_exp.append([file])\r\r input_file = file\r\r\r\r # input file reading\r\r newrows = []\r\r with open(os.path.join(input_folder, input_file), \"rt\") as inputfile:\r\r reader = csv.reader(inputfile, delimiter = input_file_delimiter)\r #reader.line_num())\r\r\r # grab header information, into a list\r\r headers = next(reader) #reader.__next__() \r #print(headers)\r\r\r # calculate list index numbers for map-keys\r\r indexed_maptable = {}\r\r for key in maptable:\r print(\"key: \" + key)\r print(\"index of header: \" + str(headers.index(\"Subject\")))\r print (\"headers index key: \" +headers.index(key))\r indexed_maptable[key] = headers.index(key)\r\r\r\r # loop file rows and cols, \r\r imkeys = indexed_maptable.keys()\r\r for row in reader:\r\r newrow = []\r\r for key in imkeys:\r\r ncol = indexed_maptable[key]\r\r # take away the null-values if they exist\r\r if row[ncol] not in null_values:\r\r if key in ['LEFT_GAZE_X', 'RIGHT_GAZE_X']:\r\r newrow.append(float(row[ncol]) / 1920.0)\r\r elif key in ['LEFT_GAZE_Y', 'RIGHT_GAZE_Y']:\r\r newrow.append(float(row[ncol]) / 1020.0)\r\r else:\r\r newrow.append(row[ncol])\r\r else:\r\r newrow.append(replace_null_values)\r\r newrows.append(newrow)\r\r\r\r\r# scan through files in a directory\r\rdiritems = os.listdir(input_folder)\r\r\r\rprint (\"Directory contains \" + str(len(diritems)) + \" files.\")\r\rfor filenum, file in zip(range(10), diritems):\r\r#for filenum, file in enumerate(diritems): #diritems\r\r #print (\"Checking file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\r if file.endswith(ending):\r\r print (\"Process file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\r file_process(file)\r\r\r # output file formation\r\r # resolve the output file name\r\r input_filename_parts = file.split(\".\") #input_file.split(\".\")\r\r output_file = 
input_filename_parts[0] + output_file_ending\r\r\r\r # open file\r\r with open(os.path.join(output_folder, output_file), \"wb\") as outputfile:\r\r writer = csv.writer(outputfile, delimiter='\\\\t')\r\r\r\r # form header row\r\r newheaders = []\r\r for key in imkeys:\r\r newheaders.append(maptable[key])\r\r\r\r # write header row\r\r writer.writerow(newheaders)\r\r\r\r # write datarows\r\r for newrow in newrows:\r\r writer.writerow(newrow)\r\r\r\r print (\" File processed.\")\r\r# else:\r\r #print (\" Filename did not match the ending -> did nothing.\")\r\r\r\r#map for one type of \"gazedata\" (or txt) headers, values may not apply to all gazedata \r##maptable = {\"TIMESTAMP\":\"TETTime\",\r##\r## \"RECORDING_SESSION_LABEL\":\"Subject\",\r##\r## \"LEFT_GAZE_X\":\"XGazePosLeftEye\",\r##\r## \"LEFT_GAZE_Y\":\"YGazePosLeftEye\",\r##\r## \"RIGHT_GAZE_X\":\"XGazePosRightEye\", \r##\r## \"RIGHT_GAZE_Y\":\"YGazePosRightEye\",\r##\r## \"TRIAL_INDEX\":\"TrialId\",\r##\r## \"SAMPLE_MESSAGE\":\"UserDefined_1\",\r##\r## \"RIGHT_PUPIL_SIZE\":\"DiameterPupilRightEye\",\r##\r## \"stimulus_right_2\":\"Stim\",\r##\r## \"__target_x__1\":\"Target\"}\r\r"
},
{
"alpha_fraction": 0.6987425088882446,
"alphanum_fraction": 0.706943690776825,
"avg_line_length": 37.456520080566406,
"blob_id": "0570ab2bf41213ba10755828da2d24efcd633219",
"content_id": "9c8d3cec83f947ea0fff2ea7927312e33f54f6f8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1829,
"license_type": "permissive",
"max_line_length": 144,
"num_lines": 46,
"path": "/standardizeGazedata_0.1.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "##Script for reading, modifying and re-writing gazedata\r\n##*this will have some algorithms from\r\n##gazedata2gazedata_sy3.py and the like, but should use classes in my_classes.py\r\n##*use DataFolder class to access all gazefiles in folder\r\n##\t**write new method for writing new data into folder\r\n##\t\t*method will loop through files, i.e., GazeReader objects\r\n##\t\t*method will read (gettinf from GazeReader), \r\n##\t\t\t change (by accessing separate textfile),\r\n##\t\t\t\t and write headers to new file (use writer object?)\r\n##\t\t*method will read, change, and write datarow one-by-one into new file,\r\n##\t\t\tsame logic as with headers, changing might be more tricky?\r\n##\t\t*i.e., GazeReader will never have to give up entire data!\r\n\r\n## first things 1st\r\n\r\nimport routine\r\nfrom my_classes import DataFolder\r\n\r\n# Set folder and data for header conversion map\r\nfolder = \"C:/Users/infant/Documents/GitHub/py_gazedat\"\r\nfile = \"header map.txt\"\r\n\r\n# Read old-new conversion map, for headers\r\nhm = routine.get_headers(folder,file)\r\n\r\n\r\n#vals = list(hm.values()) #list(d.values())\r\n#vals.remove('OBSOLETE')\r\n#print(vals)\r\n\r\n## then do some business\r\n\r\n# Source folder:\r\ninput_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\7mo,trec2\"\r\noutput_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\TREc2_7mo_std\"\r\n\r\n# Init DataFolder\r\ndata_folder = DataFolder(input_folder, map_header = hm)#, limit_files = (0,3))#, limit_rows = 20, limit_files = (1,3)) \r\n\r\n# Print header map, conversion table\r\ndata_folder.print_header_map()\r\n\r\n# Change output folder, default is: C:\\Users\\Public\\Documents\\Tampereen yliopisto\\Eye tracker\\TRE Cohort 2\\gazeAnalysisLib analyses\\testing data\r\n#data_folder.set_output_folder(output_folder)\r\n\r\ndata_folder.rewrite_data(output_folder) \r\n \r\n"
},
{
"alpha_fraction": 0.6486742496490479,
"alphanum_fraction": 0.653724730014801,
"avg_line_length": 23.36153793334961,
"blob_id": "ba740a6ded5ee67b48c86e2ff7133fb9042c474e",
"content_id": "134f0b229404be4360089209a25f0834dd19af58",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3168,
"license_type": "permissive",
"max_line_length": 168,
"num_lines": 130,
"path": "/gazedata2gazedata_sy3.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\rimport csv\r\rimport routine\r\r#from collections import OrderedDict\r\rfrom itertools import islice\r\r# This Jussi's script converts eyetracking data in txt-format to gazedata-format\r\r# It also converts X- and Y- coordinates to relative values for screen size.\r\r# Input folder needs to be relative to the script location in the folder tree.\r\r# In this case the folder where this script is located needs to have a folder\r\r# named \"files_to_change\" where the files are located.\r\rinput_folder = folder = \"testing 7mo,trec2\" #\"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing 7mo,trec2\"\r#input_folder = folder = \"C:\\Users\\Public\\Documents\\Tampereen yliopisto\\Eye tracker\\TRE Cohort 2\\gazeAnalysisLib analyses\\7mo,trec2\"\r\routput_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing data\"\r\ros.makedirs(output_folder,exist_ok=True)\r\r\r#if ~os.path.isdir(output_folder):\r \r\r\rfile_ext = \".gazedata\" #input file extension, .txt\r\routput_file_ext = \".gazedata\"\r\rinput_file_delimiter = \"\\t\"\r\routput_file_delimiter = input_file_delimiter\r\rheaders_folder = os.getcwd() #path for headers inputfile\r\rheaders_inputfile = \"headers_tre_5mo_to_7mo.txt\"\r\r\r#map for one type of \"gazedata\" (or txt) headers, values may not apply to all gazedata \r\rmap_header = routine.get_headers(headers_folder, headers_inputfile)\r\r\r# find directory by \"walking\" through the system\r\rfor root, dirs, files in os.walk('C:\\\\Users\\\\'):\r \r if input_folder in root: \r \r print(root, \" \", end=\" FOUND! \")\r \r print(\"\")\r \r input_folder = root\r \rprint (input_folder)\r\r\r\r# list files in a directory\r\rdiritems = os.listdir(input_folder)\r\rprint (\"Directory contains \" + str(len(diritems)) + \" files.\")\r\r\r#loop through files, limit loop by isslice(items, start, stop)\r\rfor filenum, file in islice(enumerate(diritems), 0, 1): \r\r #print (\"Checking file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\r if file.endswith(file_ext):\r\r print (\"Process file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\r print(file)\r\r\r #read in data, process, and strore in newrows\r\r args_pro = input_folder, file, map_header\r \r newrows, newheaders = routine.file_process(args_pro)\r\r #print (str(newrows))\r\r\r print(\"newrows length: \" + str(len(newrows)))\r\r # output file formation\r\r # resolve the output file name\r\r input_filename_parts = file.split(\".\") #input_file.split(\".\")\r\r output_file = input_filename_parts[0] + output_file_ext\r\r \r\r # open file\r \r with open(os.path.join(output_folder, output_file), \"wt\") as outputfile:\r\r writer = csv.writer(outputfile, delimiter=output_file_delimiter)\r\r\r # write header row\r\r writer.writerow(newheaders) \r\r\r # write datarows\r\r for newrow in newrows:\r\r writer.writerow(newrow)\r\r print (\" File processed.\")\r\r# else:\r\r #print (\" Filename did not match the file_ext -> did nothing.\")\r\r"
},
{
"alpha_fraction": 0.6954103112220764,
"alphanum_fraction": 0.7023643851280212,
"avg_line_length": 24.629629135131836,
"blob_id": "0926f8047def3920107a120e90fdea59f7d72d15",
"content_id": "0c069c2f475517890e765c913d7242674954ca1f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 27,
"path": "/check_headersOO_0.1.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\nimport csv\r\nfrom my_classes import DataFolder\r\nfrom datetime import datetime\r\nfrom collections import OrderedDict\r\n\r\n# script for writing headers from different gazedata to file(s)\r\n\r\n\r\n\r\n# path to data\r\nfolder_path = (\"D:\\\\lasayr\\\\Aaltonen\\\\mi\") #+ folder_name)\r\n\r\n# create new DataFolder to be inspected/processed\r\ndata_folder = DataFolder(folder_path)# limit_files = (1,None))#(1,100))\r\n\r\n# target output to current working directory, cwd\r\ndata_folder.set_output_folder(os.getcwd())\r\n \r\n#print(os.getcwd() )\r\n \r\n##\r\n\r\n# list headers from differt files to output \"log\"\r\nstart_time = datetime.now()\r\ndata_folder.write_headers_to_file()\r\nprint(datetime.now() - start_time) #print time elapsed\r\n"
},
{
"alpha_fraction": 0.8155080080032349,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 61.33333206176758,
"blob_id": "001d8bd5dc5ec3bb200c682920ec18bad8f25fc0",
"content_id": "bd8821e7f493d8ca5a0e85d08da23f877453c86a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 748,
"license_type": "permissive",
"max_line_length": 151,
"num_lines": 12,
"path": "/README.md",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "# py_gazedat\n\nPython code for converting eye tracking data (.gazedata) with variable formats to a\nuniform format, where variables are always stored under constant headers, column positions, data types and scales.\nScript: standardizeGazedataAnon.py\n\nThe implementation is based on dedicated classes. Conversion from variable data formats is further based\non a headermap, which lists input headernames, new standard headernames, and starndardized column numbers. These must be given in 3 columns\nseparated by tab chars, respectively (e.g., \"header map 3d.txt\" within this repository).\n\nAnonymization of data is also supported. This is based on erasing unique timestamps and assigning random names to datafiles and within data participant\nidetifiers.\n"
},
{
"alpha_fraction": 0.5359506011009216,
"alphanum_fraction": 0.5480811595916748,
"avg_line_length": 21.081632614135742,
"blob_id": "802060171b63e9a5d7dea1e468f8c4c0b21b43af",
"content_id": "f21add865eb772d1cf5ff55e0a02477d349a3d4f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4534,
"license_type": "permissive",
"max_line_length": 127,
"num_lines": 196,
"path": "/routine.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\n\r\nimport csv\r\n\r\nfrom collections import OrderedDict\r\n\r\nfrom itertools import islice\r\n\r\ninput_file_delimiter = \"\\t\"\r\n\r\nnull_values = [\".\", \"\"] # two possible kinds values for missing samples\r\n\r\nnull_values_new = \"-999999\" #\r\n\r\nx_coord = ['LEFT_GAZE_X', 'RIGHT_GAZE_X']\r\ny_coord = ['LEFT_GAZE_Y', 'RIGHT_GAZE_Y']\r\n\r\nscreen_x_dim = 1920.0 #screen width, adjust for differnt sites? \r\n\r\nscreen_y_dim = 1020.0 #screen height, adjust for differnt sites?\r\n\r\n\r\n##\r\n\r\ndef miracle(number):\r\n for x in range(0, number):\r\n print(str(number))\r\n\r\n#miracle(3)\r\n\r\n\r\n##\r\n\r\ndef wonder(args):\r\n for x, arg in enumerate(args):\r\n print(arg)\r\n #print(str(x))\r\n #print(args(x))\r\n\r\n#miracle(3)\r\n## [subcode]\r\ndef get_headers(dir_path ,input_file):\r\n\r\n #table for header pairs\r\n\r\n maptable = OrderedDict()# {}#dict([('KEY','value')])\r\n\r\n\r\n #read dictionary key-value pairs from file,\r\n #1st column is for key and second/last column for values\r\n\r\n with open(os.path.join(dir_path, input_file), \"rt\", ) as inputfile:\r\n\r\n reader = csv.reader(inputfile, delimiter = input_file_delimiter)\r\n\r\n # grab header information, into maptable\r\n\r\n \r\n #all_lines = list(reader)\r\n\r\n #print (all_lines)\r\n \r\n for row in reader:\r\n \r\n #a = next(reader)\r\n #maptable[a[0]] = a[len(a)-1]\r\n maptable[row[0]] = row[len(row)-1] \r\n #print(a)\r\n \r\n\r\n return maptable\r\n\r\n##\r\n#routine for processing gazedata, read, manipulate, return\r\n\r\n#takes a list of argument including\r\n\r\n #input folder\r\n\r\n #input file\r\n\r\n #maptable, mapping old data headers (in \"file\") to new data headers\r\n \r\ndef file_process(t_args):\r\n\r\n input_folder = (t_args[0]) \r\n \r\n input_file = (t_args[1])\r\n\r\n maptable = (t_args[2]) #OrderedDict where key is old and value new header\r\n\r\n\r\n print (\" Filename matches with the specified file_ext -> processing..\") \r\n \r\n\r\n newrows = [] #processed data, process in function\r\n \r\n\r\n # input file reading\r\n\r\n with open(os.path.join(input_folder, input_file), \"rt\") as inputfile:\r\n\r\n reader = csv.reader(inputfile, delimiter = input_file_delimiter)\r\n \r\n\r\n # grab header information, into a list\r\n\r\n data_headers = next(reader) #reader.__next__() \r\n \r\n\r\n # loop file rows and cols, \r\n \r\n for r, row in islice(enumerate(reader), 0, 40): #None\r\n \r\n newrow = []\r\n\r\n for h, header in enumerate(maptable.keys()):#enumerate(header_keys):\r\n\r\n ncol = h #od_headers[key]\r\n \r\n try: #try to accces data element\r\n\r\n foo = row[ncol]\r\n\r\n except(IndexError): #if index oob, use element of previuous row\r\n\r\n foo = newrows[r-1]\r\n\r\n foo = foo[k]\r\n\r\n foo = manipulate(foo, header)\r\n \r\n newrow.append(foo)\r\n\r\n newrows.append(newrow)\r\n\r\n return newrows, maptable.values()#list(header_keys)\r\n \r\n\r\n \r\n##\r\n#routine for processing gazedata, read, manipulate, return\r\n\r\n\r\ndef manipulate(data, header): \r\n\r\n # manipulate data\r\n\r\n # take away the null-values if they exist \r\n\r\n foo = data\r\n\r\n if foo not in null_values: #if row[ncol] not in null_values:\r\n\r\n if header in x_coord: #['LEFT_GAZE_X', 'RIGHT_GAZE_X']:\r\n\r\n foo = float(foo) / screen_x_dim#newrow.append(float(foo) / screen_x_dim) #newrow.append(float(row[ncol]) / 1920.0)\r\n\r\n elif header in y_coord: #['LEFT_GAZE_Y', 'RIGHT_GAZE_Y']:\r\n\r\n foo = float(foo) / screen_y_dim #newrow.append(float(foo) / screen_y_dim) 
#newrow.append(float(row[ncol]) / 1020.0)\r\n\r\n else:\r\n\r\n foo = foo # newrow.append(foo) #newrow.append(row[ncol])\r\n\r\n return foo\r\n \r\n else:\r\n\r\n return null_values_new\r\n\r\n\r\n##\r\ndef string_or_number(s):\r\n try:\r\n z = int(s)\r\n return z\r\n except ValueError:\r\n try:\r\n z = float(s)\r\n return z\r\n except ValueError:\r\n return s\r\n\r\n##\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\n\r\nstart_time = datetime.now()\r\n\r\n#returns the elapsed milliseconds since the start of the program\r\ndef millis(start_time):\r\n dt = datetime.now() - start_time\r\n ms = (dt.days * 24 *60 *60 + dt.seconds) * 1000 +dt.microseconds / 1000\r\n return ms \r\n"
},
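The `manipulate` routine above back-fills missing samples and scales pixel coordinates to the unit range. A minimal, self-contained sketch of that step, with the header names and screen dimensions copied from the constants in the file (they are assumptions for any other tracker setup):

```python
# Sketch of routine.py's manipulate(): replace missing samples and scale
# pixel coordinates to [0, 1]. Constants mirror the ones defined above.
NULL_VALUES = [".", ""]          # markers used for missing samples
NULL_VALUES_NEW = "-999999"      # replacement written to the output
X_COORD = ["LEFT_GAZE_X", "RIGHT_GAZE_X"]
Y_COORD = ["LEFT_GAZE_Y", "RIGHT_GAZE_Y"]
SCREEN_X_DIM = 1920.0            # screen width in pixels
SCREEN_Y_DIM = 1020.0            # screen height in pixels

def manipulate(value, header):
    if value in NULL_VALUES:
        return NULL_VALUES_NEW
    if header in X_COORD:
        return float(value) / SCREEN_X_DIM
    if header in Y_COORD:
        return float(value) / SCREEN_Y_DIM
    return value

assert manipulate(".", "LEFT_GAZE_X") == "-999999"
assert manipulate("960", "LEFT_GAZE_X") == 0.5
```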
{
"alpha_fraction": 0.6352672576904297,
"alphanum_fraction": 0.6515645384788513,
"avg_line_length": 40.61111068725586,
"blob_id": "83ea942b178a51dad4a61fcbd291a8a3b8601827",
"content_id": "e83803b0b84411406dc53a9b2abe6249aaa72461",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3068,
"license_type": "permissive",
"max_line_length": 142,
"num_lines": 72,
"path": "/check_data_multi.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\nimport csv\r\nfrom my_classes import DataFolder\r\nfrom datetime import datetime, date\r\nfrom collections import OrderedDict\r\n\r\n# script for writing headers and data stats from different gazedata to file(s)\r\n\r\n##\r\n#parameter setting\r\n\r\n#set output file\r\noutput_file_name = (\"multi_folder_data_\" + str(date.today()) + \".txt\")\r\noutput_folder = os.getcwd()\r\n\r\n#limits data analysis for quick peek...\r\nlimit_last_row = None\r\nlimit_last_file = None\r\n\r\n# percentiles parameter for numerical stats\r\npercentiles = (0.1,99.9)\r\n\r\n# paths to data eg., 7mo,trec2\r\nfolders = OrderedDict()\r\nfolders['tre5mo_old'] = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\5mo,trec2\"\r\nfolders['tre5mo_new'] = \"D:\\\\lasayr\\\\Aaltonen\\\\5mo\"\r\nfolders['tre7mo_old'] = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\7mo,trec2\"\r\nfolders['tre24mo_old'] = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\24mo, trec2\"\r\nfolders['tre24mo_new'] = \"D:\\\\lasayr\\\\Aaltonen\\\\24mo\"\r\nfolders['ct_6mo'] = \"D:\\\\lasayr\\\\Aaltonen\\\\ct\\\\6mo\"\r\nfolders['ct_18mo'] = \"D:\\\\lasayr\\\\Aaltonen\\\\ct\\\\18mo\"\r\nfolders['hki'] = \"D:\\\\lasayr\\\\Aaltonen\\\\hki\"\r\nfolders['mi'] = \"D:\\\\lasayr\\\\Aaltonen\\\\mi\"\r\n\r\n#testing with only some folders\r\nfolders = OrderedDict()\r\nfolders['tre24mo_old'] = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\24mo, trec2\"\r\nfolders['tre24mo_new'] = \"D:\\\\lasayr\\\\Aaltonen\\\\24mo\"\r\nfolders['ct_6mo'] = \"D:\\\\lasayr\\\\Aaltonen\\\\ct\\\\6mo\"\r\nfolders['ct_18mo'] = \"D:\\\\lasayr\\\\Aaltonen\\\\ct\\\\18mo\"\r\n\r\n##\r\n#write \"logfile\" output to list statistics from differt files\r\nwith open(os.path.join(output_folder, output_file_name),\r\n \"wt\", newline = \"\\n\") as outputfile:\r\n #construct csv.writer based on outputfile\r\n writer = csv.writer(outputfile, delimiter=\"\\t\")\r\n\r\n #loop through folders containing gazedata files \r\n for folder in folders:\r\n # use timer to time\r\n start_time = datetime.now()\r\n #print folder at hand for tracking process\r\n print(\"\\n\" + \"process folder: \" + folders[folder])\r\n \r\n # create new DataFolder to be inspected/processed, \r\n data_folder = DataFolder(folders[folder],\r\n limit_rows = limit_last_row,\r\n limit_files = (0,limit_last_file)) \r\n data_folder.set_output_folder(output_folder)\r\n \r\n #write stats directly with DataFolder class (to seprate files)\r\n data_folder.write_stats_to_file(percentiles)\r\n\r\n #\"logfile\", based on data headers\r\n writer.writerow( [folder]) #print time elapsed )\r\n writer.writerow( [str(datetime.now() - start_time)]) #print time elapsed )\r\n headers = data_folder.get_headers()\r\n output = []\r\n for header in headers:\r\n writer.writerow([header] + [str(data_folder.get_stats(header))]) \r\n writer.writerow([])\r\n"
},
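check_data_multi.py times each folder pass with `datetime.now()` and appends tab-separated rows to a log file. A stripped-down sketch of that pattern without the DataFolder dependency (the folder path and output file name below are placeholders):

```python
# Per-folder timing/logging skeleton, as used in check_data_multi.py above.
import csv
from collections import OrderedDict
from datetime import datetime

folders = OrderedDict()
folders["example"] = "."          # placeholder path

with open("multi_folder_log.txt", "wt", newline="\n") as out:
    writer = csv.writer(out, delimiter="\t")
    for name, path in folders.items():
        start_time = datetime.now()
        # ... process `path` here ...
        writer.writerow([name])
        writer.writerow([str(datetime.now() - start_time)])
```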
{
"alpha_fraction": 0.5138759016990662,
"alphanum_fraction": 0.5188227295875549,
"avg_line_length": 38.29308319091797,
"blob_id": "7e4f210786eaadbfdfe404d866b2dd5fb15a700f",
"content_id": "e8a9f66ed4a6ecaca19c5db215f96a5f44ada091",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 32142,
"license_type": "permissive",
"max_line_length": 152,
"num_lines": 795,
"path": "/my_classes.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\n\r\nimport glob\r\n\r\nimport csv\r\n\r\nimport time\r\n\r\nfrom collections import OrderedDict\r\n\r\nfrom collections import namedtuple\r\n\r\nimport routine\r\n\r\nfrom itertools import islice\r\n\r\nimport numpy as np\r\n\r\nfrom datetime import date\r\n\r\nimport random\r\n\r\nEMPTY_STRING = \" \"\r\n\r\nHEADERFILENAME = 'headers.txt'\r\n\r\nOBSOLETE_HEADER = 'OBSOLETE'\r\n\r\nNULL_VALUES = [\".\", \"\", \"1.#INF\", \"-1.#INF\", \"1.#IND\", \"-1.#IND\", \r\n \"-1.#QNAN\" , \"1.#QNAN\", \"-\"] # possible kinds values for missing samples\r\n\r\nNULL_VALUES_NEW = \"-999999\" #\r\n\r\nINPUT_DELIMITER_DEFAULT = '\\t'\r\n\r\nX_COORD_HEADERS = ['LEFT_GAZE_X', 'RIGHT_GAZE_X']\r\nY_COORD_HEADERS = ['LEFT_GAZE_Y', 'RIGHT_GAZE_Y']\r\nSUBJECT_HEADER = 'Filename'#'Subject'\r\nSTIM_HEADER = 'Stim'\r\nTIME_HEADER = 'TETTime'\r\nLATERAL_STIM_HEADER = 'LateralStimPos'\r\n\r\nSCREEN_X_DIM = 1920.0 #screen width, adjust for differnt sites? \r\n\r\nSCREEN_Y_DIM = 1020.0 #screen height, adjust for differnt sites?\r\n\r\n\r\n\r\nclass MyClass:\r\n \"\"\"A simple example class\"\"\"\r\n #i = 12345\r\n\r\n def __init__(self, arku = \"qwerty\"):\r\n self.i = 12345\r\n self.word = arku\r\n\r\n def f(self):\r\n if not self.word:\r\n return 'hello world'\r\n else:\r\n return self.word\r\n\r\n def get_i(self):\r\n return self.i\r\n\r\n\r\n\r\nclass GazeReader:\r\n \"\"\"A class for Reading and processing gazedata\"\"\"\r\n\r\n def __init__(self, t_args, limit = None, percentiles = (1,99), anonymize = False ):\r\n self.input_folder = (t_args[0]) \r\n self.input_file = (t_args[1])\r\n self.maptable = (t_args[2]) #OrderedDict where key is old and value new header\r\n #self.mapCurrent = (t_args[3]) #OrderedDict where key is old and value new header\r\n# self.newrows = [] #processed data, process in function\r\n self._limit_row = limit# (t_args[3]) # limit for rows to process, None if no limit\r\n self._input_file_delimiter = INPUT_DELIMITER_DEFAULT\r\n \r\n self.output_file = self.input_file\r\n \r\n # extension of the file extracted\r\n self.file_ext = os.path.splitext(self.input_file)[-1]\r\n \r\n # anonymize if needed\r\n if anonymize:\r\n self._anon, self._timestamp, = self._anonymize_fun() #finds 1st timevalue\r\n else:\r\n self._anon = False\r\n \r\n print('Read data to GazeReader')\r\n datrows, datheaders = self._read_data() #reads in data\r\n self.data_rows = datrows\r\n self.data_headers = datheaders\r\n\r\n self.data_od = {}#OrderedDict()\r\n\r\n # percentile points for extracting nearly min-max range of gazedata values\r\n print(percentiles[0])\r\n self.lo_perc = percentiles[0] \r\n self.hi_perc = percentiles[1]\r\n \r\n self.r_ind = -1\r\n \r\n def _anonymize_fun(self): \r\n # do anonymization-related procedures\r\n anonymizationBool = True\r\n \r\n timevalue = self._find_timestart()\r\n \r\n file_anon = 'Anon-' + time.strftime(\"%H%M%S\", time.localtime()) + '%d' % (time.time() % 1 * 1000) \r\n self.set_filename_out(file_anon)\r\n \r\n return anonymizationBool, timevalue \r\n \r\n def _read_data(self):\r\n # input file reading\r\n # this should be private?\r\n # this doesn't return data if no maptable provided...\r\n # this returns headers anyway\r\n\r\n # First row of data values is by default 1,\r\n # that is, first col after headers. 
If no headers must\r\n # be set 0.\r\n #first_datarow = 1\r\n\r\n with open(os.path.join(self.input_folder, self.input_file), \"rt\") as inputfile:\r\n reader = csv.reader(inputfile, delimiter = self._input_file_delimiter)\r\n \r\n # Grab header information, into a list\r\n data_headers = next(reader) #reader.__next__() \r\n\r\n # Check if headers are numerical (or strings that can\r\n # be converted to numbers).\r\n headers_numform=[]\r\n truly_strings=[]\r\n for header in data_headers: \r\n # Convert header to num if possible.\r\n headers_numform.append(routine.string_or_number(header))\r\n # Check if header remained a string.\r\n truly_strings.append(isinstance(headers_numform[-1], str)) #[-1] is the last element...!\r\n # If no good headers (i.e., strings) make headers empty.\r\n if not all(truly_strings):\r\n data_headers = self._read_headers()\r\n # \"Restart\" the reader, so that no data will be missed afterwards.\r\n inputfile.seek(0) \r\n\r\n \r\n # Data rows storage for this function \r\n newrows = []\r\n \r\n if not self.maptable:\r\n newheaders = data_headers\r\n return newrows, data_headers\r\n\r\n hdrKeys = self.maptable.getKeys()\r\n #newheaders = [] #['' for i in range(len(hdrKeys))]\r\n newheaders = [\" \" for i in range(self.maptable.get_n_of_uniqueCols())]\r\n\r\n #make new headers\r\n for header in enumerate(hdrKeys):#enumerate(header_keys):\r\n if not self.maptable.get_header_newName(header[1]) == OBSOLETE_HEADER:\r\n newCol = self.maptable.get_header_colNum(header[1])-1\r\n newheaders.pop(newCol)\r\n newheaders.insert(newCol,self.maptable.get_header_newName(header[1]))\r\n \r\n \r\n # Loop file rows and cols,\r\n for r, row in islice(enumerate(reader), 0, self._limit_row): #None \r\n # initialize newrow as list with elements upto deisred cols\r\n newrow = [EMPTY_STRING for i in range(len(newheaders))]#[\" \" for i in range(self.maptable.get_n_of_uniqueCols())]\r\n\r\n #Loop Headers\r\n for header in enumerate(hdrKeys):#enumerate(header_keys):\r\n # Find column number of Standard Header\r\n # from current input file's headers\r\n try:\r\n col = data_headers.index(header[1])\r\n #print(\"header: \"+header[1])\r\n #print(\"col: \"+ str(col))\r\n except(ValueError):\r\n col = -1\r\n #print(\"header: \"+header[1])\r\n #print(\"header not found in current input\")\r\n \r\n if col < len(row) and col > 0:\r\n dataCell = row[col]\r\n else:\r\n dataCell = EMPTY_STRING\r\n \r\n a = self._anon \r\n dataCell = self._manipulate(dataCell, self.maptable.get_header_newName(header[1]), a)\r\n if not self.maptable.get_header_newName(header[1]) == OBSOLETE_HEADER:\r\n newCol = self.maptable.get_header_colNum(header[1])-1\r\n #print(header[1])\r\n #print('newCOl: ' + str(newCol))\r\n #newrow.append(dataCell)\r\n if newrow[newCol] == EMPTY_STRING:\r\n newrow.pop(newCol)\r\n newrow.insert(newCol, dataCell)\r\n \r\n newrows.append(newrow)\r\n \r\n \r\n return newrows, newheaders#self.maptable.values()#list(header_keys)\r\n##\r\n def get_data_stats(self, header_key):\r\n # returns ~min, ~max of number (actually lo/hi percentiles)\r\n # or unique of strings of variable\r\n # uses OrderedDict self.data_od as main data structure\r\n # header_key is used to index specific variable\r\n\r\n if not header_key:\r\n return []\r\n \r\n # initialize data if not alreadey \r\n if not self.data_od:\r\n self._odictionarize_data()\r\n\r\n # list of statistics from the current file/variable\r\n stats_file = self.data_od[header_key]\r\n if not stats_file:\r\n print(\"no data for: \" + header_key)\r\n return 
[]\r\n\r\n # check if variable includes string values\r\n stats_include_str = []\r\n for stat in stats_file:\r\n if isinstance(stat, str): stats_include_str.append(1)\r\n else: stats_include_str.append(0)\r\n stats_include_str = any(stats_include_str)\r\n\r\n # extract min, max of numerical values\r\n if not stats_include_str:\r\n min_value = np.percentile(self.data_od[header_key] , self.lo_perc) #min(self.data_od[header_key])\r\n max_value = np.percentile(self.data_od[header_key] , self.hi_perc)\r\n return_value = [min_value, max_value]\r\n # extract unique strings (no duplicates)\r\n else:\r\n return_value = (list(set(self.data_od[header_key])))\r\n\r\n return return_value\r\n\r\n\r\n##\r\n def _find_timestart(self):\r\n # input file reading\r\n # this is \"private\" function for GazeReader\r\n # it returns the timestamp of first ET frame\r\n # (Can be used for anonymization, when removing unique timestamp...)\r\n\r\n # Only some first data rows are needed for this function\r\n # So set row limit to some small number (2 enuff?)\r\n limit_original = self._limit_row \r\n self._limit_row = 10\r\n\r\n # Run data into Ordered Dictionary\r\n # initialize variable to zero\r\n #self._timestamp = 0\r\n self._odictionarize_data(anonymize = False)\r\n # Find 1st timevalue\r\n timeval = self.data_od[TIME_HEADER][0]\r\n \r\n #Return things as default\r\n self.data_od = {}#OrderedDict()\r\n self._limit_row = limit_original\r\n \r\n # return\r\n return timeval\r\n \r\n \r\n##\r\n def _odictionarize_data(self, anonymize = False):\r\n # input file reading\r\n # this is \"private\" function for GazeReader\r\n # it reads fat into OrderedDict\r\n \r\n with open(os.path.join(self.input_folder, self.input_file), \"rt\") as inputfile:\r\n reader = csv.reader(inputfile, delimiter = self._input_file_delimiter)\r\n\r\n # grab header information, into a list\r\n headers = next(reader)\r\n\r\n # return if no good headers\r\n if not isinstance(headers[0], str):\r\n for i, el in enumerate(headers):\r\n headers[el] = \"Header_\" + str(i)\r\n return \"No string headers\"\r\n \r\n #initialize od with headers as keys\r\n self.data_od = OrderedDict.fromkeys(headers) \r\n \r\n # loop file rows and cols,\r\n for r, row in islice(enumerate(reader), 0, self._limit_row): \r\n #newrow = []\r\n # loop cols\r\n for h, header in enumerate(self.data_od.keys()):\r\n try: # try to accces data element \r\n foo = row[h] \r\n except(IndexError): #if index oob, use element of previuous row \r\n #print(\"bad row: \" + str(r) + \" for: \" + header)\r\n foo = []\r\n\r\n # process data value \r\n foo = self._manipulate(foo, header, anonymize)\r\n # convert to number if possible\r\n foo = routine.string_or_number(foo)\r\n # initialize variable or append new value\r\n if not self.data_od[header]:\r\n self.data_od[header] = [foo]\r\n else:\r\n self.data_od[header].append(foo)\r\n \r\n \r\n## \r\n def set_row_limit(self, number):\r\n # set limit for how may rows will be read from input file\r\n self._limit_row = number\r\n\r\n def set_percentiles(self, lo_percentile, hi_percentile):\r\n # set percentiles for very low and high data values\r\n self.lo_perc = lo_percentile \r\n self.hi_perc = hi_percentile\r\n\r\n def _read_headers(self):\r\n # read headers from current folder\r\n\r\n print(\"Writing headers from a separate file for \" + self.input_file + \".\")\r\n \r\n with open(os.path.join(self.input_folder, HEADERFILENAME), \"rt\") as inputfile:\r\n reader = csv.reader(inputfile, delimiter = self._input_file_delimiter)\r\n headers = 
next(reader)\r\n return headers\r\n \r\n \r\n def _manipulate(self, dataIn, header, anonymize = False): \r\n # manipulate data\r\n # more manipulations could be included...\r\n \r\n if not dataIn: return NULL_VALUES_NEW\r\n else: dataOut = dataIn\r\n\r\n #truth = isinstance(\"1\", int)\r\n #routine.string_or_number(123)\r\n \r\n # take away the null-values if they exist \r\n if dataOut not in NULL_VALUES: \r\n if header == SUBJECT_HEADER: #eg. \"Subject\"\r\n if dataIn == EMPTY_STRING:\r\n dataOut = self.input_file\r\n if anonymize: # set filename to anonynous \r\n dataOut = self.output_file\r\n elif header == STIM_HEADER: #eg. \"Stim\"\r\n dataIn = routine.string_or_number(dataIn)\r\n if isinstance(dataIn, str):\r\n dataOut = dataIn\r\n else:\r\n dataIn = int(dataIn)\r\n if 1 <= dataIn <= 2:\r\n dataOut = \"fearful.bmp\"\r\n elif 3 <= dataIn <= 4:\r\n dataOut = \"control.bmp\"\r\n elif 5 <= dataIn <= 6:\r\n dataOut = \"happy.bmp\"\r\n elif 7 <= dataIn <= 8:\r\n dataOut = \"neutral.bmp\"\r\n \r\n elif header == LATERAL_STIM_HEADER: #eg. \"LateralStimPos\"\r\n #print(dataIn)\r\n if routine.string_or_number(dataIn) == 1:\r\n dataOut = \"left\"\r\n elif routine.string_or_number(dataIn) == 2:\r\n dataOut = \"right\"\r\n \r\n elif header == TIME_HEADER: #eg. \"LateralStimPos\"\r\n #print(dataIn)\r\n #try: #Possible error caused by non-existing var: self._anon \r\n if anonymize:\r\n dataOut = str(float(dataIn) - self._timestamp)\r\n #except AttributeError: \r\n # pass\r\n #print('variable \\\"self._anon\\\" not found')\r\n \r\n \r\n #print(\"dataOut = right\")\r\n # Currently no need for scaling gaze coordinates...\r\n #elif header in X_COORD_HEADERS: #['LEFT_GAZE_X', 'RIGHT_GAZE_X']:\r\n # dataOut = float(dataOut) / SCREEN_X_DIM#newrow.append(float(dataOut) / SCREEN_X_DIM) #newrow.append(float(row[ncol]) / 1920.0)\r\n #elif header in Y_COORD_HEADERS: #['LEFT_GAZE_Y', 'RIGHT_GAZE_Y']: \r\n # dataOut = float(dataOut) / SCREEN_Y_DIM #newrow.append(float(dataOut) / SCREEN_Y_DIM) #newrow.append(float(row[ncol]) / 1020.0)\r\n else:\r\n dataOut = dataOut\r\n return dataOut \r\n else:\r\n #print(header)\r\n return NULL_VALUES_NEW\r\n\r\n\r\n def get_filename(self, no_ext = True, in_out = 'out'): \r\n # returns the file being read/processed\r\n # in_out defines whether input or output filename is returned\r\n \r\n if in_out == 'out':\r\n fn = self.output_file\r\n elif in_out == 'in':\r\n fn = self.input_file\r\n else:\r\n print(\"Only in/out are acceptable parameter values!\")\r\n \r\n if no_ext:\r\n #return filename without extension\r\n #find extendsion start\r\n i_ext = fn.find('.') \r\n return fn[0:i_ext]\r\n else:\r\n return fn\r\n\r\n def get_row_count(self): \r\n # returns the number of rows read and stored\r\n return (len(self.data_rows))\r\n\r\n def get_headers(self): \r\n # returns list of headers\r\n\r\n # if no maptable for headers available, return \"plain\" headers\r\n if not self.maptable:\r\n return self.data_headers\r\n # if conversion map for new headers available, return new headers\r\n else:\r\n if isinstance(self.maptable, HeaderReader):\r\n newheaders = self.data_headers# list(self.maptable.values()) #list(d.values())\r\n # remove obsolete headers\r\n if OBSOLETE_HEADER in newheaders:\r\n newheaders.remove(OBSOLETE_HEADER)\r\n return newheaders\r\n \r\n def get_new_row(self): \r\n # returns a new data row at each call\r\n self.r_ind += 1 \r\n if self.r_ind < self.get_row_count():\r\n return self.data_rows[self.r_ind]\r\n else:\r\n return False\r\n \r\n def set_filename_out(self, 
filename_new): \r\n # sets a new name fot the file being read/processed\r\n fn = str(filename_new)\r\n if fn.endswith(self.file_ext):\r\n self.output_file = fn\r\n else:\r\n self.output_file = fn + self.file_ext\r\n\r\n \r\n \r\n def restart(self):\r\n # resets the counter for new data rows, starts over again\r\n self.r_ind = -1\r\n\r\n##\r\nclass DataFolder:\r\n \"\"\"A class for accessing gazedata in a specific folder \"\"\"\r\n \"\"\"We have many folders with vairiable gazedata. The headers, \"\"\"\r\n \"\"\"datavalue scales, tags, and structure may all be variable. \"\"\"\r\n \"\"\"With DataFolder, it is possible to output these things for comparison\"\"\"\r\n## \r\n def __init__(self,\r\n path,\r\n limit_rows = None,\r\n limit_files = (0, None),\r\n file_ext = \".gazedata\",\r\n input_file_delimiter = '\\t',\r\n map_header = None,\r\n date_limit = \"1 Jan 00\",\r\n date_limit_type = \"c\", #c=created, m=modified\r\n #map_header_current = None,\r\n output_folder = \"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing data\",\r\n ): #t_args,\r\n\r\n self.dirpath = path # input folder path\r\n self.limit_rows = limit_rows # limit data rows per file processed\r\n self.limit_files = limit_files # limit n files\r\n self.file_ext = file_ext # input file type\r\n self.file_delimiter = input_file_delimiter # delimiter e.g., tab \"\\t\"\r\n # first header map is for the exemplary data\r\n self.map_header = map_header # map for old and new headers,\r\n # second header map is for current data folder idiosyncracies\r\n #self.map_header_current = map_header_current # map for old and new headers\r\n self.date_limit = time.strptime(date_limit, \"%d %b %y\")\r\n self.output_folder = output_folder # output folder\r\n self.headers_folder = os.getcwd() # folder of header transform map\r\n\r\n self.folder_level_data = OrderedDict() # data from all files in folder\r\n self.out_stats = OrderedDict() #extracted descriptive stat\r\n\r\n # for investigating file headers\r\n # if no header map is provided, the aim is to\r\n # read headers \"bottom-up\" and store them to outputfile\r\n if not self.map_header:\r\n folder_path = os.path.split(self.dirpath)\r\n folder_tail = folder_path[1] \r\n self.output_file = ( \"headers in \" + folder_tail + \"_\" +\r\n str(date.today()) + \".txt\")\r\n else:\r\n self.output_file = ( \"output_\" + str(date.today()) + \".txt\")\r\n \r\n\r\n # get list of files \r\n #self.diritems = os.listdir(path)\r\n self.diritems = [fileName for fileName in os.listdir(path) if fileName.endswith(file_ext)]\r\n self.diritems = self.diritems[self.limit_files[0]:self.limit_files[1]]\r\n self.diritems = self.timethreshold_items(self.diritems,date_limit_type)\r\n print(\"-------------------------\")\r\n print(\"Files selected: \" + str(self.diritems))\r\n print(\"-------------------------\")\r\n #self.diritems = glob.glob(os.path.join(path,\"*\" ,file_ext)) //not work!\r\n #print (\"Directory contains \" + str(len(self.diritems)) + \" files.\")\r\n\r\n \r\n##\r\n def set_output_folder(self, folder):\r\n # chahnge output folder\r\n print (\"Output to: \" + folder)\r\n\r\n if not os.path.isdir(folder):\r\n os.mkdir(folder)\r\n \r\n self.output_folder = folder\r\n\r\n##\r\n def write_headers_to_file(self):\r\n # function for storing into outputfile headers used in files in the folder\r\n\r\n with open(os.path.join(self.output_folder, self.output_file),\r\n \"wt\", newline = \"\\n\") as outputfile:\r\n \r\n writer = 
csv.writer(outputfile, delimiter=self.file_delimiter)\r\n \r\n for filenum, file in islice(enumerate(self.diritems), self.limit_files[0], self.limit_files[1]): \r\n #print (\"Checking file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\n if file.endswith(self.file_ext):\r\n #print(os.path.join(self.output_folder, self.output_file))\r\n print (\"Process file \" + str(filenum + 1) + '/' + str(len(self.diritems)))\r\n print(file)\r\n\r\n #read in data, process, and strore in newrows\r\n args_pro = self.dirpath, file, self.map_header\r\n \r\n # make new GazeReader object for reading and processing input file\r\n f_processor = GazeReader(args_pro, self.limit_rows) #40 is optional limit for rows\r\n \r\n #f_processor.set_row_limit(40) # limit rows, good for debugging\r\n row_list_to_write = f_processor.get_headers()\r\n row_list_to_write.insert(0, file)\r\n writer.writerow( row_list_to_write )\r\n\r\n##\r\n def write_stats_to_file(self, percentiles):\r\n # function for summarizing variable scales, with min,max or string tags\r\n\r\n # make specific output file with this function\r\n _output_file = \"daata stats and \" + self.output_file\r\n\r\n # collect statistics from all files in folder \r\n for filenum, file in islice(enumerate(self.diritems), self.limit_files[0], self.limit_files[1]): \r\n #print (\"Checking file \" + str(filenum + 1) + '/' + str(len(self.diritems)))\r\n if file.endswith(self.file_ext):\r\n #print(os.path.join(self.output_folder, self.output_file))\r\n print (\"Process file \" + str(filenum + 1) + '/' + str(len(self.diritems)))\r\n print(file)\r\n\r\n #read in data, process, and strore in newrows\r\n args_pro = self.dirpath, file, self.map_header\r\n \r\n # make new GazeReader object for reading and processing input file\r\n f_processor = GazeReader(args_pro, self.limit_rows, percentiles) #40 is optional limit for rows\r\n\r\n for header in f_processor.get_headers():\r\n #print(\"header: \" + header)\r\n stats = f_processor.get_data_stats(header)\r\n if header not in self.folder_level_data.keys():\r\n self.folder_level_data[header] = stats\r\n else:\r\n for el in stats:\r\n self.folder_level_data[header].append(el)\r\n #if isinstance(el, str):\r\n #print(header + \" has strings\")\r\n \r\n #!!assign list instead!!!1\r\n\r\n #reduce statistical data for outputting \r\n #self.out_stats already defined at __init__()\r\n\r\n ## \r\n # loop through variables/headers \r\n for header in self.folder_level_data.keys():\r\n stats_folder = self.folder_level_data[header]\r\n\r\n if not stats_folder: continue\r\n \r\n # check if variable includes string values\r\n stats_include_str = []\r\n for stat in stats_folder:\r\n if isinstance(stat, str): stats_include_str.append(1)\r\n else: stats_include_str.append(0) \r\n\r\n # extract min, max of numerical values\r\n if not any(stats_include_str):\r\n min_value = min(self.folder_level_data[header])\r\n max_value = max(self.folder_level_data[header])\r\n self.out_stats[header] = min_value, max_value\r\n \r\n # extract unique strings\r\n else:\r\n if all(stats_include_str):\r\n self.out_stats[header] = sorted(list(set(self.folder_level_data[header])))\r\n else:\r\n self.out_stats[header] = list(set(self.folder_level_data[header]))\r\n #print(header + \" has strings\")\r\n \r\n\r\n # do the writing\r\n with open(os.path.join(self.output_folder, _output_file),\r\n \"wt\", newline = \"\\n\") as outputfile:\r\n writer = csv.writer(outputfile, delimiter=self.file_delimiter)\r\n writer.writerow( self.out_stats.keys() )\r\n writer.writerow( 
self.out_stats.values() )\r\n\r\n ##\r\n def rewrite_data(self, outputfolderIn = None, anonymize = False):\r\n #function for rewriting data with new format\r\n\r\n if outputfolderIn:\r\n self.set_output_folder(outputfolderIn)\r\n \r\n if anonymize: \r\n random.shuffle(self.diritems)\r\n \r\n # access data from all files in folder \r\n for filenum, file in islice(enumerate(self.diritems), self.limit_files[0], self.limit_files[1]): \r\n #print (\"Checking file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\n if file.endswith(self.file_ext):\r\n #print(os.path.join(self.output_folder, self.output_file))\r\n print (\"\\nProcess file \" + str(filenum + 1) + '/' + str(len(self.diritems)))\r\n print(file + '\\n') \r\n print(\"Write new file to: \" + self.output_folder)\r\n\r\n #read in data, process, and strore in newrows\r\n args_pro = self.dirpath, file, self.map_header#,\r\n #self.map_header_current #None#self.map_header\r\n\r\n # make new GazeReader object for reading and processing input file\r\n f_processor = GazeReader(args_pro, self.limit_rows, anonymize = anonymize)#, percentiles = percentiles) #40 is optional limit for rows\r\n\r\n # make name for new gazedata file\r\n _output_file = (f_processor.get_filename(no_ext=True) + \"_std.gazedata\")\r\n print(_output_file)\r\n # output/gazedatafile opening\r\n with open(os.path.join(self.output_folder, _output_file),\r\n \"wt\", newline = \"\\n\") as outputfile:\r\n\r\n writer = csv.writer(outputfile, delimiter=self.file_delimiter)\r\n\r\n #write headers to new file\r\n headers = f_processor.get_headers()\r\n \r\n #NEW HEADERS ARE ALREADY IN USE FOR GazeReader, if initialized with a header map!!!\r\n writer.writerow( headers )\r\n \r\n #write data rows to new file\r\n found_new_row = f_processor.get_new_row()\r\n while found_new_row:\r\n #print(found_new_row)\r\n writer.writerow( found_new_row )\r\n found_new_row = f_processor.get_new_row()\r\n \r\n ## \r\n def get_headers(self): \r\n # returns list of headers\r\n \r\n #if stats calculated do this\r\n if len(self.out_stats.keys()) > 0:\r\n return self.out_stats.keys()\r\n else:\r\n #read in data from first file to get it's headers\r\n args_pro = self.dirpath, self.diritems[1], self.map_header\r\n \r\n # make new GazeReader object for reading and processing input file\r\n limit_rows = 1\r\n f_processor = GazeReader(args_pro, limit_rows)\r\n return f_processor.get_headers()\r\n \r\n \r\n\r\n def get_stats(self, header):\r\n #return stats of specific variable\r\n return self.out_stats[header]\r\n\r\n def timethreshold_items(self, items, type):\r\n #return stats of specific variable\r\n\r\n items_v2 = []\r\n timeThreshold = time.mktime(self.date_limit) \r\n print(\"Number of files in folder: \" + str(len(items)))\r\n for itemNum, item in (enumerate(items)): \r\n if type == \"m\": \r\n itemModified = os.path.getmtime(self.dirpath + '\\\\' + item)\r\n if type == \"c\":\r\n itemModified = os.path.getctime(self.dirpath + '\\\\' + item)\r\n if itemModified < timeThreshold:\r\n print(item + \" is too old\") \r\n else:\r\n filedate = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(itemModified))\r\n items_v2.append(item)\r\n print(item + \" has good date: \" + filedate)\r\n print(\"Number of files selected for processing: \" + str(len(items_v2)))\r\n return items_v2\r\n\r\n def get_filelist(self):\r\n #return stats of specific variable\r\n return self.diritems\r\n \r\n\r\n def print_header_map(self):\r\n print(\"List header map, folder: \"+ self.dirpath +\"; old: new\")\r\n\r\n #try if the 
self.map_headers is ordered dictionary\r\n try: \r\n for k in self.map_header.keys():\r\n print (k + \": \" + self.map_header[k])\r\n \r\n except(AttributeError): #if it is HeaderReader\r\n for k in self.map_header.getKeys():\r\n #print(k)\r\n print (k + \": \" + self.map_header.get_header_newName(k) +\r\n \", col: \" + str(self.map_header.get_header_colNum(k)))\r\n\r\nclass HeaderReader:\r\n \"\"\"A class for Reading and processing Headers\"\"\"\r\n\r\n def __init__(self, path, file ):\r\n\r\n self.file = file #\r\n self.path = path #\r\n self._input_file_delimiter = INPUT_DELIMITER_DEFAULT\r\n self.od = self._read_headers()\r\n \r\n \r\n \r\n def _read_headers(self): \r\n # returns list of headers\r\n\r\n od = OrderedDict()\r\n \r\n headerInfo = namedtuple('headerInfo', 'newName colNum')\r\n \r\n with open(os.path.join(self.path, self.file), \"rt\", ) as inputfile:\r\n\r\n reader = csv.reader(inputfile, delimiter = self._input_file_delimiter)\r\n\r\n # grab header information, into \r\n \r\n for row in reader:\r\n #print(row)\r\n #maptable[row[0]] = row[len(row)-1] \r\n #od['TitanicThing'] = Strong('FamousName', 88)\r\n od[row[0]] = headerInfo(row[1], row[2])\r\n \r\n \r\n return od\r\n\r\n\r\n def getKeys(self):\r\n return self.od.keys()\r\n\r\n def get_n_of_uniqueCols(self):\r\n nCols = 0\r\n previousColNum = -1\r\n for key in self.od.keys():\r\n thisColNum = self.get_header_colNum(key)\r\n if thisColNum != previousColNum and thisColNum > 0:\r\n nCols += 1\r\n #print(key)\r\n #print(thisColNum)\r\n previousColNum = thisColNum\r\n \r\n return nCols\r\n \r\n\r\n def get_header_colNum(self, header):\r\n\r\n return int(self.od[header].colNum)\r\n \r\n def get_header_newName(self, header):\r\n #print(header)\r\n return self.od[header].newName\r\n\r\n \r\n \r\n \r\n \r\n\r\n"
},
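`HeaderReader` in my_classes.py maps each old header name to a `(newName, colNum)` namedtuple read from a tab-separated map file. A self-contained sketch of that loading step; the map-file layout (old name, new name, column number per row) is inferred from `_read_headers` above, and the path in the comment is a placeholder:

```python
# Sketch of HeaderReader's map loading: one OrderedDict entry per line of
# "old<TAB>new<TAB>column".
import csv
from collections import OrderedDict, namedtuple

HeaderInfo = namedtuple("HeaderInfo", "new_name col_num")

def read_header_map(path, delimiter="\t"):
    table = OrderedDict()
    with open(path, "rt") as f:
        for row in csv.reader(f, delimiter=delimiter):
            table[row[0]] = HeaderInfo(row[1], int(row[2]))
    return table

# table = read_header_map("header map 3D_std.txt")
# table["TETTime"].col_num, table["TETTime"].new_name
```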
{
"alpha_fraction": 0.5828994512557983,
"alphanum_fraction": 0.6042751669883728,
"avg_line_length": 21.09493637084961,
"blob_id": "e01c6375968d1f83e51d70827bff829f1144a181",
"content_id": "6d1bfad28130d7a6856203ba8d4528a2d1c4ea6b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3649,
"license_type": "permissive",
"max_line_length": 118,
"num_lines": 158,
"path": "/py_test_2.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "from datetime import datetime, date\r\n\r\nimport os\r\n\r\nimport time\r\n\r\nimport csv\r\n\r\nimport sys\r\n\r\nimport calendar\r\n#import math\r\n\r\nfrom collections import OrderedDict\r\nfrom collections import namedtuple\r\n\r\n\r\nimport routine # a script with functions\r\n\r\nimport numpy as np\r\n\r\nimport my_classes\r\n#from my_classes import GazeReader\r\n#from my_classes import HeaderReader\r\n##from itertools import islice\r\nimport random\r\nrandom.shuffle(files)\r\nfiles\r\nprint('heillo')\r\n\r\nprint('acsascscaheillo')\r\n\r\n\r\n##\r\n##input_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\\\ct\\\\6mo\"\r\n###\"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\7mo,trec2\"\r\n##\r\n##diritems = [fileName for fileName in os.listdir(input_folder) if fileName.endswith('.gazedata')]\r\n##\r\n###write date\r\n##tt = time.strptime(\"19 Oct 17\", \"%d %b %y\")\r\n###convert date to epoch time, seconds\r\n##seconds = time.mktime(tt) \r\n##\r\n##print(diritems[len(diritems)-1])\r\n##print(os.path.getctime(input_folder + '\\\\' + diritems[1]))\r\n##print(time.time())\r\n##print(tt)\r\n##print(seconds)\r\n\r\n\r\n\r\n#a = os.path.split(input_folder)\r\n#print(os.path.split(input_folder)[1])\r\n\r\n\r\n\r\n##truth = isinstance(\"1\", int)\r\n##routine.string_or_number(123)\r\n##print(\"truth:\" + str(truth))\r\n##\r\n##\r\n##\r\n##input_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\24mo\"\r\n##print (input_folder + \"\\\\testing\")\r\n##\r\n##a=3\r\n##if 1 <= a <= 2:\r\n## print(str(a))\r\n##else:\r\n## print(\"a not in range\")\r\n\r\n\r\n##\r\n##with open(os.path.join('header map 3D.txt'), \"rt\") as inputfile:\r\n## reader = csv.reader(inputfile, delimiter = '\\t')\r\n## for r, row in islice(enumerate(reader), 0, 20):\r\n## print(str(r) + \": \" + str(row))\r\n## inputfile.seek(0)\r\n\r\n \r\n##folder = \"C:/Users/infant/Documents/GitHub/py_gazedat\"\r\n##file = \"header map 3D.txt\"\r\n##hr = HeaderReader(folder, file)\r\n##\r\n##print(hr.get_header_colNum('TETTime'))\r\n##print(hr.get_header_newName('r_cam_y'))\r\n##\r\n##hKeys = hr.getKeys()\r\n##\r\n####for header in enumerate(hKeys):\r\n#### #print(header)\r\n#### print(hr.get_header_newName(header[1]))\r\n##\r\n##headersList = ['' for i in range(4)]#['cl1', 'cl2', 'cl3']\r\n###ind = headersList.index('cl4')\r\n##\r\n##newheaders = []\r\n##\r\n##for header in enumerate(hKeys):#enumerate(header_keys):\r\n## if not hr.get_header_newName(header[1]) == my_classes.OBSOLETE_HEADER:\r\n## newheaders.append(hr.get_header_newName(header[1]))\r\n##\r\n##\r\n##\r\n##print('Rubject' in newheaders)\r\n##print(type(hr))\r\n##val = isinstance(hr, HeaderReader)\r\n##print(val)\r\n##\r\n##n = hr.get_n_of_uniqueCols()\r\n##print(n)\r\n##\r\n##l= [\"x\" for i in range(9)]\r\n##target = 3\r\n##print(l)\r\n##l.pop(target)\r\n##l.insert(target, \"X\")\r\n##print(l)\r\n\r\n\r\nod = OrderedDict([('sape', 4139), ('guido', 4127), ('jack', 4098)])\r\nod = OrderedDict.fromkeys(headers) \r\nheader = '6'\r\n#od = OrderedDict.fromkeys(headers) \r\n\r\nl = tuple('name')\r\nod['Thing'] = l\r\n\r\nprint('l:' + str(l))\r\nprint('od:' + str(od))\r\nprint('od(Thing):' + str(od['Thing']))\r\nprint('num in od(Thing):' + str(od['Thing'][1]))\r\n\r\n#od['Thing'].append(13)\r\n#od['guido'].append(31)\r\n#od['sape'].append(13)\r\n#od['guido'].append(31)\r\n\r\n\r\n\r\n#di = dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])\r\n#\r\n#folder = os.getcwd() + '\\\\'\r\n#with open((folder + 
'foo.gazedata'),'w') as data: data.write(str(od))\r\n#with open((folder + 'food.gazedata'),'w') as data: data.write(str(di))\r\n\r\n##\r\n##Strong = namedtuple('Strong', 'name num')\r\n##\r\n##strong1 = Strong('name', 123)\r\n##od['StrongThing'] = strong1\r\n##od['TitanicThing'] = Strong('FamousName', 88)\r\n##\r\n##print(strong1.name)\r\n##print(od['StrongThing'].num)\r\n##print(od['TitanicThing'].num)\r\n##\r\n"
},
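py_test_2.py is a scratch pad; note that `random.shuffle(files)` and `OrderedDict.fromkeys(headers)` near the top reference names that are never defined, so the script fails as committed. The date-cutoff experiment from its commented block reduces to the following (any existing file serves as the demo target):

```python
# Convert a human-readable cutoff to epoch seconds and compare it with a
# file's creation time, as sketched in the commented block above.
import os
import time

cutoff = time.mktime(time.strptime("19 Oct 17", "%d %b %y"))
path = __file__                   # demo target; any existing file works
if os.path.getctime(path) >= cutoff:
    print(path, "is newer than the cutoff")
```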
{
"alpha_fraction": 0.6794202923774719,
"alphanum_fraction": 0.695652186870575,
"avg_line_length": 37.79069900512695,
"blob_id": "2de209a5c583fc144470dac38552a05aaa31286c",
"content_id": "af9fa9e46a763202032094a959aa31a68026c65f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1725,
"license_type": "permissive",
"max_line_length": 217,
"num_lines": 43,
"path": "/standardizeGazedataAnon.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "##Script for reading, modifying and re-writing gazedata\r\n#%reset -f\r\n\r\nimport os\r\n#import routine\r\nfrom my_classes import DataFolder\r\nfrom my_classes import HeaderReader\r\n\r\n#read header conversion map \r\nfolder = \"C:/Users/lasayr/Documents/GitHub/py_gazedat\"\r\nfile = \"header map 3D_std.txt\"\r\nhr = HeaderReader(folder, file)\r\n\r\n# Source folder:\r\ninput_folder = \"C:\\\\Users\\\\lasayr\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\7mo,trec2\"\r\n#input_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\\\ct\\\\ct_18mo\"\r\n#input_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\mi\"\r\n#input_folder = \"D:\\\\lasayr\\\\Aaltonen\\\\24mo,trec2\"\r\ninFolderUnique = os.path.split(input_folder)[1]\r\noutput_folder = \"C:\\\\Users\\\\lasayr\\\\Documents\\\\D\\\\Aaltonen\\\\\" + inFolderUnique + \"_std\\\\anon\"\r\n\r\nfolder_an = 'C:\\\\Users\\\\lasayr\\Documents\\\\D\\\\Aaltonen\\\\7mo,trec2_std\\\\anon'\r\ninput_folder = folder_an\r\noutput_folder = folder_an + '\\\\moreAnon'\r\n\r\n# Init DataFolder\r\ndl = \"01 Jan 00\"\r\ndata_folder = DataFolder(input_folder, map_header = hr, date_limit = dl ,limit_files = (0,2), limit_rows = None)#, fileModelCur)#, limit_files = (0,3))#, limit_rows = 20, limit_files = (1,3)) \r\n\r\n#get files\r\nfiles = data_folder.get_filelist()\r\n\r\n# Print header map, conversion table\r\ndata_folder.print_header_map()\r\n\r\nheaders = data_folder.get_headers()\r\n\r\n#data_folder.write_stats_to_file(percentiles = (1,99))\r\n\r\nprint(\"\\nFiles selected: \" + str(data_folder.get_filelist()))\r\n# Change output folder, default is: C:\\Users\\Public\\Documents\\Tampereen yliopisto\\Eye tracker\\TRE Cohort 2\\gazeAnalysisLib analyses\\testing data\r\n#data_folder.set_output_folder(output_folder)\r\ndata_folder.rewrite_data(output_folder, anonymize = True) \r\n \r\n"
},
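standardizeGazedataAnon.py drives `DataFolder.rewrite_data(..., anonymize=True)`, which renames output files through GazeReader's `_anonymize_fun`. A sketch of that naming scheme, an `Anon-` prefix plus a time-of-day token and a millisecond suffix:

```python
# Anonymous output-name scheme used by GazeReader above.
import time

def anon_name(ext=".gazedata"):
    stamp = time.strftime("%H%M%S", time.localtime())
    millis = "%d" % (time.time() % 1 * 1000)
    return "Anon-" + stamp + millis + ext

print(anon_name())   # e.g. Anon-142233512.gazedata
```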
{
"alpha_fraction": 0.611716628074646,
"alphanum_fraction": 0.6188691854476929,
"avg_line_length": 23.736841201782227,
"blob_id": "d21bfd553856b3b5da400e9ae1cc622bc6887619",
"content_id": "533d0fbcfc85917f8ca09a5b31c5b45505941f97",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2936,
"license_type": "permissive",
"max_line_length": 171,
"num_lines": 114,
"path": "/check_headers_2.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\n\r\nimport csv\r\n\r\n#import routine\r\n\r\n#from collections import OrderedDict\r\n\r\nfrom itertools import islice\r\n\r\nfrom my_classes import GazeReader\r\n\r\n\r\ndefault_input_folder = \"D:\\lasayr\\Aaltonen\\mi\" #\"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing 7mo,trec2\"\r\n\r\ninput_folder = \"mi\" #'testing 7mo,trec2' #\"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing 7mo,trec2\"\r\n\r\nroot_folder = \"D\\\\\" #\"C:\\\\Users\\\\\"\r\n\r\n\r\nn_files = None # set limit for files to be processed, None if no limit desired\r\n\r\noutput_folder = os.getcwd()#\"C:\\\\Users\\\\Public\\\\Documents\\\\Tampereen yliopisto\\\\Eye tracker\\\\TRE Cohort 2\\\\gazeAnalysisLib analyses\\\\testing data\"\r\n\r\noutput_file = ( \"headers in \" + input_folder + \".txt\")\r\n\r\nfile_ext = \".gazedata\" #input file extension, .txt\r\n\r\ninput_file_delimiter = \"\\t\"\r\n\r\noutput_file_delimiter = input_file_delimiter\r\n\r\nheaders_folder = os.getcwd() #path for headers inputfile\r\n\r\nheaders_inputfile = \"headers_tre_5mo_to_7mo.txt\"\r\n\r\n\r\n# no headers are specified, instead find them in files\r\n\r\nmap_header = None\r\n\r\n\r\n# find directory by \"walking\" through the system\r\n\r\nif os.path.isdir(default_input_folder):\r\n start_folder = default_input_folder\r\nelse:\r\n start_folder = root_folder\r\n\r\n\r\nfor root, dirs, files in os.walk(start_folder):\r\n \r\n if input_folder in root: \r\n \r\n print(root, \" \", end=\" FOUND! \")\r\n \r\n print(\"\")\r\n \r\n input_folder = root\r\n\r\n else:\r\n print(root)\r\n \r\n \r\nprint (input_folder)\r\n\r\n# list files in a directory, \r\n\r\ndiritems = os.listdir(input_folder)\r\n\r\nprint (\"Directory contains \" + str(len(diritems)) + \" files.\")\r\n\r\n\r\n\r\n# open output file \r\n\r\nwith open(os.path.join(output_folder, output_file), \"wt\") as outputfile:\r\n\r\n writer = csv.writer(outputfile, delimiter=output_file_delimiter)\r\n\r\n\r\n\r\n\r\n#headers_in_files = []\r\n\r\n #loop through files, limit loop by isslice(items, start, stop), can be None\r\n\r\n for filenum, file in islice(enumerate(diritems), 0, n_files): \r\n\r\n #print (\"Checking file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\n\r\n if file.endswith(file_ext):\r\n\r\n print (\"Process file \" + str(filenum + 1) + '/' + str(len(diritems)))\r\n\r\n print(file)\r\n\r\n\r\n #read in data, process, and strore in newrows\r\n\r\n args_pro = input_folder, file, map_header\r\n \r\n \r\n # make new GazeReader object for reading and processing input file \r\n \r\n f_processor = GazeReader(args_pro, 40) #40 is optional limit for rows\r\n \r\n #f_processor.set_row_limit(40) # limit rows, good for debugging\r\n \r\n row_list_to_write = f_processor.get_headers()\r\n\r\n row_list_to_write.insert(0, file)\r\n \r\n writer.writerow( row_list_to_write )\r\n\r\n"
},
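check_headers_2.py locates its input folder by walking the directory tree from a start directory and keeping the first path that contains the target fragment. The search reduces to:

```python
# Minimal version of the os.walk() folder search above: return the first
# directory whose path contains the target fragment, or None.
import os

def find_folder(start, fragment):
    for root, dirs, files in os.walk(start):
        if fragment in root:
            return root
    return None

# find_folder(".", "mi")  -> first matching path, or None
```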
{
"alpha_fraction": 0.5306181311607361,
"alphanum_fraction": 0.536683976650238,
"avg_line_length": 17.312170028686523,
"blob_id": "f6469e930cf0127d2a9a2c82adbe121e070f633a",
"content_id": "dda4389a79fad0ad14beffd4cbb357f148a7c097",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3462,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 189,
"path": "/csv2gazedata-2b-6.py",
"repo_name": "yrttiahoS/py_gazedat",
"src_encoding": "UTF-8",
"text": "import os\r\rimport csv\r\r\r\r# Input folder needs to be relative to the script location in the folder tree.\r\r# In this case the folder where this script is located needs to have a folder\r\r# named \"files_to_change\" where the files are located.\r\rinput_folder = \"files_to_change\"\r\rending = \".txt\"\r\routput_file_ending = \".gazedata\"\r\rinput_file_delimiter = \"\\t\"\r\rnull_values = [\".\", \"\"]\r\rreplace_null_values = \"-999999\"\r\r\r\r \r\r\r\rmaptable = {\"TIMESTAMP\":\"TETTime\",\r\r \"RECORDING_SESSION_LABEL\":\"Subject\",\r\r \"LEFT_GAZE_X\":\"XGazePosLeftEye\",\r\r \"LEFT_GAZE_Y\":\"YGazePosLeftEye\",\r\r \"RIGHT_GAZE_X\":\"XGazePosRightEye\",\r\r \"RIGHT_GAZE_Y\":\"YGazePosRightEye\",\r\r \"TRIAL_INDEX\":\"TrialId\",\r\r \"SAMPLE_MESSAGE\":\"UserDefined_1\",\r\r \"RIGHT_PUPIL_SIZE\":\"DiameterPupilRightEye\",\r\r \"stimulus_right_2\":\"Stim\",\r\r \"__target_x__1\":\"Target\"\r\r\r\r# scan through files in a directory\r\rdiritems = os.listdir(input_folder)\r\r\r\rprint \"Directory contains \" + str(len(diritems)) + \" files.\"\r\r\r\rfor filenum, file in enumerate(diritems):\r\r print \"Checking file \" + str(filenum + 1) + '/' + str(len(diritems))\r\r\r\r if file.endswith(ending):\r\r print \" Filename matches with the specified ending -> processing..\"\r\r #self.liststore_exp.append([file])\r\r input_file = file\r\r\r\r # input file reading\r\r newrows = []\r\r with open(os.path.join(input_folder, input_file), \"rb\") as inputfile:\r\r reader = csv.reader(inputfile, delimiter='\\t')\r\r\r\r # grab header information, into a list\r\r headers = reader.next()\r\r\r\r # calculate list index numbers for map-keys\r\r indexed_maptable = {}\r\r for key in maptable:\r\r indexed_maptable[key] = headers.index(key)\r\r\r\r # loop file rows\r\r imkeys = indexed_maptable.keys()\r\r for row in reader:\r\r newrow = []\r\r for key in imkeys:\r\r ncol = indexed_maptable[key]\r\r # take away the null-values if they exist\r\r if row[ncol] not in null_values:\r\r if key in ['LEFT_GAZE_X', 'RIGHT_GAZE_X']:\r\r newrow.append(float(row[ncol]) / 1920.0)\r\r elif key in ['LEFT_GAZE_Y', 'RIGHT_GAZE_Y']:\r\r newrow.append(float(row[ncol]) / 1020.0)\r\r else:\r\r newrow.append(row[ncol])\r\r else:\r\r newrow.append(replace_null_values)\r\r newrows.append(newrow)\r\r\r\r # output file formation\r\r # resolve the output file name\r\r input_filename_parts = input_file.split(\".\")\r\r output_file = input_filename_parts[0] + output_file_ending\r\r\r\r # open file\r\r with open(os.path.join(input_folder, output_file), \"wb\") as outputfile:\r\r writer = csv.writer(outputfile, delimiter='\\t')\r\r\r\r # form header row\r\r newheaders = []\r\r for key in imkeys:\r\r newheaders.append(maptable[key])\r\r\r\r # write header row\r\r writer.writerow(newheaders)\r\r\r\r # write datarows\r\r for newrow in newrows:\r\r writer.writerow(newrow)\r\r\r\r print \" File processed.\"\r\r else:\r\r print \" Filename did not match the ending -> did nothing.\"\r\r"
}
] | 17 |
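csv2gazedata-2b-6.py above is Python 2 (print statements, `reader.next()`, binary CSV modes) and its `maptable` literal is missing a closing brace, so it does not parse as committed. A hedged Python 3 sketch of the same header-remapping core, with the map trimmed to two entries (the full mapping lives in the script itself):

```python
# Python 3 sketch of csv2gazedata's remapping loop.
import csv

maptable = {"TIMESTAMP": "TETTime", "TRIAL_INDEX": "TrialId"}  # trimmed

def remap(in_path, out_path, delimiter="\t"):
    with open(in_path, "rt", newline="") as src, \
         open(out_path, "wt", newline="") as dst:
        reader = csv.reader(src, delimiter=delimiter)
        writer = csv.writer(dst, delimiter=delimiter)
        headers = next(reader)
        cols = [headers.index(old) for old in maptable]
        writer.writerow(list(maptable.values()))
        for row in reader:
            writer.writerow([row[c] for c in cols])
```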
RedxLus/k8s-secret-editor | https://github.com/RedxLus/k8s-secret-editor | fb1baf7d0ab1c8e592902e059feb309b4d62e359 | e89a51220df828bbb24034e193d806fa0ea44168 | 67165e7e9ce7fb0eec1675bdf387e1ed6730b9d0 | refs/heads/master | 2020-07-27T03:59:58.507522 | 2019-09-16T17:45:28 | 2019-09-16T17:45:28 | 208,860,044 | 0 | 0 | null | 2019-09-16T17:38:42 | 2019-03-04T14:22:26 | 2017-10-13T05:56:32 | null | [
{
"alpha_fraction": 0.7541284561157227,
"alphanum_fraction": 0.7657492160797119,
"avg_line_length": 34.543479919433594,
"blob_id": "43354a98403f3a50750fac0ce93942a840f01001",
"content_id": "6299bcbfd5404fc687369ddabd62147c2c4d0d12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1635,
"license_type": "no_license",
"max_line_length": 318,
"num_lines": 46,
"path": "/README.md",
"repo_name": "RedxLus/k8s-secret-editor",
"src_encoding": "UTF-8",
"text": "# k8s-secret-editor\nSecret Editor Web interface for Kubernetes\n\nThis is a web tool to edit [Secrets](http://kubernetes.io/docs/user-guide/secrets/) in Kubernetes. A secret is a resource which contains one or several files encoded inside, which are then mounted to a pod. Defining those files within a YAML is complicated so we created this tool to edit them directly in the browser.\n\nThe application is plug & play. It uses K8S' service accounts to access the cluster, so no more configuration is needed.\n\n\n\n# How to deploy\n\nWe offer two options:\n\n### Directly deploy\n\nWe include YAMLs to directly deploy this tool in Kubernetes:\n\n```\nkubectl create -f k8s-secrets-editor.yml\n```\n\n( If your kubernetes cluster version < 1.6 )\n```\nkubectl create -f pre16-k8s-secrets-editor.yml\n```\n\nAnd enjoy it at http://SERVICE_IP_ADDRESS or mapping the port to your local\n\n```\nkubectl --namespace kube-system port-forward <POD_NAME> 8080:80\n```\n\n### Just pull the image\n\nYou can also just pull the Docker image (bqitdevops/k8s-secret-editor) and deploy on your own.\n\nIt will only work if deployed to Kubernetes as it uses injected service account and environment variables to connect to K8S API service.\n\n```\ndocker pull bqitdevops/k8s-secret-editor\n```\n\n# Authentication\nAs it will be used to manage sensitive information, we secured the access to the web with basic http authentication:\n* User: *admin*\n* Password: Defined in the environment variable ADMIN_PASSWORD. If you are deploying with the file k8s-deployment.yaml, by default it is *admin*\n"
},
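The README above secures the editor with HTTP basic auth (user `admin`, password from `ADMIN_PASSWORD`). A quick way to smoke-test a deployment from Python, assuming the service is reachable on a port-forwarded localhost address (the URL and fallback password are placeholders matching the defaults described above):

```python
# Hit the deployed editor with the basic-auth credentials from the README.
import os
import requests

resp = requests.get(
    "http://127.0.0.1:8080/",
    auth=("admin", os.environ.get("ADMIN_PASSWORD", "admin")),
)
print(resp.status_code)
```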
{
"alpha_fraction": 0.40625,
"alphanum_fraction": 0.59375,
"avg_line_length": 15,
"blob_id": "f2b312c844e2d86163603065e93cded48f25ff2b",
"content_id": "79fdd34271cdf0f9c2c11c14629bf1f5aac8f270",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 64,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 4,
"path": "/src/requirements.txt",
"repo_name": "RedxLus/k8s-secret-editor",
"src_encoding": "UTF-8",
"text": "Flask == 0.10.1\nWTForms == 2.1\nRequests == 2.9.1\nPyYAML == 3.11\n"
},
{
"alpha_fraction": 0.8131313323974609,
"alphanum_fraction": 0.8131313323974609,
"avg_line_length": 32,
"blob_id": "b7b663d94f2bbc92bc41c41b8fb9ce32a07af148",
"content_id": "d74a4810242a4a854b2d4a6ad7e368f527ff5875",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 198,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 6,
"path": "/src/app/forms.py",
"repo_name": "RedxLus/k8s-secret-editor",
"src_encoding": "UTF-8",
"text": "from flask.ext.wtf import Form\nfrom wtforms import StringField, BooleanField, RadioField\nfrom wtforms.validators import DataRequired\n\nclass SearchForm(Form):\n namespace = RadioField('Namespace')\n"
},
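forms.py imports through `flask.ext.wtf`, a namespace removed in later Flask releases. Under the current package layout the same form would read as follows (a sketch, assuming Flask-WTF 0.13 or newer, where `Form` was renamed `FlaskForm`):

```python
# Modern equivalent of forms.py above.
from flask_wtf import FlaskForm
from wtforms import RadioField

class SearchForm(FlaskForm):
    namespace = RadioField("Namespace")
```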
{
"alpha_fraction": 0.6376306414604187,
"alphanum_fraction": 0.6689895391464233,
"avg_line_length": 15.882352828979492,
"blob_id": "c3d7094b0c272c4e9d51cdb874390b0496a6a6cb",
"content_id": "7aaf3424f25ad18e953e4ce6bee6f8def9f58b87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 287,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 17,
"path": "/src/webapp.py",
"repo_name": "RedxLus/k8s-secret-editor",
"src_encoding": "UTF-8",
"text": "#!flask/bin/python\n# -*- coding: utf-8 -*-\n\nfrom app import app\nimport sys\nfrom config import *\nimport os\n\nreload(sys)\nsys.setdefaultencoding('utf8')\n\ndebug = False\n\nif 'DEBUG' in os.environ and os.environ['DEBUG'] == \"1\":\n debug = True\n\napp.run(debug=debug,host='0.0.0.0', port=80)\n"
},
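webapp.py is Python 2 (`reload(sys)` plus `sys.setdefaultencoding`, both gone in Python 3). The env-driven DEBUG switch itself ports directly:

```python
# Python 3 version of webapp.py's debug flag.
import os

debug = os.environ.get("DEBUG") == "1"
# app.run(debug=debug, host="0.0.0.0", port=80)   # as in webapp.py
```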
{
"alpha_fraction": 0.7333333492279053,
"alphanum_fraction": 0.7333333492279053,
"avg_line_length": 22,
"blob_id": "7e75aa1486b96c149051be1a2f77934a06bf95e0",
"content_id": "88a3b2c7afe388b8897fe99c0e5cf5c3d2f19279",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 45,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 2,
"path": "/src/config.py",
"repo_name": "RedxLus/k8s-secret-editor",
"src_encoding": "UTF-8",
"text": "WTF_CSRF_ENABLED = True\nSECRET_KEY = 'bqmola'"
},
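config.py above defines Flask settings as module-level constants. A hedged sketch of how such a module is commonly loaded; the wiring shown here is illustrative, not copied from this repo:

```python
# Load config.py's constants (WTF_CSRF_ENABLED, SECRET_KEY) into a Flask app.
from flask import Flask

app = Flask(__name__)
app.config.from_object("config")
```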
{
"alpha_fraction": 0.5960813164710999,
"alphanum_fraction": 0.6119952201843262,
"avg_line_length": 38.245487213134766,
"blob_id": "2126e151c04ea44940b35bb8ac48143445f482d7",
"content_id": "49946c3c9a11b5fb5185c741da51f7d161d9cc32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10871,
"license_type": "no_license",
"max_line_length": 272,
"num_lines": 277,
"path": "/src/app/views.py",
"repo_name": "RedxLus/k8s-secret-editor",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom requests.auth import HTTPDigestAuth\nfrom functools import wraps\nfrom flask import render_template, flash, redirect, request, send_from_directory, Response\nfrom app import app\n#from .forms import SearchForm\nimport requests\nimport json\nimport yaml\nimport pprint\nimport base64\nimport os\nfrom config import *\n\ndef check_auth(username, password):\n \"\"\"This function is called to check if a username /\n password combination is valid.\n \"\"\"\n\n # If ADMIN_PASSWORD defined, then check that password is correct\n if 'ADMIN_PASSWORD' in os.environ and os.environ['ADMIN_PASSWORD'] != '':\n return username == 'admin' and password == os.environ['ADMIN_PASSWORD']\n else:\n return True\n\ndef authenticate():\n \"\"\"Sends a 401 response that enables basic auth\"\"\"\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})\n\ndef requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth = request.authorization\n if not auth or not check_auth(auth.username, auth.password):\n return authenticate()\n return f(*args, **kwargs)\n return decorated\n\n# Custom static data\[email protected]('/css/<path:filename>')\ndef custom_static_css(filename):\n return send_from_directory(app.root_path + '/static/css/', filename)\n\n# Custom static data\[email protected]('/js/<path:filename>')\ndef custom_static_js(filename):\n return send_from_directory(app.root_path + '/static/js/', filename)\n\n# Custom static data\[email protected]('/fonts/<path:filename>')\ndef custom_static_fonts(filename):\n return send_from_directory(app.root_path + '/static/fonts/', filename)\n\n# Custom static data\[email protected]('/codemirror/<path:filename>')\ndef custom_static_codemirror(filename):\n return send_from_directory(app.root_path + '/codemirror/', filename)\n\n# Custom static data\[email protected]('/static/<path:filename>')\ndef custom_static_(filename):\n return send_from_directory(app.root_path + '/static/', filename)\n\n\ndef read_api(query):\n k8s_token = open('/var/run/pod/kubernetes.io/serviceaccount/token').read()\n ip = os.getenv('KUBERNETES_SERVICE_HOST','127.0.0.1')\n port = os.getenv('KUBERNETES_PORT_443_TCP_PORT','443')\n response = requests.get('https://' + ip + ':' + port + query,\n verify=False,\n headers={'Authorization':'Bearer ' + k8s_token})\n return response\n\ndef post_api(query,data):\n k8s_token = open('/var/run/pod/kubernetes.io/serviceaccount/token').read()\n ip = os.getenv('KUBERNETES_SERVICE_HOST','127.0.0.1')\n port = os.getenv('KUBERNETES_PORT_443_TCP_PORT','443')\n response = requests.post('https://' + ip + ':' + port + query,\n verify=False,\n headers={'Authorization':'Bearer ' + k8s_token, 'content-type': 'application/json'},\n data=data)\n return response\n\ndef delete_api(query):\n k8s_token = open('/var/run/pod/kubernetes.io/serviceaccount/token').read()\n ip = os.getenv('KUBERNETES_SERVICE_HOST','127.0.0.1')\n port = os.getenv('KUBERNETES_PORT_443_TCP_PORT','443')\n response = requests.delete('https://' + ip + ':' + port + query,\n verify=False,\n headers={'Authorization':'Bearer ' + k8s_token})\n return response\n\n\[email protected]('/', methods=['GET'])\n@requires_auth\ndef search_namespaces():\n # namespaces=['nm1','nm2','nm3']\n namespaces=[]\n r = read_api('/api/v1/namespaces')\n d = json.loads(r.content)\n for i in d['items']:\n if i['metadata']['name'] != 'kube-system':\n 
namespaces.append(i['metadata']['name'])\n return render_template('select_namespace.html', namespaces=namespaces, titulo='Selecciona namespace')\n\[email protected]('/<string:namespace>', methods=['GET'])\n@requires_auth\ndef search_pod(namespace):\n # pod=['secret-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA','secret B', 'secret C']\n # namespaces=['nm1','nm2','nm3']\n namespaces=[]\n pod=[]\n r = read_api('/api/v1/namespaces/'+namespace+'/pod')\n d = json.loads(r.content)\n for i in d['items']:\n if 'default-token' not in i['metadata']['name']:\n pod.append(i['metadata']['name'])\n r = read_api('/api/v1/namespaces')\n d = json.loads(r.content)\n for i in d['items']:\n if i['metadata']['name'] != 'kube-system':\n namespaces.append(i['metadata']['name'])\n return render_template('select_secret.html', namespace=namespace , namespaces=namespaces, pod=pod, titulo='Selecciona secret')\n\[email protected]('/<string:namespace>', methods=['POST'])\n@requires_auth\ndef create_secret(namespace):\n request.get_data()\n secret = request.form['secret']\n new = {\n \"kind\": \"Secret\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": secret,\n \"namespace\": namespace\n },\n \"data\": {\n },\n \"type\": \"Opaque\"\n }\n print new\n rc = post_api('/api/v1/namespaces/'+namespace+'/pod', data=json.dumps(new))\n #rc = requests.post('https://104.155.45.53/api/v1/namespaces/'+namespace+'/pod', data=json.dumps(new), headers={'Authorization':'Basic YWRtaW46QWhpSWdPcmRFOXBVdjRHeA==','content-type': 'application/json'}, auth=('admin', 'AhiIgOrdE9pUv4Gx'),verify=False)\n print 'CREATE:'\n print rc.status_code\n print rc.json()\n print rc.content\n if rc.status_code != 201:\n flash('ERROR WHEN CREATING SECRET ' + secret)\n return redirect(\"/\"+namespace)\n else:\n flash('Created secret ' + secret)\n return redirect(\"/\"+namespace+\"/\"+secret)\n\n\n\[email protected]('/<string:namespace>/<string:secret>', methods=['GET'])\n@requires_auth\ndef edit_secret(namespace,secret):\n namespaces=[]\n pod=[]\n r = read_api('/api/v1/namespaces/'+namespace+'/pod')\n d = json.loads(r.content)\n for i in d['items']:\n if 'default-token' not in i['metadata']['name']:\n pod.append(i['metadata']['name'])\n r = read_api('/api/v1/namespaces')\n d = json.loads(r.content)\n for i in d['items']:\n if i['metadata']['name'] != 'kube-system':\n namespaces.append(i['metadata']['name'])\n\n r = read_api('/api/v1/namespaces/'+namespace+'/pod/'+secret)\n if r.status_code == 200:\n d = json.loads(r.content)\n pprint.pprint(d)\n data={}\n if 'data' in d:\n for x in d['data']:\n data[x] = base64.b64decode(d['data'][x])\n print data[x]\n return render_template('edit_secret.html',namespaces=namespaces, pod=pod, namespace=d['metadata']['namespace'], secret=d['metadata']['name'], data=data, titulo='Edit secret', errors='')\n else:\n return render_template('select_secret.html', namespaces=namespaces, pod=pod, namespace=namespace, titulo='Select secret', error='Secret does not exist in selected namespace')\n\n\[email protected]('/<string:namespace>/<string:secret>', methods=['POST'])\n@requires_auth\ndef submit_secret(namespace,secret):\n request.get_data()\n data = request.form\n pprint.pprint(data)\n new = {\n \"kind\": \"Secret\",\n \"apiVersion\": \"v1\",\n \"metadata\": {\n \"name\": secret,\n \"namespace\": namespace\n },\n \"data\": {\n },\n \"type\": \"Opaque\"\n }\n for key in data:\n new[\"data\"][key] = base64.b64encode(data[key].encode('utf-8'))\n body = json.dumps(new, indent=4)\n print body\n\n #rp = 
requests.get('https://104.155.45.53/api/v1/namespaces/'+namespace+'/pod/'+secret, auth=('admin', 'AhiIgOrdE9pUv4Gx'),verify=False)\n rp = read_api('/api/v1/namespaces/'+namespace+'/pod/'+secret)\n previous = json.loads(rp.content)\n print 'BACKUP:'\n pprint.pprint(previous)\n\n #rd = requests.delete('https://104.155.45.53/api/v1/namespaces/'+namespace+'/pod/'+secret, auth=('admin', 'AhiIgOrdE9pUv4Gx'),verify=False)\n rd = delete_api('/api/v1/namespaces/'+namespace+'/pod/'+secret)\n print 'delete:'\n print rd.status_code\n print rd.json()\n print rd.content\n\n #rc = requests.post('https://104.155.45.53/api/v1/namespaces/'+namespace+'/pod', data=body, headers={'Authorization':'Basic YWRtaW46QWhpSWdPcmRFOXBVdjRHeA==','content-type': 'application/json'}, auth=('admin', 'AhiIgOrdE9pUv4Gx'),verify=False)\n rc = post_api('/api/v1/namespaces/'+namespace+'/pod', data=body)\n print 'CREATE:'\n print rc.status_code\n print rc.json()\n print rc.content\n\n if rc.status_code != 201:\n #rr = requests.post('https://104.155.45.53/api/v1/namespaces/'+namespace+'/pod', data=json.dumps(previous), headers={'Authorization':'Basic YWRtaW46QWhpSWdPcmRFOXBVdjRHeA==','content-type': 'application/json'}, auth=('admin', 'AhiIgOrdE9pUv4Gx'),verify=False)\n rr = post_api('/api/v1/namespaces/'+namespace+'/pod', data=json.dumps(previous))\n print 'RESTORE:'\n print rr.status_code\n print rr.json()\n\n data={}\n for x in previous['data']:\n data[x] = base64.b64decode(previous['data'][x])\n error = json.loads(rc.content)['message']\n return render_template('edit_secret.html', namespace=namespace, secret=secret, data=data, titulo='Edit secret', errors=error)\n else:\n flash('Updated secret %s in namespace %s' %(secret, namespace))\n return redirect(\"/\"+namespace+\"/\"+secret)\n\[email protected]('/<string:namespace>/<string:secret>/delete', methods=['GET'])\n@requires_auth\ndef delete_secret(namespace,secret):\n\n rd = delete_api('/api/v1/namespaces/'+namespace+'/pod/'+secret)\n print 'delete:'\n print rd.status_code\n print rd.json()\n print rd.content\n\n namespaces=[]\n pod=[]\n r = read_api('/api/v1/namespaces/'+namespace+'/pod')\n d = json.loads(r.content)\n for i in d['items']:\n if 'default-token' not in i['metadata']['name']:\n pod.append(i['metadata']['name'])\n r = read_api('/api/v1/namespaces')\n d = json.loads(r.content)\n for i in d['items']:\n if i['metadata']['name'] != 'kube-system':\n namespaces.append(i['metadata']['name'])\n\n if rd.status_code == 200:\n flash('Removed secret: ' + secret)\n return render_template('select_secret.html', namespace=namespace , namespaces=namespaces, pod=pod, titulo='Select secret')\n else:\n flash('ERROR WHEN REMOVING SECRET: ' + secret)\n return render_template('select_secret.html', namespace=namespace , namespaces=namespaces, pod=pod, titulo='Select secret', error='Secret could not be removed '+secret)\n"
}
] | 6 |
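The edit and submit views in the Flask secret editor above round-trip every value through base64, because the Kubernetes API stores Secret `data` fields base64-encoded. A minimal standalone sketch of that encode/decode step (the sample key and value are illustrative, not taken from the app):

```python
import base64

# Kubernetes Secret 'data' values are base64-encoded strings; the app above
# decodes them for display and re-encodes edited form values before POSTing.
plain = {"password": "s3cr3t"}
encoded = {k: base64.b64encode(v.encode("utf-8")).decode("ascii") for k, v in plain.items()}
decoded = {k: base64.b64decode(v).decode("utf-8") for k, v in encoded.items()}
print(encoded)   # {'password': 'czNjcjN0'}
print(decoded)   # {'password': 's3cr3t'}
```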
rere-dnaw/cardano-data-pull | https://github.com/rere-dnaw/cardano-data-pull | 62de175933ca7b01e2d8d308a79f0dff22cf2a86 | 66b41763a619951bc9f42c9c7e2738ba431eca4d | e1e545b77eff3e038b1eda4e417666f7e91ada78 | refs/heads/master | 2023-07-14T22:51:50.501792 | 2021-08-20T15:17:55 | 2021-08-20T15:17:55 | 398,316,388 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6620370149612427,
"alphanum_fraction": 0.7546296119689941,
"avg_line_length": 18.636363983154297,
"blob_id": "7ca114ae2d3f564ae6846966163b887f2aef1fc0",
"content_id": "dea7f62203c38c377e809592df6bb0a53c73c0f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 216,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 11,
"path": "/README.md",
"repo_name": "rere-dnaw/cardano-data-pull",
"src_encoding": "UTF-8",
"text": "# cardano-data-pull\nfunctions related to pulling data for cardano blockchain\n\nOtput files:\nThe data is based on epoch 283.\n\nrange1.csv - 50 000 - 100 000 live_stake\n\nrange2.csv - all\n\nrange3.csv - top 100 live_stake\n"
},
{
"alpha_fraction": 0.4821831285953522,
"alphanum_fraction": 0.4975191652774811,
"avg_line_length": 30.23239517211914,
"blob_id": "a6ba77f23d031c90809f348bdfffc5ec4677a0a5",
"content_id": "1746b6c0dfdea3cf86a42af5cc2192eb56b9e8f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4434,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 142,
"path": "/cardano-pools-data.py",
"repo_name": "rere-dnaw/cardano-data-pull",
"src_encoding": "UTF-8",
"text": "import requests\nimport csv\nfrom constant import api_key\n\n\n\ndef make_pools_list(api_key, url='https://cardano-mainnet.blockfrost.io/api/v0/pools'):\n '''\n This function create full list of cardano pools\n '''\n\n print('Start making list of all pools.')\n headers = {'project_id': '{key}'.format(key=api_key)}\n\n params = {\n 'count':100,\n 'page':1\n }\n\n data = []\n\n while params['page'] != 0:\n tmp_data = requests.get(url, headers=headers, params=params)\n if len(tmp_data.json()) == 100:\n data += tmp_data.json()\n params['page'] += 1\n else:\n data += tmp_data.json()\n params['page'] = 0\n\n with open('pool_list.txt', 'w') as f:\n f.write('\\n'.join(data))\n \n print('End making list of all pools. {0} pools have been found.'.format(str(len(data))))\n\n\ndef check_stake_range(poolData, pool_id, min, max):\n '''\n Will return pool data if active stake\n is between min and max.\n '''\n if poolData:\n if min <= float(poolData['active_stake']) <= max:\n print('Pool active_stake in range. Pool ID: {0},'\n ' Active Stake: {1},'\n ' Range: {2} - {3}'.format(pool_id,\n poolData['active_stake'],\n min,\n max))\n return True\n else:\n print('Pool active_stake NOT in range. Pool ID: {0},'\n ' Active Stake: {1},'\n ' Range: {2} - {3}'.format(pool_id,\n poolData['active_stake'],\n min,\n max))\n return False\n else:\n print('poolData empty! PoolId: {0}'.format(pool_id))\n return False\n\n\ndef pool_add_id(poolData, pool_id):\n \"\"\"\n This function will add pool_id into data\n \"\"\"\n try:\n poolData['pool_id'] = pool_id\n print('PoolId added: {0}'.format(pool_id))\n except:\n print('Failed when adding ID to pool: {0}'.format(pool_id))\n \n return poolData\n\ndef get_largest_pool(poolData, topNumber):\n '''\n Will return top number of pools based on active_stake\n '''\n return sorted(poolData, key=lambda k: k['active_stake'], reverse=True)[:(topNumber -1 )]\n\n\ndef save_data_csv(outData, fileName):\n '''\n This function will save data into file\n '''\n csv_columns = outData[0].keys()\n\n try:\n with open(fileName, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n writer.writeheader()\n for data in outData:\n writer.writerow(data)\n except IOError:\n print(\"I/O error\")\n\n#make_pools_list(api_key)\n\npoolList = []\nwith open('pool_list.txt') as f:\n poolList = f.read().splitlines()\n\npools_range1 = []\npools_range2 = []\n\nfor count, pool_id in enumerate(poolList):\n print ('{0} out of {1}'.format(count+1, len(poolList)))\n url = \"https://cardano-mainnet.blockfrost.io/api/v0/pools/\" + pool_id + \"/history\"\n headers = {'project_id': '{key}'.format(key=api_key)}\n\n params = {\n 'count':3,\n 'page':1,\n 'order':\"desc\",\n }\n tmp_data = []\n\n try:\n tmp_data = requests.get(url, headers=headers, params=params).json()[2]\n except:\n print('Error when pulling history data for pool: ' + pool_id)\n\n if tmp_data:\n if check_stake_range(tmp_data, pool_id, 50000000000, 100000000000):\n pools_range1.append(pool_add_id(tmp_data, pool_id))\n pools_range2.append(pool_add_id(tmp_data, pool_id))\n\n\nprint(\"collecting data ..... Done\")\n\n\n# with open('eggs.csv', newline='') as csvfile:\n# spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n# for row in spamreader:\n# print(', '.join(row))\n\nsave_data_csv(pools_range1, \"range1.csv\")\nsave_data_csv(pools_range2, \"range2.csv\")\nsave_data_csv(get_largest_pool(pools_range2, 100), \"range3.csv\")\n\nprint(\"creating csv ..... Done\")"
}
] | 2 |
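For context on the output files listed in the README above, here is an illustrative snippet (not part of the repository) for inspecting the top-pool file the script writes; the `pool_id` and `active_stake` column names come from the rows the script saves, and the stake values are lovelace as returned by the Blockfrost history endpoint:

```python
import pandas as pd

# Load the 'top pools' CSV written by cardano-pools-data.py and show the
# five largest pools by active stake.
top = pd.read_csv("range3.csv")
top["active_stake"] = top["active_stake"].astype(float)
print(top.nlargest(5, "active_stake")[["pool_id", "active_stake"]])
```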
millington/data_mining_amadeus | https://github.com/millington/data_mining_amadeus | bf9934864ff45c50a442a4321e18058845ff73af | 7e0a9917ab8bfb24b7ec452dcc75fa842fba1b48 | 20baae0a924d8f1112f68312596e2e0cedde1b58 | refs/heads/master | 2019-01-28T11:30:36.197119 | 2014-11-01T13:14:18 | 2014-11-01T13:14:18 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6807738542556763,
"alphanum_fraction": 0.692865788936615,
"avg_line_length": 37.28703689575195,
"blob_id": "3e289a526c3c1e9181a1e88de5d04cd617b9e870",
"content_id": "010a1dd3d982c7a01108d4f8c3982b3c90b1c222",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4135,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 108,
"path": "/data_mining.py",
"repo_name": "millington/data_mining_amadeus",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/env python\nfrom __future__ import print_function\nimport csv\nimport os\nimport numpy\nimport matplotlib.pyplot as plt\nimport pandas\nimport flask\nimport datetime\n\n#Exercise 1: Print total Number of lines in each file\nprint(\"Exercise 1: Total Number of lines in each file\")\nfiles = [\"bookings.csv\",\"searches.csv\"]\n#create dialect for csv reader to parse file using the '^' delimiter\ndia= csv.register_dialect('booking_format',delimiter='^')\n\nfor file in files:\n num_rows = 0\n f_obj = open(file,'rU')\n data_reader = csv.reader(f_obj, dialect='booking_format')\n header = data_reader.next()\n #count each csv row in the file\n for row in data_reader:\n num_rows+=1\n print(\"Number of Rows in %s: %i\" % (file, num_rows))\n\n#Exercise 2:\n#input -> booking file (default=bookings.csv), n (number of top airports to return)\n#returns -> top n arrival airports in 2013 by pax #s\nprint(\"Exercise 2: Top 10 arrival airports in 2013 by passenger numbers\")\ndef top_10_arr_port(n=10):\n #read date and grab only arr_port and pax columns and then group the data by each airport. Then take the sum of each aiport based on value of pax\n bookings_file = 'bookings.csv'\n panda_data = pandas.read_csv(bookings_file, delimiter='^', error_bad_lines=False)\n pax_arr_cols = panda_data[['arr_port','pax']]\n arr_port_groups = pax_arr_cols.groupby('arr_port')\n pax_sum=arr_port_groups['pax'].aggregate(numpy.sum)\n pax_sum.sort(ascending=False)\n return pax_sum[:n]\nprint(top_10_arr_port())\n\n#Exercise 3: Plot of arrival flights (Malaga, Madrid, Barcelona)\n#read only date from columns 0 and 6 containing Destination and Date info\nsearch_data = pandas.read_csv('searches.csv',delimiter='^', usecols=[0,6], error_bad_lines=False, parse_dates=['Date'], dayfirst=True)\n#create month columns to be able to sort data by month\nsearch_data['month'] = search_data['Date'].apply(lambda x: int(x[5:7]))\n\n#create instance count per row for use in summing occurences\nsearch_data['count'] = 1\nmonth_data = search_data[['Destination','month','count']]\n#create new data frame for each airport based on IATA code\nmad_arr = month_data[month_data.Destination.isin(['MAD'])]\nagp_arr = month_data[month_data.Destination.isin(['AGP'])]\nbcn_arr = month_data[month_data.Destination.isin(['BCN'])]\n\n#process and plot data for Madrid\nplt.ion()\nmad_grp = mad_arr.groupby(['Destination','month']).agg({'count':sum})\nmaster_fig = mad_grp.plot( y='count')\nplt.draw()\n\n#process and plot data for Malaga\nagp_grp = agp_arr.groupby(['month']).agg({'count':sum})\nagp_grp_dict =agp_grp.to_dict()\nfor month in range(1,13):\n if not month in agp_grp_dict['count'].keys():\n agp_grp_dict['count'][month] = 0\nagp_grp = pandas.DataFrame(agp_grp_dict)\nagp_grp['month'] = agp_grp.index\nagp_grp['Destination'] = 'AGP'\nagp_grp =agp_grp[['Destination','month','count']]\nagp_grp.index = range(12)\nagp_grp.groupby(['Destination','month']).agg({'count':sum})\nagp_grp.plot(y='count',ax=master_fig)\nplt.draw()\n\n#process and plot data for Barcelona\nbcn_grp = bcn_arr.groupby(['Destination','month'],as_index=False).agg({'count':sum})\nbcn_grp.plot(y='count', ax=master_fig)\nplt.draw()\n\n#set plot attributes to properly label data\nmaster_fig.legend(('MAD','AGP','BCN'))\nmaster_fig.set_xticks(range(12))\nmaster_fig.set_xticklabels(['January', 'February', 'March', 'April', 'May', 'June', 'July','August','September','Oktober','November','December'])\nplt.title('Monthly Number of Searches for Flights Arriving in MAD AGP 
BCN')\nplt.xlabel('Month')\nplt.ylabel('# of Searches')\nplt.show()\n\n#Bonus Exercise 2\n#used the Flask framework to create the web service\napp = flask.Flask(__name__)\n\[email protected](\"/top_arr_port/api/v1.0/<int:n>\", methods=['GET'])\ndef top_10_web_serv(n):\n res = {}\n #use user arguments to determine what data gets returned in json data\n if n != None and n > 0:\n res = top_10_arr_port(n)\n return flask.jsonify(res)\n #use bookings file and n default value of 10 for number of top airports to return\n else:\n res = top_10_arr_port()\n return flask.jsonify(res)\n\nif __name__ == \"__main__\":\n app.run()\n"
}
] | 1 |
stevekochscience/DataMungingExample | https://github.com/stevekochscience/DataMungingExample | 859115d1a2c36614fcce85178851906da0af88be | 9ec59167f0f5bd0ba8d63477d9f587db446fc294 | eb2da95ba4a2cd362d8f1ac58c4381363e9cfbdb | refs/heads/master | 2021-01-19T08:32:52.755749 | 2014-04-17T00:51:20 | 2014-04-17T00:51:20 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5569159388542175,
"alphanum_fraction": 0.5688285827636719,
"avg_line_length": 31.13829803466797,
"blob_id": "f249e4967173666dcea6dbf10d413ed581e56c35",
"content_id": "4ec25229cac1fe3fa1d6f56dce60f5db3a34f348",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3022,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 94,
"path": "/CircStatsYear.py",
"repo_name": "stevekochscience/DataMungingExample",
"src_encoding": "UTF-8",
"text": "import csv\n\n# Make a list of all the filenames 2012_01.txt etc.\nfnamebase = ''\nfnames = []\nYEAR = '2013'\nNUMMONTHS = 12 # For partial year usage (normally 12 of course)\nfor i in range(1,NUMMONTHS+1):\n ex = \"_%.2d.txt\" %i\n fn = fnamebase + YEAR + ex\n fnames.append(fn)\noutfname = fnamebase + YEAR + '.csv'\n \ndef process_location(locationCodes = [], filenames = []):\n yeardata = []\n codes = []\n for month, filename in enumerate(filenames):\n csvfile = open(filename, 'rb')\n reader = csv.reader(csvfile, delimiter='|')\n ftitle = reader.next() #title row of file\n headers = reader.next()\n \n \n # from the third row on, all rows from the millennium export should have 8 columns\n data = []\n for row in reader:\n data.append(row)\n \n codelist = [] #list of found codes\n countlist = [] #list of counts (these two arrays paired as x, y\n for index, row in enumerate(data):\n newrow = row\n newrow[0] = row[0].strip()\n data[index] = newrow\n \n if data[index][0] in locationCodes:\n subrow = [data[index][0], data[index][1]]\n codelist.append(data[index][0])\n countlist.append(data[index][1])\n \n # The list is missing rows that have zero, need to add these zeros\n for index, code in enumerate(locationCodes):\n if code not in codelist:\n codelist.insert(index, code)\n countlist.insert(index, 0)\n countlist = map(int, countlist)\n yeardata.append(countlist)\n codes = codelist\n return(codes, yeardata)\n\n\ndef write_location(outfile, locName, locationCodes, fnames):\n headers = [locName]\n codes, counts = process_location(locationCodes, fnames)\n totals = []\n for i, month in enumerate(counts):\n sum = 0\n for count in month:\n sum += count\n totals.append(sum)\n headers.append('%d/1/%s' % ((i+1), YEAR))\n \n for header in headers:\n outfile.write('%s,' % header)\n outfile.write('\\n')\n \n for index, code in enumerate(codes):\n line = \"%s\" % code\n for j in range(len(counts)):\n line += \",%s\" % counts[j][index]\n line += '\\n'\n outfile.write(line)\n \n outfile.write('TOTAL,')\n for total in totals:\n outfile.write('%d,' % total)\n outfile.write('\\n\\n\\n')\n\noutfile = open(outfname, 'wb')\n\nlocCodes = ['fgc', 'fmin', 'fovs', 'fsto1', 'fsto2', 'fstor', 'fxx', 'fxxov', 'zcfsa']\nwrite_location(outfile, 'FAL', locCodes, fnames)\n\nlocCodes = ['pgc', 'pgcll', 'povs']\nwrite_location(outfile, 'PML', locCodes, fnames)\n\nlocCodes = ['sel', 'sgc', 'sgc2', 'sovs', 'sovs2', 'sstor', 'sxx']\nwrite_location(outfile, 'CSEL', locCodes, fnames)\n\nlocCodes = ['zcopy', 'zgc2', 'zgc3', 'zgc-2', 'zint', 'zjv', 'zjvl', 'zjvo', 'zjvol',\n 'zovs', 'zpww', 'zstpr', 'zvalt', 'zww']\nwrite_location(outfile, 'ZIMM', locCodes, fnames)\n\noutfile.close()\n\n"
},
{
"alpha_fraction": 0.763870120048523,
"alphanum_fraction": 0.7841677665710449,
"avg_line_length": 122.16666412353516,
"blob_id": "d6471b08f05b351cd5e0564b225e0a56ad66106f",
"content_id": "0653254423454d6f8f51607f487cc234db845f1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1478,
"license_type": "no_license",
"max_line_length": 613,
"num_lines": 12,
"path": "/README.md",
"repo_name": "stevekochscience/DataMungingExample",
"src_encoding": "UTF-8",
"text": "CircStatsYear.py is an example of a data munging script I've used recently at work. Previously, someone was processing exported text files from an old library circulation system (Millennium) and creating an Excel spreadsheet summary by manually searching the text files and then copying by hand. This was tedious and subject to error. I was unable to connect to the Millennium database programmatically. The end product also needed to be viewable in Excel. So, the script I created reads the exported text files and creates a new CSV file that will open in Excel and look the same as the previous Excel summaries.\n\n* The exported millennium files have been renamed to \"2013_01.txt\" etc. (01 = January)\n* These files should be in the directory named in fnambase\n* The year should be specified in the YEAR string\n* The output file will be based on year, e.g. \"2013.csv\"\n\nI have included the input files for 12 months of 2013. I have also included what should be the output CSV file as `2013_ex.csv`. If the code runs correctly, the output file, `2013.csv` should be identical.\n\nTo run the code, type `python CircStatsYear.py`\n\nI have included this code as an example for the 2014 April \"data munging\" session for the ABQ Python Meetup. I didn't add any extra commenting or try to make the code efficient or anything--so it should have lots of deficiencies and room for criticism! But it's a real example of something that works (for now!) and saves people time at the Library. :)\n"
}
] | 2 |
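As a side note on the aggregation that CircStatsYear.py performs by hand, the same per-location monthly totals can be sketched with pandas. This is an illustrative alternative under the assumption that the exports keep the `'|'`-delimited layout described above (a title row, a header row, then location code and count in the first two columns); it is not code from the repository:

```python
import pandas as pd

def month_totals(filename, location_codes):
    # Skip the title row so the second row becomes the header, then sum the
    # count column for the requested location codes (codes with zero rows
    # are simply absent, just as in the original script before zero-filling).
    df = pd.read_csv(filename, sep="|", skiprows=1)
    df.columns = [c.strip() for c in df.columns]
    code_col, count_col = df.columns[0], df.columns[1]
    df[code_col] = df[code_col].astype(str).str.strip()
    subset = df[df[code_col].isin(location_codes)]
    return subset.groupby(code_col)[count_col].sum()
```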
johnhess/ssda | https://github.com/johnhess/ssda | 274511521ee08c39390c6d2321a6e0ccc3795f2d | 0d420005d2a5c9ecdd69a985bb7572e411f97c04 | 6f8ab7fb2e5ffd53adbb875f78b872f4790ee62f | refs/heads/master | 2021-06-24T00:03:41.417126 | 2017-08-16T03:54:52 | 2017-08-16T03:54:52 | 100,438,860 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.672245442867279,
"alphanum_fraction": 0.6750348806381226,
"avg_line_length": 25.55555534362793,
"blob_id": "971a06b8d9259fafc8a482798ed0f233f6002fa4",
"content_id": "fb9e16833fcd6df6ac6bf7a98140c5db67b16819",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1434,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 54,
"path": "/app/views.py",
"repo_name": "johnhess/ssda",
"src_encoding": "UTF-8",
"text": "import datetime\nimport os\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.template import RequestContext\n\nimport redis\n\n\nREDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost')\n\n\ndef get_db():\n return redis.StrictRedis.from_url(REDIS_URL)\n\ndef store_message(message):\n get_db().set(datetime.datetime.utcnow(), message.encode('utf-8'))\n\ndef get_messages():\n \"\"\"All messages in reverse chronological order.\"\"\"\n db = get_db()\n return [\n db.get(key).decode('utf-8') \n for key in reversed(sorted(db.keys()))\n ]\n\ndef home(request):\n return render(request=request, template_name='templates/home.html')\n\ndef guestbook(request):\n # version 2.0\n # CSRF Middleware protects this by default in Django\n\n # store the new message, if there is one\n if request.method == 'POST':\n store_message(request.POST.get('message'))\n # either way, give back the list of messages\n context = {'messages': get_messages()}\n return render(\n request=request,\n template_name='templates/guestbook.html',\n context=context\n )\n\ndef greeter(request):\n # Not used in the talk, but here's a vulnerable django greeter\n who = request.GET.get('name', 'friend')\n return HttpResponse(\"Hello, {}\".format(who))\n\ndef nuke(request):\n \"\"\"Clears the guestbook message board.\"\"\"\n get_db().flushall()\n return HttpResponse('database nuked')\n"
},
{
"alpha_fraction": 0.8131868243217468,
"alphanum_fraction": 0.8131868243217468,
"avg_line_length": 29.66666603088379,
"blob_id": "269b9e6b54ca2b007cb085c3d3d7171a41f4d132",
"content_id": "2fa5a9df0e959d944e62f569ab3d683c557e893f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 91,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 3,
"path": "/README.md",
"repo_name": "johnhess/ssda",
"src_encoding": "UTF-8",
"text": "# Seemingly Secure Django Application\n\nPart of a talk (see veryveryvulnerable.com for more)"
}
] | 2 |
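The `greeter` view in views.py above reflects a query parameter directly into the response body, which is the classic reflected-XSS pattern this "seemingly secure" app is built to demonstrate. A minimal hardened variant, assuming only Django's standard `escape` helper (a sketch, not the author's code):

```python
from django.http import HttpResponse
from django.utils.html import escape

def greeter_safe(request):
    # Escape user-controlled input before reflecting it into the response,
    # so a payload like ?name=<script>...</script> renders as inert text.
    who = request.GET.get("name", "friend")
    return HttpResponse("Hello, {}".format(escape(who)))
```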
Javeria-Arif/LinearRegression | https://github.com/Javeria-Arif/LinearRegression | 58fa41b40b55d9d67b5c39336d44925df0cd6186 | c959cf21166cf8390428d5f355dd617758b2be2e | c2a5fd42b3701c7137bc26e4822f341b0c022342 | refs/heads/master | 2022-04-11T23:27:37.593668 | 2020-03-28T18:55:19 | 2020-03-28T18:55:19 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6538461446762085,
"alphanum_fraction": 0.7307692170143127,
"avg_line_length": 25,
"blob_id": "909b322225cbae1e2b25f1a59443ef432bba313d",
"content_id": "139cdcb3ca2307bc47ef91f775764d5c4762a578",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 26,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 1,
"path": "/README.md",
"repo_name": "Javeria-Arif/LinearRegression",
"src_encoding": "UTF-8",
"text": "<h1>MACHINE LEARNING</h1>\n"
},
{
"alpha_fraction": 0.6767330169677734,
"alphanum_fraction": 0.6829100847244263,
"avg_line_length": 26.60784339904785,
"blob_id": "7384512bfd7fe4e66a09f169971813d0545f7cde",
"content_id": "f899d5460c64b3d285c1f2a9deeeb598be837470",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1457,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 51,
"path": "/assignment_2.py",
"repo_name": "Javeria-Arif/LinearRegression",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n\r\n'Opening the data file'\r\ndata = pd.read_csv('dataset.csv')\r\nprint(data.shape) #returns rows and columns\r\n#data.head(6) Shows the data table upto nth values\r\n\r\n\r\n'Choosing the desired columns'\r\nX = data.iloc[:, 2:3].values\r\nY = data.iloc[:, 3:4].values\r\n#print(X)\r\n#print (Y)\r\n\r\n#plt.scatter(X,Y, color='red')\r\n\r\n\r\n'Splitting my data into training and test samples'\r\nfrom sklearn.model_selection import train_test_split\r\nX_train,X_test,Y_train,Y_test = train_test_split(X,Y, test_size = 0.2,random_state = 10)\r\n#print(len(X_test))\r\n\r\n'Applying Linear Regression'\r\nfrom sklearn.linear_model import LinearRegression\r\nreg = LinearRegression()\r\n\r\nreg.fit(X_train,Y_train) #Training data\r\n\r\ny_pred = reg.predict(X_test) #Predicting results\r\n\r\n\r\n'Visualizing trained data'\r\nplt.scatter(X_train, Y_train, color = 'green')\r\nplt.plot(X_train, reg.predict(X_train), color = 'red')\r\nplt.xlabel('Head size(in cms)')\r\nplt.ylabel('Brain weight(in grams)')\r\nplt.title('Head size VS Brain weight (Training set)')\r\nplt.show()\r\n\r\n'Visualizing tested data'\r\nplt.scatter(X_test, Y_test, color = 'blue')\r\nplt.plot(X_train, reg.predict(X_train), color = 'red')\r\nplt.xlabel('Head size(in cms)')\r\nplt.ylabel('Brain weight(in grams)')\r\nplt.title('Head size VS Brain weight (test set)')\r\nplt.show()\r\n\r\nprint(reg.score(X_test,Y_test)) #Gives the accuracy of prediction"
}
] | 2 |
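To round out assignment_2.py above, here is a tiny self-contained illustration of the prediction step that the script stops short of; the (head size, brain weight) pairs are made up for the example rather than taken from dataset.csv:

```python
import numpy as np
from sklearn.linear_model import LinearRegression

# Fit on illustrative (head size in cm^3, brain weight in g) pairs, then
# predict the brain weight for a new head size, mirroring reg.predict usage.
X = np.array([[3500.0], [3800.0], [4100.0]])
Y = np.array([1200.0, 1280.0, 1360.0])
reg = LinearRegression().fit(X, Y)
print(reg.predict(np.array([[4000.0]])))
```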
tosone/python-interview | https://github.com/tosone/python-interview | 2bdfcfe3f37811f7849c34e442d1ed21bb3cefd7 | 37adefeec7e3a128d44ca3cdfb7b679be973488d | e8e2c0dacb24d0b2bec915bfc0fc8a166113342d | refs/heads/master | 2022-12-09T11:34:02.540066 | 2020-09-07T11:30:27 | 2020-09-07T11:30:27 | 284,379,474 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6381215453147888,
"alphanum_fraction": 0.6519337296485901,
"avg_line_length": 26.846153259277344,
"blob_id": "37ac333bc318810bfbad21512db80ff91373bb64",
"content_id": "e675cfaf3abd0712f8aa4bee4d8995b6fd9d096e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 362,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 13,
"path": "/kafka/producer.py",
"repo_name": "tosone/python-interview",
"src_encoding": "UTF-8",
"text": "from kafka import KafkaProducer\nproducer = KafkaProducer(bootstrap_servers='localhost:9092')\n\nwhile True:\n print(\"\\n\\nType \\\"quit\\\" to exit\")\n print(\"Enter message to be sent:\")\n msg = input()\n if msg == \"quit\":\n print(\"Exiting...\")\n break\n producer.send('test', msg.encode('utf-8'))\n print(\"Sending msg \\\"{}\\\"\".format(msg))\n print(\"Message sent!\")\n"
},
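producer.py above only covers the sending side of the kafka-python API. A minimal matching consumer sketch, assuming the same local broker and `test` topic (the `auto_offset_reset` choice is illustrative):

```python
from kafka import KafkaConsumer

# Subscribe to the same 'test' topic on the same local broker and print
# each message the producer above sends.
consumer = KafkaConsumer(
    'test',
    bootstrap_servers='localhost:9092',
    auto_offset_reset='earliest',  # also read messages sent before startup
)
for record in consumer:
    print("Received: {}".format(record.value.decode('utf-8')))
```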
{
"alpha_fraction": 0.5142857432365417,
"alphanum_fraction": 0.5600000023841858,
"avg_line_length": 6.291666507720947,
"blob_id": "35c742984c786553fe06bcde79cbfb16182ecdf6",
"content_id": "49f2638c29833518cfcbf0f32e86c93b9ec3e653",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 24,
"path": "/20200802/git.md",
"repo_name": "tosone/python-interview",
"src_encoding": "UTF-8",
"text": "# 20200802 第一次\n\n### Git\n\n- 分布式\n- 改动追踪\n- 版本管理\n- 分支管理\n\n#### 概念\n\n- 工作区\n- 暂存区\n- commit\n- branch\n- tag\n- release\n- pull request\n\n#### 远程仓库\n\n#### pull push fetch\n\n#### merge rebase\n"
}
] | 2 |
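To make the bare `merge rebase` heading in the notes above concrete, here is an illustrative command sequence (the `master` and `feature` branch names are assumptions) contrasting the two ways of integrating changes:

```
# Merge: keep both histories and record an extra merge commit on master.
git checkout master
git merge feature

# Rebase: replay feature's commits on top of master for a linear history.
git checkout feature
git rebase master
```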
PradeepNalluri/Prefix-Tuning-Bert | https://github.com/PradeepNalluri/Prefix-Tuning-Bert | 379753b37d47b391f1ad29b8780ec382f4e1f027 | 982558537177612d9f06f19a1c5de6111177585a | 47e35f302adbfbdbb5ec803bdb9a82c805a1f769 | refs/heads/master | 2023-05-23T19:51:28.127847 | 2022-06-28T03:16:02 | 2022-06-28T03:16:02 | 424,397,041 | 13 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6690049171447754,
"alphanum_fraction": 0.6731618642807007,
"avg_line_length": 34.971961975097656,
"blob_id": "c89a5451a55db30709f587762577220174b1cae9",
"content_id": "8e5aa2054fbc2ef3587e45703c999b7d84e72c4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3849,
"license_type": "no_license",
"max_line_length": 193,
"num_lines": 107,
"path": "/README.md",
"repo_name": "PradeepNalluri/Prefix-Tuning-Bert",
"src_encoding": "UTF-8",
"text": "# Prefix-Tuning-Bert\n## Installation\n\nThis assignment is implemented in python 3.6 and torch 1.9.0. Follow these steps to setup your environment:\n\n1. [Download and install Conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html \"Download and install Conda\")\n2. Create a Conda environment with Python 3.6\n```\nconda create -n nlp-project python=3.6\n```\n\n3. Activate the Conda environment. You will need to activate the Conda environment in each terminal in which you want to use this code.\n```\nconda activate nlp-project\n```\n4. Install the requirements:\n```\npip3 install -r requirements.txt\ncd transformers; pip install -e .;\n```\n\n5. Download Sarcasm Data:\n```\nNeed to download sarcasm data from kaggle: https://www.kaggle.com/danofer/sarcasm/\nThe file can be passed as an input argument during runtime\n```\n\n## Modeling Files\n\nThe main code to build models is contained in only one file:\n\n- `train.py`\n\nThere are several tuning_modes that can be chosen from to train the models \n* baseline_finetune\n* baseline_lightweight_finetune\n* prefix_bottom_two_layers\n* prefix_top_two_layers\n* prefix_bert_embedding_layer\n* prefix_custom_initializaition\n* prefix_random_initializaition\n* noprefix_top_two_layers\n* noprefix_bottom_two_layers\n* noprefix_embedding_layer_update\n\n### Training command:\n\nBelow are some of the examples of training command:\n\n```\n# By default train.py trains prefix tuning with random embedding initializaition of 5 tokens:\n# However all the parameters are configurable with the arguments described in hyper-parameters section.\n\n# Default training:\npython train.py\n\n# Training with different tuning mode\npython train.py --tuning_mode noprefix_top_two_layers\n```\n### Training hyper-parameters\n```\noptional arguments:\n -h, --help show this help message and exit\n --train_data TRAIN_DATA\n training dataset file that have to be used\n --prepare_data if passed, will prepare data.\n --save_processed_data\n if passed, save the processed data.\n --batch_size BATCH_SIZE\n batch_size\n --custom if passed, use no custom.\n --epochs EPOCHS epochs\n --learning_rate LEARNING_RATE\n learning_rate\n --save_model if passed, save model.\n --prefix_length PREFIX_LENGTH\n number of prefix tokens\n --model_save_directory MODEL_SAVE_DIRECTORY\n save the model to\n --tuning_mode {baseline_finetune,baseline_lightweight_finetune,prefix_bottom_two_layers,\n prefix_top_two_layers,prefix_bert_embedding_layer,\n prefix_custom_initializaition,prefix_random_initializaition,noprefix_top_two_layers,\n noprefix_bottom_two_layers,noprefix_embedding_layer_update}\n Name of the tuning_mode\n --use_multi_gpu USE_MULTI_GPU\n Use Multiple GPUs\n --phrase_for_init PHRASE_FOR_INIT\n If using custom initialization this will be used to\n initialize the prefix tokens\n --checkpoint CHECKPOINT\n to checkpoint the model at each epoch\n --analyze_tokens ANALYZE_TOKENS\n Closest words in bert vocab in each epoch are\n extracted\n --test_file TEST_FILE\n test file that have to be used\n \n --evaluate To run the script in Evaluation mode\n \n --saved_model_location SAVED_MODEL_LOCATION\n Loaction of the stored model, must be used when only\n evaluation is called\n```\n\n## Disclaimer\n\nWe are trying to do distributed training using `distributed_training.py`, which is a work in progress. We just wanted to include this in the submission to show what we are currently working on.\n"
},
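The argument list above defines `--evaluate` and `--saved_model_location` but never shows them in use; a hypothetical evaluation invocation (the directory and test file names here are assumptions, not from the README) would look like:

```
python train.py --evaluate --saved_model_location ./model_save_directory --test_file test.csv
```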
{
"alpha_fraction": 0.5867925882339478,
"alphanum_fraction": 0.596236526966095,
"avg_line_length": 38.19613265991211,
"blob_id": "a531e92cd9f00390d3b9ff5ae4f7cdf92cc4daae",
"content_id": "9ec7afa7d527d0b4a2170d182ac39537617f95fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14189,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 362,
"path": "/distributed_training.py",
"repo_name": "PradeepNalluri/Prefix-Tuning-Bert",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport pandas as pd \nimport torch\nfrom transformers import BertTokenizer\nimport numpy as np\nimport multiprocessing as mp\nimport time\nfrom tqdm import tqdm\nimport os\nos.environ[\"MASTER_ADDR\"] = \"127.0.0.1\"\nos.environ['MASTER_PORT'] = '8888'\nfrom transformers import BertModel\nfrom transformers.modeling_outputs import SequenceClassifierOutput\nfrom torch.nn import CrossEntropyLoss\nimport torch.nn as nn\nfrom transformers import BertForSequenceClassification, AdamW, BertConfig\nimport pickle\nfrom torch.utils.data import TensorDataset, random_split\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nimport torch.distributed as dist\nfrom SARCBertClassifier import SARCBertClassifier\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom transformers import get_linear_schedule_with_warmup\nimport random\nimport numpy as np\n\nimport numpy as np\nimport time\nimport datetime\nimport tempfile\n\n# Function to calculate the accuracy of our predictions vs labels\ndef flat_accuracy(preds, labels):\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\n\n\ndef format_time(elapsed):\n '''\n Takes a time in seconds and returns a string hh:mm:ss\n '''\n # Round to the nearest second.\n elapsed_rounded = int(round((elapsed)))\n \n # Format as hh:mm:ss\n return str(datetime.timedelta(seconds=elapsed_rounded))\n\n\n\ndef train(rank, world_size):\n \n f = open('processed_data.pckl', 'rb')\n test = pickle.load(f)\n f.close()\n\n\n input_ids = test[0]\n attention_masks = test[1]\n labels = test[2]\n\n # Combine the training inputs into a TensorDataset.\n dataset = TensorDataset(input_ids, attention_masks, labels)\n\n # Create a 90-10 train-validation split.\n\n # Calculate the number of samples to include in each set.\n train_size = int(0.9 * len(dataset))\n val_size = len(dataset) - train_size\n\n # Divide the dataset by randomly selecting samples.\n train_dataset, val_dataset = random_split(dataset, [train_size, val_size])\n\n print('{:>5,} training samples'.format(train_size))\n print('{:>5,} validation samples'.format(val_size))\n\n # The DataLoader needs to know our batch size for training, so we specify it \n # here. For fine-tuning BERT on a specific task, the authors recommend a batch \n # size of 16 or 32.\n batch_size = 32\n\n # Create the DataLoaders for our training and validation sets.\n # We'll take training samples in random order. 
\n train_dataloader = DataLoader(\n train_dataset, # The training samples.\n sampler = RandomSampler(train_dataset), # Select batches randomly\n batch_size = batch_size # Trains with this batch size.\n )\n\n # For validation the order doesn't matter, so we'll just read them sequentially.\n validation_dataloader = DataLoader(\n val_dataset, # The validation samples.\n sampler = SequentialSampler(val_dataset), # Pull out batches sequentially.\n batch_size = batch_size # Evaluate with this batch size.\n )\n # This training code is based on the `run_glue.py` script here:\n # https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128\n\n # Set the seed value all over the place to make this reproducible.\n dist.init_process_group(\"gloo\", rank=rank, world_size=world_size)\n # create local model\n custom=True\n if custom:\n model = SARCBertClassifier.from_pretrained(\n \"bert-base-uncased\", # Use the 12-layer BERT model, with an uncased vocab.\n num_labels = 2, # The number of output labels--2 for binary classification.\n output_attentions = False, # Whether the model returns attentions weights.\n output_hidden_states = False, # Whether the model returns all hidden-states.\n )\n\n model.update_network_sarc(0,rank,freeze_bert_layers=False)\n else:\n model = BertForSequenceClassification.from_pretrained(\n \"bert-base-uncased\", # Use the 12-layer BERT model, with an uncased vocab.\n num_labels = 2, # The number of output labels--2 for binary classification. \n output_attentions = False, # Whether the model returns attentions weights.\n output_hidden_states = False, # Whether the model returns all hidden-states.\n )\n \n model.to(rank)\n # construct DDP model\n model = DDP(model, device_ids=[rank])\n # Note: AdamW is a class from the huggingface library (as opposed to pytorch) \n # I believe the 'W' stands for 'Weight Decay fix\"\n optimizer = AdamW(model.parameters(),\n lr = 2e-5, # args.learning_rate - default is 5e-5, our notebook had 2e-5\n eps = 1e-8 # args.adam_epsilon - default is 1e-8.\n )\n\n # Number of training epochs. The BERT authors recommend between 2 and 4. \n # We chose to run for 4, but we'll see later that this may be over-fitting the\n # training data.\n epochs = 4\n\n # Total number of training steps is [number of batches] x [number of epochs]. \n # (Note that this is not the same as the number of training samples).\n total_steps = len(train_dataloader) * epochs\n\n # Create the learning rate scheduler.\n scheduler = get_linear_schedule_with_warmup(optimizer, \n num_warmup_steps = 0, # Default value in run_glue.py\n num_training_steps = total_steps)\n seed_val = 42\n\n random.seed(seed_val)\n np.random.seed(seed_val)\n torch.manual_seed(seed_val)\n torch.cuda.manual_seed_all(seed_val)\n\n # We'll store a number of quantities such as training and validation loss, \n # validation accuracy, and timings.\n training_stats_custom = []\n\n # Measure the total training time for the whole run.\n total_t0 = time.time()\n\n # For each epoch...\n for epoch_i in range(0, epochs):\n \n # ========================================\n # Training\n # ========================================\n \n # Perform one full pass over the training set.\n\n print(\"\")\n print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))\n print('Training...')\n\n # Measure how long the training epoch takes.\n t0 = time.time()\n\n # Reset the total loss for this epoch.\n total_train_loss = 0\n\n # Put the model into training mode. 
Don't be misled--the call to \n # `train` just changes the *mode*, it doesn't *perform* the training.\n # `dropout` and `batchnorm` layers behave differently during training\n # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)\n model.train()\n \n total_training_loss = 0\n total_correct_predictions, total_predictions = 0, 0\n generator_tqdm = tqdm(train_dataloader)\n \n # For each batch of training data...\n for step, batch in enumerate(generator_tqdm):\n\n # Progress update every 40 batches.\n b_input_ids = batch[0].to(rank)\n b_input_mask = batch[1].to(rank)\n b_labels = batch[2].to(rank)\n \n model.zero_grad() \n\n result = model(b_input_ids, \n token_type_ids=None, \n attention_mask=b_input_mask, \n labels=b_labels,\n return_dict=True)\n\n loss = result.loss\n logits = result.logits\n\n total_train_loss += loss.mean()\n\n # Perform a backward pass to calculate the gradients.\n loss.mean().backward()\n\n # Clip the norm of the gradients to 1.0.\n # This is to help prevent the \"exploding gradients\" problem.\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n # Update parameters and take a step using the computed gradient.\n # The optimizer dictates the \"update rule\"--how the parameters are\n # modified based on their gradients, the learning rate, etc.\n optimizer.step()\n\n # Update the learning rate.\n scheduler.step()\n \n batch_predictions = np.argmax(nn.Softmax(dim=1)(logits).detach().cpu().numpy(), axis=-1)\n total_correct_predictions += (batch_predictions == b_labels.detach().cpu().numpy()).sum()\n total_predictions += b_labels.shape[0]\n \n description = (\"Average training loss: %.2f Accuracy: %.2f Label sum: %.2f\"\n % (total_train_loss/(step+1), total_correct_predictions/total_predictions,batch_predictions.sum()))\n generator_tqdm.set_description(description, refresh=False)\n\n # Calculate the average loss over all of the batches.\n avg_train_loss = total_train_loss / len(train_dataloader) \n \n # Measure how long this epoch took.\n training_time = format_time(time.time() - t0)\n\n CHECKPOINT_PATH = \"./model.checkpoint\"\n if rank == 0:\n # All processes should see same parameters as they all start from same\n # random parameters and gradients are synchronized in backward passes.\n # Therefore, saving it in one process is sufficient.\n torch.save(model.state_dict(), CHECKPOINT_PATH)\n \n print(\"\")\n print(\" Average training loss: {0:.2f}\".format(avg_train_loss))\n print(\" Training epoch took: {:}\".format(training_time))\n if(rank==0):\n # ========================================\n # Validation\n # ========================================\n # After the completion of each training epoch, measure our performance on\n # our validation set.\n\n print(\"\")\n print(\"Running Validation...\")\n\n t0 = time.time()\n\n # Put the model in evaluation mode--the dropout layers behave differently\n # during evaluation.\n model.eval()\n\n # Tracking variables \n total_eval_accuracy = 0\n total_eval_loss = 0\n nb_eval_steps = 0\n print(\"Evaluation In Progress\")\n # Evaluate data for one epoch\n \n for batch in tqdm(validation_dataloader):\n b_input_ids = batch[0].to(rank)\n b_input_mask = batch[1].to(rank)\n b_labels = batch[2].to(rank)\n with torch.no_grad():\n result = model(b_input_ids, \n token_type_ids=None, \n attention_mask=b_input_mask,\n labels=b_labels,\n return_dict=True)\n\n # Get the loss and \"logits\" output by the model. The \"logits\" are the \n # output values prior to applying an activation function like the \n # softmax.\n loss = result.loss\n logits = result.logits\n \n # Accumulate the validation loss.\n total_eval_loss += loss.mean()\n \n # Move logits and labels to CPU\n logits = logits.detach().cpu().numpy()\n label_ids = b_labels.to('cpu').numpy()\n\n # Calculate the accuracy for this batch of test sentences, and\n # accumulate it over all batches.\n total_eval_accuracy += flat_accuracy(logits, label_ids)\n \n\n # Report the final accuracy for this validation run.\n avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)\n print(\" Accuracy: {0:.2f}\".format(avg_val_accuracy))\n\n # Calculate the average loss over all of the batches.\n avg_val_loss = total_eval_loss / len(validation_dataloader)\n \n # Measure how long the validation run took.\n validation_time = format_time(time.time() - t0)\n \n print(\" Validation Loss: {0:.2f}\".format(avg_val_loss))\n print(\" Validation took: {:}\".format(validation_time))\n\n # Record all statistics from this epoch.\n training_stats_custom.append(\n {\n 'epoch': epoch_i + 1,\n 'Training Loss': avg_train_loss,\n 'Valid. Loss': avg_val_loss,\n 'Valid. Accur.': avg_val_accuracy,\n 'Training Time': training_time,\n 'Validation Time': validation_time\n }\n )\n \n if(rank==0):\n print(\"\")\n print(\"Training complete!\")\n\n print(\"Total training took {:} (h:mm:ss)\".format(format_time(time.time()-total_t0)))\n # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()\n\n output_dir = './model_save_DDP/'\n\n # Create output directory if needed\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n print(\"Saving model to %s\" % output_dir)\n\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n # recreate the tokenizer that was used for preprocessing so it can be saved alongside the model\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n tokenizer.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n # torch.save(args, os.path.join(output_dir, 'training_args.bin'))\n\n\ndef main():\n world_size = 8\n mp.spawn(train,args=(world_size,),nprocs=world_size,join=True)\n\n\nif __name__==\"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.6774942874908447,
"alphanum_fraction": 0.7015141844749451,
"avg_line_length": 50.069915771484375,
"blob_id": "f2cc2e896228c69bfba74a0bd471ee77a88d1719",
"content_id": "0abf9bccadab157a2cec0f64cf10bcd1a41e7c1e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "reStructuredText",
"length_bytes": 24138,
"license_type": "permissive",
"max_line_length": 184,
"num_lines": 472,
"path": "/transformers/docs/source/quicktour.rst",
"repo_name": "PradeepNalluri/Prefix-Tuning-Bert",
"src_encoding": "UTF-8",
"text": ".. \n Copyright 2020 The HuggingFace Team. All rights reserved.\n\n Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on\n an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n specific language governing permissions and limitations under the License.\n\nQuick tour\n=======================================================================================================================\n\nLet's have a quick look at the 🤗 Transformers library features. The library downloads pretrained models for Natural\nLanguage Understanding (NLU) tasks, such as analyzing the sentiment of a text, and Natural Language Generation (NLG),\nsuch as completing a prompt with new text or translating in another language.\n\nFirst we will see how to easily leverage the pipeline API to quickly use those pretrained models at inference. Then, we\nwill dig a little bit more and see how the library gives you access to those models and helps you preprocess your data.\n\n.. note::\n\n All code examples presented in the documentation have a switch on the top left for Pytorch versus TensorFlow. If\n not, the code is expected to work for both backends without any change needed.\n\nGetting started on a task with a pipeline\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe easiest way to use a pretrained model on a given task is to use :func:`~transformers.pipeline`.\n\n.. raw:: html\n\n <iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/tiZFewofSLM\" title=\"YouTube video player\"\n frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope;\n picture-in-picture\" allowfullscreen></iframe>\n\n🤗 Transformers provides the following tasks out of the box:\n\n- Sentiment analysis: is a text positive or negative?\n- Text generation (in English): provide a prompt and the model will generate what follows.\n- Name entity recognition (NER): in an input sentence, label each word with the entity it represents (person, place,\n etc.)\n- Question answering: provide the model with some context and a question, extract the answer from the context.\n- Filling masked text: given a text with masked words (e.g., replaced by ``[MASK]``), fill the blanks.\n- Summarization: generate a summary of a long text.\n- Translation: translate a text in another language.\n- Feature extraction: return a tensor representation of the text.\n\nLet's see how this work for sentiment analysis (the other tasks are all covered in the :doc:`task summary\n</task_summary>`):\n\nInstall the following dependencies (if not already installed):\n\n.. code-block:: bash\n\n ## PYTORCH CODE\n pip install torch\n ## TENSORFLOW CODE\n pip install tensorflow\n\n.. code-block::\n\n >>> from transformers import pipeline\n >>> classifier = pipeline('sentiment-analysis')\n\nWhen typing this command for the first time, a pretrained model and its tokenizer are downloaded and cached. We will\nlook at both later on, but as an introduction the tokenizer's job is to preprocess the text for the model, which is\nthen responsible for making predictions. 
The pipeline groups all of that together, and post-process the predictions to\nmake them readable. For instance:\n\n\n.. code-block::\n\n >>> classifier('We are very happy to show you the 🤗 Transformers library.')\n [{'label': 'POSITIVE', 'score': 0.9998}]\n\nThat's encouraging! You can use it on a list of sentences, which will be preprocessed then fed to the model, returning\na list of dictionaries like this one:\n\n.. code-block::\n\n >>> results = classifier([\"We are very happy to show you the 🤗 Transformers library.\",\n ... \"We hope you don't hate it.\"])\n >>> for result in results:\n ... print(f\"label: {result['label']}, with score: {round(result['score'], 4)}\")\n label: POSITIVE, with score: 0.9998\n label: NEGATIVE, with score: 0.5309\n\nTo use with a large dataset, look at :doc:`iterating over a pipeline <./main_classes/pipelines>`\n\nYou can see the second sentence has been classified as negative (it needs to be positive or negative) but its score is\nfairly neutral.\n\nBy default, the model downloaded for this pipeline is called \"distilbert-base-uncased-finetuned-sst-2-english\". We can\nlook at its `model page <https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english>`__ to get more\ninformation about it. It uses the :doc:`DistilBERT architecture </model_doc/distilbert>` and has been fine-tuned on a\ndataset called SST-2 for the sentiment analysis task.\n\nLet's say we want to use another model; for instance, one that has been trained on French data. We can search through\nthe `model hub <https://huggingface.co/models>`__ that gathers models pretrained on a lot of data by research labs, but\nalso community models (usually fine-tuned versions of those big models on a specific dataset). Applying the tags\n\"French\" and \"text-classification\" gives back a suggestion \"nlptown/bert-base-multilingual-uncased-sentiment\". Let's\nsee how we can use it.\n\nYou can directly pass the name of the model to use to :func:`~transformers.pipeline`:\n\n.. code-block::\n\n >>> classifier = pipeline('sentiment-analysis', model=\"nlptown/bert-base-multilingual-uncased-sentiment\")\n\nThis classifier can now deal with texts in English, French, but also Dutch, German, Italian and Spanish! You can also\nreplace that name by a local folder where you have saved a pretrained model (see below). You can also pass a model\nobject and its associated tokenizer.\n\nWe will need two classes for this. The first is :class:`~transformers.AutoTokenizer`, which we will use to download the\ntokenizer associated to the model we picked and instantiate it. The second is\n:class:`~transformers.AutoModelForSequenceClassification` (or\n:class:`~transformers.TFAutoModelForSequenceClassification` if you are using TensorFlow), which we will use to download\nthe model itself. Note that if we were using the library on an other task, the class of the model would change. The\n:doc:`task summary </task_summary>` tutorial summarizes which class is used for which task.\n\n.. code-block::\n\n >>> ## PYTORCH CODE\n >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification\n >>> ## TENSORFLOW CODE\n >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification\n\nNow, to download the models and tokenizer we found previously, we just have to use the\n:func:`~transformers.AutoModelForSequenceClassification.from_pretrained` method (feel free to replace ``model_name`` by\nany other model from the model hub):\n\n.. 
code-block::\n\n >>> ## PYTORCH CODE\n >>> model_name = \"nlptown/bert-base-multilingual-uncased-sentiment\"\n >>> model = AutoModelForSequenceClassification.from_pretrained(model_name)\n >>> tokenizer = AutoTokenizer.from_pretrained(model_name)\n >>> classifier = pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)\n >>> ## TENSORFLOW CODE\n >>> model_name = \"nlptown/bert-base-multilingual-uncased-sentiment\"\n >>> # This model only exists in PyTorch, so we use the `from_pt` flag to import that model in TensorFlow.\n >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)\n >>> tokenizer = AutoTokenizer.from_pretrained(model_name)\n >>> classifier = pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)\n\nIf you don't find a model that has been pretrained on some data similar to yours, you will need to fine-tune a\npretrained model on your data. We provide :doc:`example scripts </examples>` to do so. Once you're done, don't forget\nto share your fine-tuned model on the hub with the community, using :doc:`this tutorial </model_sharing>`.\n\n.. _pretrained-model:\n\nUnder the hood: pretrained models\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nLet's now see what happens beneath the hood when using those pipelines.\n\n.. raw:: html\n\n <iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/AhChOFRegn4\" title=\"YouTube video player\"\n frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope;\n picture-in-picture\" allowfullscreen></iframe>\n\nAs we saw, the model and tokenizer are created using the :obj:`from_pretrained` method:\n\n.. code-block::\n\n >>> ## PYTORCH CODE\n >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification\n >>> model_name = \"distilbert-base-uncased-finetuned-sst-2-english\"\n >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)\n >>> tokenizer = AutoTokenizer.from_pretrained(model_name)\n >>> ## TENSORFLOW CODE\n >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification\n >>> model_name = \"distilbert-base-uncased-finetuned-sst-2-english\"\n >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)\n >>> tokenizer = AutoTokenizer.from_pretrained(model_name)\n\nUsing the tokenizer\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nWe mentioned the tokenizer is responsible for the preprocessing of your texts. First, it will split a given text in\nwords (or part of words, punctuation symbols, etc.) usually called `tokens`. There are multiple rules that can govern\nthat process (you can learn more about them in the :doc:`tokenizer summary <tokenizer_summary>`), which is why we need\nto instantiate the tokenizer using the name of the model, to make sure we use the same rules as when the model was\npretrained.\n\nThe second step is to convert those `tokens` into numbers, to be able to build a tensor out of them and feed them to\nthe model. To do this, the tokenizer has a `vocab`, which is the part we download when we instantiate it with the\n:obj:`from_pretrained` method, since we need to use the same `vocab` as when the model was pretrained.\n\nTo apply these steps on a given text, we can just feed it to our tokenizer:\n\n.. 
code-block::\n\n >>> inputs = tokenizer(\"We are very happy to show you the 🤗 Transformers library.\")\n\nThis returns a dictionary string to list of ints. It contains the `ids of the tokens <glossary.html#input-ids>`__, as\nmentioned before, but also additional arguments that will be useful to the model. Here for instance, we also have an\n`attention mask <glossary.html#attention-mask>`__ that the model will use to have a better understanding of the\nsequence:\n\n\n.. code-block::\n\n >>> print(inputs)\n {'input_ids': [101, 2057, 2024, 2200, 3407, 2000, 2265, 2017, 1996, 100, 19081, 3075, 1012, 102],\n 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n\nYou can pass a list of sentences directly to your tokenizer. If your goal is to send them through your model as a\nbatch, you probably want to pad them all to the same length, truncate them to the maximum length the model can accept\nand get tensors back. You can specify all of that to the tokenizer:\n\n.. code-block::\n\n >>> ## PYTORCH CODE\n >>> pt_batch = tokenizer(\n ... [\"We are very happy to show you the 🤗 Transformers library.\", \"We hope you don't hate it.\"],\n ... padding=True,\n ... truncation=True,\n ... max_length=512,\n ... return_tensors=\"pt\"\n ... )\n >>> ## TENSORFLOW CODE\n >>> tf_batch = tokenizer(\n ... [\"We are very happy to show you the 🤗 Transformers library.\", \"We hope you don't hate it.\"],\n ... padding=True,\n ... truncation=True,\n ... max_length=512,\n ... return_tensors=\"tf\"\n ... )\n\nThe padding is automatically applied on the side expected by the model (in this case, on the right), with the padding\ntoken the model was pretrained with. The attention mask is also adapted to take the padding into account:\n\n.. code-block::\n\n >>> ## PYTORCH CODE\n >>> for key, value in pt_batch.items():\n ... print(f\"{key}: {value.numpy().tolist()}\")\n input_ids: [[101, 2057, 2024, 2200, 3407, 2000, 2265, 2017, 1996, 100, 19081, 3075, 1012, 102], [101, 2057, 3246, 2017, 2123, 1005, 1056, 5223, 2009, 1012, 102, 0, 0, 0]]\n attention_mask: [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]\n >>> ## TENSORFLOW CODE\n >>> for key, value in tf_batch.items():\n ... print(f\"{key}: {value.numpy().tolist()}\")\n input_ids: [[101, 2057, 2024, 2200, 3407, 2000, 2265, 2017, 1996, 100, 19081, 3075, 1012, 102], [101, 2057, 3246, 2017, 2123, 1005, 1056, 5223, 2009, 1012, 102, 0, 0, 0]]\n attention_mask: [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]\n\nYou can learn more about tokenizers :doc:`here <preprocessing>`.\n\nUsing the model\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nOnce your input has been preprocessed by the tokenizer, you can send it directly to the model. As we mentioned, it will\ncontain all the relevant information the model needs. If you're using a TensorFlow model, you can pass the dictionary\nkeys directly to tensors, for a PyTorch model, you need to unpack the dictionary by adding :obj:`**`.\n\n.. code-block::\n\n >>> ## PYTORCH CODE\n >>> pt_outputs = pt_model(**pt_batch)\n >>> ## TENSORFLOW CODE\n >>> tf_outputs = tf_model(tf_batch)\n\nIn 🤗 Transformers, all outputs are objects that contain the model's final activations along with other metadata. These\nobjects are described in greater detail :doc:`here <main_classes/output>`. For now, let's inspect the output ourselves:\n\n.. 
code-block::\n\n >>> ## PYTORCH CODE\n >>> print(pt_outputs)\n SequenceClassifierOutput(loss=None, logits=tensor([[-4.0833, 4.3364],\n [ 0.0818, -0.0418]], grad_fn=<AddmmBackward>), hidden_states=None, attentions=None)\n >>> ## TENSORFLOW CODE\n >>> print(tf_outputs)\n TFSequenceClassifierOutput(loss=None, logits=<tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n array([[-4.0833 , 4.3364 ],\n [ 0.0818, -0.0418]], dtype=float32)>, hidden_states=None, attentions=None)\n\nNotice how the output object has a ``logits`` attribute. You can use this to access the model's final activations.\n\n.. note::\n\n All 🤗 Transformers models (PyTorch or TensorFlow) return the activations of the model *before* the final activation\n function (like SoftMax) since this final activation function is often fused with the loss.\n\nLet's apply the SoftMax activation to get predictions.\n\n.. code-block::\n\n >>> ## PYTORCH CODE\n >>> from torch import nn\n >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)\n >>> ## TENSORFLOW CODE\n >>> import tensorflow as tf\n >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)\n\nWe can see we get the numbers from before:\n\n.. code-block::\n\n >>> ## TENSORFLOW CODE\n >>> print(tf_predictions)\n tf.Tensor(\n [[2.2043e-04 9.9978e-01]\n [5.3086e-01 4.6914e-01]], shape=(2, 2), dtype=float32)\n >>> ## PYTORCH CODE\n >>> print(pt_predictions)\n tensor([[2.2043e-04, 9.9978e-01],\n [5.3086e-01, 4.6914e-01]], grad_fn=<SoftmaxBackward>)\n\nIf you provide the model with labels in addition to inputs, the model output object will also contain a ``loss``\nattribute:\n\n.. code-block::\n\n >>> ## PYTORCH CODE\n >>> import torch\n >>> pt_outputs = pt_model(**pt_batch, labels = torch.tensor([1, 0]))\n >>> print(pt_outputs)\n SequenceClassifierOutput(loss=tensor(0.3167, grad_fn=<NllLossBackward>), logits=tensor([[-4.0833, 4.3364],\n [ 0.0818, -0.0418]], grad_fn=<AddmmBackward>), hidden_states=None, attentions=None)\n >>> ## TENSORFLOW CODE\n >>> import tensorflow as tf\n >>> tf_outputs = tf_model(tf_batch, labels = tf.constant([1, 0]))\n >>> print(tf_outputs)\n TFSequenceClassifierOutput(loss=<tf.Tensor: shape=(2,), dtype=float32, numpy=array([2.2051e-04, 6.3326e-01], dtype=float32)>, logits=<tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n array([[-4.0833 , 4.3364 ],\n [ 0.0818, -0.0418]], dtype=float32)>, hidden_states=None, attentions=None)\n\nModels are standard `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ or `tf.keras.Model\n<https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ so you can use them in your usual training loop. 🤗\nTransformers also provides a :class:`~transformers.Trainer` (or :class:`~transformers.TFTrainer` if you are using\nTensorFlow) class to help with your training (taking care of things such as distributed training, mixed precision,\netc.). See the :doc:`training tutorial <training>` for more details.\n\n.. note::\n\n Pytorch model outputs are special dataclasses so that you can get autocompletion for their attributes in an IDE.\n They also behave like a tuple or a dictionary (e.g., you can index with an integer, a slice or a string) in which\n case the attributes not set (that have :obj:`None` values) are ignored.\n\nOnce your model is fine-tuned, you can save it with its tokenizer in the following way:\n\n.. 
code-block::\n\n >>> ## PYTORCH CODE\n >>> pt_save_directory = './pt_save_pretrained'\n >>> tokenizer.save_pretrained(pt_save_directory)\n >>> pt_model.save_pretrained(pt_save_directory)\n >>> ## TENSORFLOW CODE\n >>> tf_save_directory = './tf_save_pretrained'\n >>> tokenizer.save_pretrained(tf_save_directory)\n >>> tf_model.save_pretrained(tf_save_directory)\n\nYou can then load this model back using the :func:`~transformers.AutoModel.from_pretrained` method by passing the\ndirectory name instead of the model name. One cool feature of 🤗 Transformers is that you can easily switch between\nPyTorch and TensorFlow: any model saved as before can be loaded back either in PyTorch or TensorFlow.\n\n\nIf you would like to load your saved model in the other framework, first make sure it is installed:\n\n.. code-block:: bash\n\n ## PYTORCH CODE\n pip install tensorflow\n ## TENSORFLOW CODE\n pip install torch\n\nThen, use the corresponding Auto class to load it like this:\n\n.. code-block::\n\n ## PYTORCH CODE\n >>> from transformers import TFAutoModel\n >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)\n >>> tf_model = TFAutoModel.from_pretrained(pt_save_directory, from_pt=True)\n ## TENSORFLOW CODE\n >>> from transformers import AutoModel\n >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)\n >>> pt_model = AutoModel.from_pretrained(tf_save_directory, from_tf=True)\n\n\nLastly, you can also ask the model to return all hidden states and all attention weights if you need them:\n\n\n.. code-block::\n\n >>> ## PYTORCH CODE\n >>> pt_outputs = pt_model(**pt_batch, output_hidden_states=True, output_attentions=True)\n >>> all_hidden_states = pt_outputs.hidden_states \n >>> all_attentions = pt_outputs.attentions\n >>> ## TENSORFLOW CODE\n >>> tf_outputs = tf_model(tf_batch, output_hidden_states=True, output_attentions=True)\n >>> all_hidden_states = tf_outputs.hidden_states\n >>> all_attentions = tf_outputs.attentions\n\nAccessing the code\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe :obj:`AutoModel` and :obj:`AutoTokenizer` classes are just shortcuts that will automatically work with any\npretrained model. Behind the scenes, the library has one model class per combination of architecture plus class, so the\ncode is easy to access and tweak if you need to.\n\nIn our previous example, the model was called \"distilbert-base-uncased-finetuned-sst-2-english\", which means it's using\nthe :doc:`DistilBERT </model_doc/distilbert>` architecture. As\n:class:`~transformers.AutoModelForSequenceClassification` (or\n:class:`~transformers.TFAutoModelForSequenceClassification` if you are using TensorFlow) was used, the model\nautomatically created is then a :class:`~transformers.DistilBertForSequenceClassification`. You can look at its\ndocumentation for all details relevant to that specific model, or browse the source code. This is how you would\ndirectly instantiate model and tokenizer without the auto magic:\n\n.. 
code-block::\n\n >>> ## PYTORCH CODE\n >>> from transformers import DistilBertTokenizer, DistilBertForSequenceClassification\n >>> model_name = \"distilbert-base-uncased-finetuned-sst-2-english\"\n >>> model = DistilBertForSequenceClassification.from_pretrained(model_name)\n >>> tokenizer = DistilBertTokenizer.from_pretrained(model_name)\n >>> ## TENSORFLOW CODE\n >>> from transformers import DistilBertTokenizer, TFDistilBertForSequenceClassification\n >>> model_name = \"distilbert-base-uncased-finetuned-sst-2-english\"\n >>> model = TFDistilBertForSequenceClassification.from_pretrained(model_name)\n >>> tokenizer = DistilBertTokenizer.from_pretrained(model_name)\n\nCustomizing the model\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIf you want to change how the model itself is built, you can define a custom configuration class. Each architecture\ncomes with its own relevant configuration. For example, :class:`~transformers.DistilBertConfig` allows you to specify\nparameters such as the hidden dimension, dropout rate, etc for DistilBERT. If you do core modifications, like changing\nthe hidden size, you won't be able to use a pretrained model anymore and will need to train from scratch. You would\nthen instantiate the model directly from this configuration.\n\nBelow, we load a predefined vocabulary for a tokenizer with the\n:func:`~transformers.DistilBertTokenizer.from_pretrained` method. However, unlike the tokenizer, we wish to initialize\nthe model from scratch. Therefore, we instantiate the model from a configuration instead of using the\n:func:`~transformers.DistilBertForSequenceClassification.from_pretrained` method.\n\n.. code-block::\n\n >>> ## PYTORCH CODE\n >>> from transformers import DistilBertConfig, DistilBertTokenizer, DistilBertForSequenceClassification\n >>> config = DistilBertConfig(n_heads=8, dim=512, hidden_dim=4*512)\n >>> tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n >>> model = DistilBertForSequenceClassification(config)\n >>> ## TENSORFLOW CODE\n >>> from transformers import DistilBertConfig, DistilBertTokenizer, TFDistilBertForSequenceClassification\n >>> config = DistilBertConfig(n_heads=8, dim=512, hidden_dim=4*512)\n >>> tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n >>> model = TFDistilBertForSequenceClassification(config)\n\nFor something that only changes the head of the model (for instance, the number of labels), you can still use a\npretrained model for the body. For instance, let's define a classifier for 10 different labels using a pretrained body.\nInstead of creating a new configuration with all the default values just to change the number of labels, we can instead\npass any argument a configuration would take to the :func:`from_pretrained` method and it will update the default\nconfiguration appropriately:\n\n.. 
code-block::\n\n >>> ## PYTORCH CODE\n >>> from transformers import DistilBertConfig, DistilBertTokenizer, DistilBertForSequenceClassification\n >>> model_name = \"distilbert-base-uncased\"\n >>> model = DistilBertForSequenceClassification.from_pretrained(model_name, num_labels=10)\n >>> tokenizer = DistilBertTokenizer.from_pretrained(model_name)\n >>> ## TENSORFLOW CODE\n >>> from transformers import DistilBertConfig, DistilBertTokenizer, TFDistilBertForSequenceClassification\n >>> model_name = \"distilbert-base-uncased\"\n >>> model = TFDistilBertForSequenceClassification.from_pretrained(model_name, num_labels=10)\n >>> tokenizer = DistilBertTokenizer.from_pretrained(model_name)\n"
},
{
"alpha_fraction": 0.6155766248703003,
"alphanum_fraction": 0.6210684180259705,
"avg_line_length": 48.46296310424805,
"blob_id": "d26b969c8acbb796ba1b78f5b62afbb54c92d0ed",
"content_id": "693f303593d301c706c1a826fc0b7343ae0c5eaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8012,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 162,
"path": "/SARCBertClassifier.py",
"repo_name": "PradeepNalluri/Prefix-Tuning-Bert",
"src_encoding": "UTF-8",
"text": "import transformers\nimport torch\nfrom transformers.modeling_outputs import SequenceClassifierOutput\nimport torch.nn as nn\nfrom transformers import BertTokenizer,BertForSequenceClassification,BertModel\n\nfrom torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss\n\nclass SARCBertClassifier(BertForSequenceClassification):\n \"\"\"\n Classifier to handle classification task on SARC dataset\n \"\"\"\n def __init__(self,config):\n super(SARCBertClassifier, self).__init__(config)\n# self.mlp_layer=None\n# self.prefix_embeddings =None\n self.run_device = None\n def update_network_sarc(self,num_layers,device,freeze_bert_layers=False,custom_embedding=False,custom_embedding_vector=None,add_user_information=False):\n \"\"\"\n Update the network architecture all the variable are class variables from source code of BerforSequenceClassification\n transformer module\n \"\"\"\n config=self.config\n if(freeze_bert_layers):\n for name,param in self.bert.named_parameters():\n if(name!=\"embeddings.prefix_embeddings.weight\"):\n param.requires_grad = False\n self.prefix_embeddings = nn.Embedding(config.prefix_length, config.hidden_size)\n self.prefix_length = config.prefix_length\n self.mlp_layer = nn.Sequential(nn.Linear(config.hidden_size, config.hidden_size),\n nn.Tanh(),\n nn.Linear(config.hidden_size,config.hidden_size))\n \n if(add_user_information):\n self.classifier = nn.Linear(config.hidden_size+2, config.num_labels)\n \n if(custom_embedding):\n self.prefix_length = config.prefix_length\n self.mlp_layer = nn.Sequential(nn.Linear(config.hidden_size, config.hidden_size),\n nn.Tanh(),\n nn.Linear(config.hidden_size,config.hidden_size))\n\n self.init_weights()\n \n custom_embedding_vector = custom_embedding_vector.expand(config.prefix_length,custom_embedding_vector.shape[0])\n self.prefix_embeddings=nn.Embedding.from_pretrained(custom_embedding_vector)\n else:\n self.prefix_embeddings = nn.Embedding(config.prefix_length, config.hidden_size)\n self.prefix_length = config.prefix_length\n self.mlp_layer = nn.Sequential(nn.Linear(config.hidden_size, config.hidden_size),\n nn.Tanh(),\n nn.Linear(config.hidden_size,config.hidden_size))\n self.init_weights()\n self.run_device = device\n \n def check_closest_matching_bert_model(self):\n prefix_tokens = self.prefix_embeddings(torch.LongTensor(torch.arange(1)).to(self.run_device)).detach()\n bert_base = self.bert.embeddings.word_embeddings(torch.LongTensor(torch.arange(30522)).to(self.run_device)).detach()\n closest_words_ids = [] \n for embd in prefix_tokens:\n closest_words_ids.append(torch.norm(bert_base - embd.unsqueeze(0), dim=1).topk(5).indices)\n tokenizer = BertTokenizer.from_pretrained(\"./prefix_tuning_model_random_initializations_prefix_tuninglr_2e-5/\")\n closest_words_ids=torch.stack(closest_words_ids)\n closest = {}\n for idx,t in enumerate(closest_words_ids):\n word_l = []\n for tok in t:\n word_l.append(tokenizer._convert_id_to_token(int(tok)))\n closest[idx]=word_l\n return closest\n \n def closest_matching_bert_model(self):\n prefix_tokens = self.prefix_embeddings(torch.LongTensor(torch.arange(self.prefix_embeddings.weight.shape[0])).to(self.run_device)).detach()\n bert_base = self.bert.embeddings.word_embeddings(torch.LongTensor(torch.arange(30522)).to(self.run_device)).detach()\n closest_words_ids = [] \n for embd in prefix_tokens:\n closest_words_ids.append(torch.norm(bert_base - embd.unsqueeze(0), dim=1).topk(5).indices)\n tokenizer = 
BertTokenizer.from_pretrained(\"./prefix_tuning_model_random_initializations_prefix_tuninglr_2e-5/\")\n closest_words_ids=torch.stack(closest_words_ids)\n closest = {}\n for idx,t in enumerate(closest_words_ids):\n word_l = []\n for tok in t:\n word_l.append(tokenizer._convert_id_to_token(int(tok)))\n closest[idx]=word_l\n return closest\n \n def forward(self,input_ids=None,attention_mask=None,token_type_ids=None,position_ids=None,\n head_mask=None,inputs_embeds=None,labels=None,output_attentions=None,output_hidden_states=None,return_dict=None,\n user_information=False):\n r\"\"\"\n\n FROM CORE HUGGINGFACE MODULE \n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n prefix_embds = self.prefix_embeddings(torch.arange(0, self.prefix_length).to(self.run_device))\n prefix_embds = self.mlp_layer(prefix_embds)\n prefix_embds = prefix_embds.expand(len(input_ids),prefix_embds.shape[0],prefix_embds.shape[1])\n attention_mask = torch.cat((torch.ones(self.prefix_length).to(self.run_device).expand(attention_mask.shape[0],self.prefix_length),attention_mask),1)\n \n# if(user_information):\n# attention_mask = attention_mask[:,2:] \n# user_ids = input_ids[:,:2]\n# input_ids = input_ids[:,2:].to(self.run_device).long()\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n prefix_embeddings=prefix_embds,\n )\n\n pooled_output = outputs[1]\n \n pooled_output = self.dropout(pooled_output)\n \n# if(user_information):\n# pooled_output = torch.cat((user_ids,pooled_output),dim=1)\n \n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )"
},
{
"alpha_fraction": 0.6129099726676941,
"alphanum_fraction": 0.6182135343551636,
"avg_line_length": 34.1225471496582,
"blob_id": "7f5f175d01dcfc66655e010dd0da4ea3ccb8dd80",
"content_id": "a0258422f3db9827adb41c88ed693a8166e15834",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14330,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 408,
"path": "/baselines/baseline_code.py",
"repo_name": "PradeepNalluri/Prefix-Tuning-Bert",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport pandas as pd \nfrom pandas import DataFrame\nimport torch\nimport torch.nn as nn\nimport multiprocessing as mp\nfrom torch.utils.data import TensorDataset, random_split\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom transformers import BertTokenizer\nfrom transformers import BertForSequenceClassification, AdamW, BertConfig\nfrom transformers import get_linear_schedule_with_warmup\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import confusion_matrix\nfrom SARCBertClassifier import SARCBertClassifier\nfrom keras.preprocessing.sequence import pad_sequences\nfrom string import ascii_uppercase\nfrom tqdm import tqdm\nimport seaborn as sn\nimport time\nimport datetime\nimport pickle\nimport os\nimport random\nimport itertools\nimport json\nimport argparse\n\ndef parallelize(function_pointer,list_to_parallelize,NUM_CORE=2*mp.cpu_count()):\n '''\n Prallel apply the given function to the list the numeber of process will \n be twice the number of cpu cores by default \n '''\n start=time.time()\n component_list=np.array_split(list_to_parallelize,NUM_CORE*10)\n pool = mp.Pool(NUM_CORE)\n results = pool.map(function_pointer,component_list)\n pool.close()\n pool.join()\n end=time.time()\n print(\"Executed in:\",end-start)\n return results\n\ndef find_max_length(sentences):\n \"\"\"\n Find the max length of the senteces\n \"\"\"\n max_len = 0\n for _,row in sentences.iterrows():\n sent=row[\"comment\"]\n try:\n train_inputs_ids = tokenizer.encode(sent, add_special_tokens=True)\n except:\n train_inputs_ids = tokenizer.encode(\"\", add_special_tokens=True)\n max_len = max(max_len, len(train_inputs_ids))\n return max_len\n\ndef compute_accuracy(preds, labels):\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\n\n\ndef format_time(elapsed):\n '''\n Takes a time in seconds and returns a string hh:mm:ss\n '''\n elapsed_rounded = int(round((elapsed)))\n \n return str(datetime.timedelta(seconds=elapsed_rounded))\n\ndef main(args):\n prepare_data = args.prepare_data\n save_processed_data = args.save_processed_data\n batch_size = args.batch_size\n custom = args.custom\n epochs = args.epochs\n learning_rate = args.learning_rate\n save_model = args.save_model\n tuning_mode = args.tuning_mode\n model_save_directory = args.tuning_mode\n\n if torch.cuda.is_available(): \n device = torch.device(\"cuda\")\n else:\n device = torch.device(\"cpu\")\n print(\"Using:\",device)\n\n if(prepare_data):\n data = pd.read_csv(\"train-balanced-sarcasm.csv\")\n training_set,test_set = train_test_split(data,stratify=data[[\"label\"]], test_size=0.1)\n del data\n\n #Storing for future use across experiments\n test_set.to_csv(\"test_set.csv\",index=False)\n\n training_set.dropna(subset=[\"comment\"],inplace=True)\n training_set.reset_index(drop=False,inplace=True)\n training_set.rename(columns={\"index\":\"id\"},inplace=True)\n sentences = training_set[[\"id\",\"comment\"]]\n labels = training_set[[\"id\",\"label\"]]\n \n max_len = max(parallelize(tokenize,sentences))\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n\n sentences=training_set.comment.values\n labels = training_set.label.values\n\n train_inputs_ids = []\n training_attention_masks = []\n\n #Tokenizing the sentences\n for sent in tqdm(sentences):\n encoded_dict = 
        for sent in tqdm(sentences):\n            encoded_dict = tokenizer.encode_plus(sent,add_special_tokens = True,max_length = 64,pad_to_max_length = True,\n                                    return_attention_mask = True,return_tensors = 'pt',)\n            \n            train_inputs_ids.append(encoded_dict['input_ids'])\n            \n            training_attention_masks.append(encoded_dict['attention_mask'])\n\n        train_inputs_ids = torch.cat(train_inputs_ids, dim=0)\n        training_attention_masks = torch.cat(training_attention_masks, dim=0)\n        labels = torch.tensor(labels)\n        # Save the processed tensors for future use\n        if(save_processed_data):\n            f = open('processed_data.pckl', 'wb')\n            pickle.dump([train_inputs_ids,training_attention_masks,labels], f)\n            f.close()\n        \n    else:\n        # Load the previously processed data\n        f = open('processed_data.pckl', 'rb')\n        input_processed_data = pickle.load(f)\n        f.close()\n        train_inputs_ids = input_processed_data[0]\n        training_attention_masks = input_processed_data[1]\n        labels = input_processed_data[2]\n    \n    print(\"Data Preparation Done\")\n\n    main_dataset = TensorDataset(train_inputs_ids, training_attention_masks, labels)\n\n    train_size = int(0.9 * len(main_dataset))\n    val_size = len(main_dataset) - train_size\n\n    train_dataset, validation_data = random_split(main_dataset, [train_size, val_size])\n\n    train_dataloader = DataLoader(train_dataset,sampler = RandomSampler(train_dataset),batch_size = batch_size,)\n\n    validation_dataloader = DataLoader(validation_data,sampler = SequentialSampler(validation_data), batch_size = batch_size,)\n    \n    if custom:\n        model = SARCBertClassifier.from_pretrained(\"bert-base-uncased\",num_labels = 2,output_attentions = False,output_hidden_states = False,)\n        model.update_network_sarc(2,device,freeze_bert_layers=tuning_mode==\"light_weight\")\n    else:\n        model = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\",num_labels = 2,output_attentions = False,output_hidden_states = False,)\n\n    if(torch.cuda.device_count()>1):\n        print(\"Parallelizing Model\")\n        model = nn.DataParallel(model)\n    model.to(device)\n    if torch.cuda.is_available():\n        model = model.cuda()\n    \n    print(\"Model Initialization Done\")\n\n    optimizer = AdamW(model.parameters(),lr = learning_rate,eps = 1e-8)\n\n    total_steps = len(train_dataloader) * epochs\n\n    scheduler = get_linear_schedule_with_warmup(optimizer,num_warmup_steps = 0,num_training_steps = total_steps)\n\n    print(\"Optimizer setup done\")\n\n    seed_val = 42\n    random.seed(seed_val)\n    np.random.seed(seed_val)\n    torch.manual_seed(seed_val)\n    torch.cuda.manual_seed_all(seed_val)\n    training_stats = []\n\n    total_t0 = time.time()\n    for epoch_i in range(0, epochs):\n        t0 = time.time()\n\n        batch_train_loss = 0\n\n        model.train()\n        \n        total_training_loss = 0\n        correct_preds, total_predictions = 0, 0\n        generator_tqdm = tqdm(train_dataloader)\n        \n        for step, batch in enumerate(generator_tqdm):\n\n            b_input_ids = batch[0].to(device)\n            b_input_mask = batch[1].to(device)\n            b_labels = batch[2].to(device)\n            \n            model.zero_grad()\n\n            result = model(b_input_ids, \n                           token_type_ids=None, \n                           attention_mask=b_input_mask, \n                           labels=b_labels,\n                           return_dict=True)\n\n            loss = result.loss\n            logits = result.logits\n\n            batch_train_loss += loss.mean()\n\n            loss.mean().backward()\n\n            # Clip gradients to a max norm of 1.0 to stabilize training\n            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n            optimizer.step()\n            optimizer.zero_grad()\n\n            scheduler.step()\n            \n            batch_predictions = np.argmax(nn.Softmax(dim=1)(logits).detach().cpu().numpy(), axis=-1)\n            correct_preds += (batch_predictions == b_labels.detach().cpu().numpy()).sum()\n            total_predictions += b_labels.shape[0]\n            \n            description = (\"Average training loss: %.2f Accuracy: %.2f Label sum: %.2f\"\n                           % 
(batch_train_loss/(step+1), correct_preds/total_predictions,batch_predictions.sum()))\n            generator_tqdm.set_description(description, refresh=False)\n        \n        train_loss = batch_train_loss / len(train_dataloader)\n        \n        training_time = format_time(time.time() - t0)\n        \n        t0 = time.time()\n\n        model.eval()\n\n        total_eval_accuracy = 0\n        total_eval_loss = 0\n        nb_eval_steps = 0\n        \n        for batch in tqdm(validation_dataloader):\n            b_input_ids = batch[0].to(device)\n            b_input_mask = batch[1].to(device)\n            b_labels = batch[2].to(device)\n            with torch.no_grad():\n                result = model(b_input_ids, \n                               token_type_ids=None, \n                               attention_mask=b_input_mask,\n                               labels=b_labels,\n                               return_dict=True)\n\n            loss = result.loss\n            logits = result.logits\n            \n            total_eval_loss += loss.mean()\n            \n            logits = logits.detach().cpu().numpy()\n            label_ids = b_labels.to('cpu').numpy()\n\n            total_eval_accuracy += compute_accuracy(logits, label_ids)\n        \n\n        avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)\n\n        avg_val_loss = total_eval_loss / len(validation_dataloader)\n        \n        validation_time = format_time(time.time() - t0)\n\n        training_stats.append(\n            {\n                'epoch': epoch_i + 1,\n                'Training Loss': train_loss,\n                'Valid. Loss': avg_val_loss,\n                'Valid. Accur.': avg_val_accuracy,\n                'Training Time': training_time,\n                'Validation Time': validation_time\n            }\n        )\n    \n    print(\"Training Time: {}\".format(format_time(time.time()-total_t0)))\n\n    if(not prepare_data):\n        tokenizer = BertTokenizer.from_pretrained(\"./model_save_new/\")\n\n    if(save_model):\n        output_dir = model_save_directory\n\n        if not os.path.exists(output_dir):\n            os.makedirs(output_dir)\n\n        print(\"Saving model to %s\" % output_dir)\n\n        model_to_save = model.module if hasattr(model, 'module') else model \n        model_to_save.save_pretrained(output_dir)\n        tokenizer.save_pretrained(output_dir)\n\n        pd.set_option('precision', 2)\n        df_stats = pd.DataFrame(data=training_stats)\n        df_stats = df_stats.set_index('epoch')\n        df_stats.to_csv(output_dir+\"/performance_stats.csv\",index=False)\n        print(\"Models Saved\")\n    \n    if(not prepare_data):\n        test_set = pd.read_csv(\"test_set.csv\")\n    \n    print(\"Started Testing\")\n    \n    sentences = test_set.dropna(subset=[\"comment\"]).comment.values\n    labels = test_set.dropna(subset=[\"comment\"]).label.values\n\n    test_inputs_ids = []\n\n    for sent in sentences:\n        encoded_sent = tokenizer.encode(sent,add_special_tokens = True,)\n        test_inputs_ids.append(encoded_sent)\n\n    test_inputs_ids = pad_sequences(test_inputs_ids, maxlen=64, \n                              dtype=\"long\", truncating=\"post\", padding=\"post\")\n\n    test_attention_masks = []\n\n    for seq in tqdm(test_inputs_ids):\n        seq_mask = [float(i>0) for i in seq]\n        test_attention_masks.append(seq_mask) \n\n    prediction_inputs = torch.tensor(test_inputs_ids)\n    prediction_masks = torch.tensor(test_attention_masks)\n    prediction_labels = torch.tensor(labels)\n\n    batch_size = 32 \n\n    prediction_data = TensorDataset(prediction_inputs, prediction_masks, prediction_labels)\n    prediction_sampler = SequentialSampler(prediction_data)\n    prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)\n\n    model.eval()\n\n    predictions , true_labels = [], []\n\n    for batch in tqdm(prediction_dataloader):\n        batch = tuple(t.to(device) for t in batch)\n\n        b_input_ids, b_input_mask, b_labels = batch\n        with torch.no_grad():\n            outputs = model(b_input_ids,token_type_ids=None,attention_mask=b_input_mask,labels=b_labels,return_dict=True)\n\n        logits = outputs.logits\n\n        logits = logits.detach().cpu().numpy()\n        label_ids = 
b_labels.to('cpu').numpy()\n\n predictions.append(logits)\n true_labels.append(label_ids)\n \n \n preds=[]\n actuals=[]\n for i in range(len(true_labels)):\n preds.append(list(np.argmax(predictions[i], axis=1).flatten()))\n actuals.append(list(true_labels[i]))\n\n preds = list(itertools.chain(*preds)) \n actuals = list(itertools.chain(*actuals))\n\n test_metrics = {}\n test_metrics['accuracy_score'] = accuracy_score(actuals,preds)*100\n test_metrics['f1_score'] = f1_score(actuals, preds, average='macro')\n\n confm = confusion_matrix(actuals, preds,normalize='true')\n columns = [\"Sarcastic\",\"Normal\"]\n df_cm = DataFrame(confm, index=columns, columns=columns)\n ax = sn.heatmap(df_cm, cmap='Oranges', annot=True)\n\n test_metrics[\"confusion_matrix\"] = df_cm.to_dict('list')\n \n # if(save_processed_data):\n with open(output_dir+'/test_metrics.json', 'w') as fp:\n json.dump(test_metrics, fp)\n \nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Train Dependency Parsing Model')\n # General training arguments\n\n parser.add_argument('--prepare_data', action=\"store_true\", default=False,\n help='if passed, will prepare data.')\n parser.add_argument('--save_processed_data', action=\"store_true\", default=False,\n help='if passed, save the processed data.')\n parser.add_argument('--batch_size', type=int, help='batch_size ', default=128)\n parser.add_argument('--custom', action=\"store_true\", default=True,\n help='if passed, use no custom.')\n parser.add_argument('--epochs', type=int, help='epochs ', default=4)\n parser.add_argument('--learning_rate', type=float, help='learning_rate ', default=0.005)\n parser.add_argument('--save_model', action=\"store_true\", default=True,\n help='if passed, save model.')\n parser.add_argument('--tuning_mode', type=str, choices=(\"light_weight\", \"fine_tune\"),\n help='tuning_mode', default=\"light_weight\")\n\n parser.add_argument('--model_save_directory', type=str,\n help='tuning_mode', default=\"temper\")\n\n args = parser.parse_args()\n\n main(args)\n"
},
{
"alpha_fraction": 0.5187997221946716,
"alphanum_fraction": 0.7093275785446167,
"avg_line_length": 16.730770111083984,
"blob_id": "e32043494fd84d7584c6a260a822b4df8903c68d",
"content_id": "2683e9c3befcd80b4bea4311d8b9c0421b600e94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 2766,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 156,
"path": "/requirements.txt",
"repo_name": "PradeepNalluri/Prefix-Tuning-Bert",
"src_encoding": "UTF-8",
"text": "absl-py==0.15.0\nargon2-cffi==21.1.0\nastunparse==1.6.3\nasync-generator==1.10\nattrs==21.2.0\nbackports-abc==0.5\nbackports.functools-lru-cache==1.6.4\nbackports.shutil-get-terminal-size==1.0.0\nbertviz==1.2.0\nbleach==4.1.0\nbokeh==2.3.3\nboto3==1.20.18\nbotocore==1.23.18\ncached-property==1.5.2\ncachetools==4.2.4\ncertifi==2021.5.30\ncffi==1.15.0\nchardet==4.0.0\ncharset-normalizer==2.0.4\nclang==5.0\nclick==7.1.2\ncloudpickle==1.6.0\ncnsenti==0.0.7\ncontextvars==2.4\ncycler==0.11.0\ndask==2021.3.0\ndataclasses==0.8\ndecorator==4.4.2\ndefusedxml==0.7.1\ndistributed==2021.3.0\nentrypoints==0.3\nenum34==1.1.10\net-xmlfile==1.0.1\nfilelock==3.3.2\nflatbuffers==1.12\nfsspec==2021.8.1\nfutures==3.1.1\ngast==0.4.0\ngoogle-auth==1.35.0\ngoogle-auth-oauthlib==0.4.6\ngoogle-pasta==0.2.0\ngraphviz==0.19\ngrpcio==1.41.1\nh5py==3.1.0\nHeapDict==1.0.1\nhiddenlayer==0.3\nhuggingface-hub==0.1.0\nidna==2.10\nimmutables==0.16\nimportlib-metadata==4.8.1\nipykernel==4.10.1\nipython==5.10.0\nipython-genutils==0.2.0\nipywidgets==7.6.5\njdcal==1.4.1\njieba==0.42.1\nJinja2==3.0.2\njmespath==0.10.0\njoblib==1.1.0\njsonschema==3.2.0\njupyter-client==7.0.6\njupyter-core==4.9.1\njupyterlab-pygments==0.1.2\njupyterlab-widgets==1.0.2\nkeras==2.6.0\nKeras-Preprocessing==1.1.2\nkiwisolver==1.1.0\nlocket==0.2.1\nMarkdown==3.3.4\nMarkupSafe==2.0.1\nmatplotlib==3.3.4\nmistune==0.8.4\nmock==4.0.3\nmsgpack==1.0.2\nnbclient==0.5.4\nnbconvert==6.0.7\nnbformat==5.1.3\nnest-asyncio==1.5.1\nnotebook==6.4.5\nnumpy==1.19.5\noauthlib==3.1.1\nopenpyxl==2.6.4\nopt-einsum==3.3.0\npackaging==21.0\npandas==0.24.2\npandocfilters==1.5.0\npartd==1.2.0\npathlib2==2.3.6\npexpect==4.8.0\npickleshare==0.7.5\nPillow==8.3.1\nprometheus-client==0.12.0\nprompt-toolkit==1.0.18\nprotobuf==3.19.1\npsutil==5.8.0\nptyprocess==0.7.0\npyasn1==0.4.8\npyasn1-modules==0.2.8\npycparser==2.20\nPygments==2.5.2\npyparsing==2.4.7\nPyphen==0.10.0\npyreadr==0.4.2\npyrsistent==0.18.0\npython-dateutil==2.8.2\npytz==2021.1\nPyYAML==6.0\npyzmq==19.0.2\nregex==2021.7.6\nrepoze.lru==0.7\nrequests==2.26.0\nrequests-oauthlib==1.3.0\nrsa==4.7.2\ns3transfer==0.5.0\nsacremoses==0.0.46\nscandir==1.10.0\nscikit-learn==0.24.2\nscipy==1.5.4\nseaborn==0.9.1\nSend2Trash==1.8.0\nsentencepiece==0.1.96\nsimplegeneric==0.8.1\nsingledispatch==3.6.2\nsix==1.15.0\nsklearn==0.0\nsortedcontainers==2.4.0\nsubprocess32==3.5.4\ntblib==1.7.0\ntensorboard==2.6.0\ntensorboard-data-server==0.6.1\ntensorboard-plugin-wit==1.8.0\ntensorflow==2.6.2\ntensorflow-estimator==2.6.0\ntermcolor==1.1.0\nterminado==0.12.1\ntestpath==0.5.0\ntextstat==0.5.7\nthreadpoolctl==3.0.0\ntokenizers==0.10.3\ntoolz==0.11.2\ntorch==1.9.0\ntorchviz==0.0.2\ntornado==6.1\ntqdm==4.62.3\ntraitlets==4.3.3\ntyping-extensions==3.7.4.3\nurllib3==1.26.6\nvaderSentiment==3.3.2\nwcwidth==0.2.5\nwebencodings==0.5.1\nWerkzeug==2.0.2\nwidgetsnbextension==3.5.2\nwrapt==1.12.1\nxlrd==1.2.0\nzict==2.0.0\nzipp==3.6.0\n"
},
{
"alpha_fraction": 0.5809327960014343,
"alphanum_fraction": 0.584133505821228,
"avg_line_length": 46.55434799194336,
"blob_id": "4baed75099c36aa284f7636b569f0aefac34bf5e",
"content_id": "f47374281208c31cef3622015402dcad91ed59b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4374,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 92,
"path": "/baselines/SARCBertClassifier.py",
"repo_name": "PradeepNalluri/Prefix-Tuning-Bert",
"src_encoding": "UTF-8",
"text": "from transformers import BertModel\nfrom transformers.modeling_outputs import SequenceClassifierOutput\nfrom torch.nn import CrossEntropyLoss\nimport torch.nn as nn\nfrom transformers import BertForSequenceClassification, AdamW, BertConfig\n\nclass SARCBertClassifier(BertForSequenceClassification):\n \"\"\"\n Classifier to handle classification task on SARC dataset\n \"\"\"\n def __init__(self,config):\n super(SARCBertClassifier, self).__init__(config)\n \n def update_network_sarc(self,num_layers,device,freeze_bert_layers=False):\n \"\"\"\n Update the network architecture all the variable are class variables from source code of BerforSequenceClassification\n transformer module\n \"\"\"\n config=self.config\n if(freeze_bert_layers):\n for param in self.bert.parameters():\n param.requires_grad = False\n self.classifier = nn.Sequential()\n for layer in range(num_layers-1):\n self.classifier.add_module(\"classification_layer_\"+str(layer+1),nn.Linear(config.hidden_size, config.hidden_size))\n self.classifier.add_module(\"activation_layer_\"+str(layer+1),nn.ReLU())\n \n self.classifier.add_module(\"output_layer\",nn.Linear(config.hidden_size, config.num_labels))\n self.classifier.to(device)\n self.init_weights()\n \n def forward(self,input_ids=None,attention_mask=None,token_type_ids=None,position_ids=None,\n head_mask=None,inputs_embeds=None,labels=None,output_attentions=None,output_hidden_states=None,return_dict=None,):\n r\"\"\"\n\n FROM CORE HUGGINGFACE MODULE \n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,\n config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.bert(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n pooled_output = outputs[1]\n\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )"
},
{
"alpha_fraction": 0.5703253149986267,
"alphanum_fraction": 0.5749512314796448,
"avg_line_length": 41.60097885131836,
"blob_id": "0d948810acd26f862aa5ed723d3169c3c4c44f33",
"content_id": "3be435db1ee065e34f8345c3213e6cd82730bcb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26157,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 614,
"path": "/train.py",
"repo_name": "PradeepNalluri/Prefix-Tuning-Bert",
"src_encoding": "UTF-8",
"text": "from urllib import parse\nimport numpy as np \nimport pandas as pd \nfrom pandas import DataFrame\nimport torch\nimport torch.nn as nn\nimport multiprocessing as mp\nfrom torch.utils.data import TensorDataset, random_split\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom transformers import BertTokenizer\nfrom transformers import BertForSequenceClassification, AdamW, BertConfig,BertModel\nfrom transformers import get_linear_schedule_with_warmup\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import confusion_matrix\nfrom SARCBertClassifier import SARCBertClassifier\nfrom keras.preprocessing.sequence import pad_sequences\nfrom string import ascii_uppercase\nfrom tqdm import tqdm\nimport seaborn as sn\nimport time\nimport datetime\nimport pickle\nimport os\nimport random\nimport itertools\nimport json\nimport argparse\n\ndef get_bert_embedding(word):\n \"\"\"\n Create embedding for phrases this can be used to initialize the prefix embeddings based on phrases\n \"\"\"\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n tokenized_res = tokenizer.encode_plus(\n word, # Sentence to encode.\n add_special_tokens = True, # Add '[CLS]' and '[SEP]'\n max_length = 5, # Pad & truncate all sentences.\n pad_to_max_length = True,\n return_attention_mask = True, # Construct attn. masks.\n return_tensors = 'pt', # Return pytorch tensors.\n )\n embed_model = BertModel.from_pretrained('bert-base-uncased')\n embed_model.eval()\n embedding = embed_model(**tokenized_res)\n embedding = embedding.last_hidden_state.detach()#[:,tokenized_res['attention_mask']] \n required_embedding = embedding[0][tokenized_res['attention_mask'][0]==1,:]\n required_embedding = required_embedding.mean(dim=0)\n del embed_model\n del embedding \n return required_embedding\n\ndef parallelize(function_pointer,list_to_parallelize,NUM_CORE=2*mp.cpu_count()):\n '''\n Prallel apply the given function to the list the numeber of process will \n be twice the number of cpu cores by default \n '''\n start=time.time()\n component_list=np.array_split(list_to_parallelize,NUM_CORE*10)\n pool = mp.Pool(NUM_CORE)\n results = pool.map(function_pointer,component_list)\n pool.close()\n pool.join()\n end=time.time()\n print(\"Executed in:\",end-start)\n return results\n\ndef find_max_length(sentences,tokenizer):\n \"\"\"\n Find the max length of the senteces\n \"\"\"\n max_len = 0\n for _,row in sentences.iterrows():\n sent=row[\"comment\"]\n try:\n train_inputs_ids = tokenizer.encode(sent, add_special_tokens=True)\n except:\n train_inputs_ids = tokenizer.encode(\"\", add_special_tokens=True)\n max_len = max(max_len, len(train_inputs_ids))\n return max_len\n\ndef compute_accuracy(preds, labels):\n pred_flat = np.argmax(preds, axis=1).flatten()\n labels_flat = labels.flatten()\n return np.sum(pred_flat == labels_flat) / len(labels_flat)\n\n\n\ndef format_time(elapsed):\n '''\n Takes a time in seconds and returns a string hh:mm:ss\n '''\n elapsed_rounded = int(round((elapsed)))\n \n return str(datetime.timedelta(seconds=elapsed_rounded))\n\ndef main(args):\n prepare_data = args.prepare_data\n save_processed_data = args.save_processed_data\n batch_size = args.batch_size\n epochs = args.epochs\n learning_rate = args.learning_rate\n save_model = args.save_model\n tuning_mode = args.tuning_mode\n model_save_directory = 
\"model_experiment_\"+tuning_mode+\"_batch_\"+str(batch_size)+\"_lr_\"+str(learning_rate)+\"_epoch_\"+str(epochs)+\"/\"\n prefix_tuning = True if \"prefix\" in tuning_mode else False\n use_multi_gpu = args.use_multi_gpu\n phrase_for_init = args.phrase_for_init\n checkpoint = args.checkpoint\n analyze_tokens = args.analyze_tokens\n test_file = args.test_file\n train_model = not args.evaluate\n train_data = args.train_data\n model_directory_to_use = args.saved_model_location\n\n if(prefix_tuning):\n prefix_length = args.prefix_length\n\n if torch.cuda.is_available(): \n device = torch.device(\"cuda\")\n else:\n device = torch.device(\"cpu\")\n print(\"Using:\",device)\n \n if(train_model):\n if(prepare_data):\n try:\n data = pd.read_csv(train_data)\n except:\n raise Exception(\"File not found: Make sure you download the dataset from https://www.kaggle.com/danofer/sarcasm/ The data should be kept in main folder\")\n training_set,test_set = train_test_split(data,stratify=data[[\"label\"]], test_size=0.1)\n del data\n\n #Storing for future use across experiments\n test_set.to_csv(\"test_set.csv\",index=False)\n\n training_set.dropna(subset=[\"comment\"],inplace=True)\n training_set.reset_index(drop=False,inplace=True)\n training_set.rename(columns={\"index\":\"id\"},inplace=True)\n sentences = training_set[[\"id\",\"comment\"]]\n labels = training_set[[\"id\",\"label\"]]\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n\n sentences=training_set.comment.values\n labels = training_set.label.values\n\n train_inputs_ids = []\n training_attention_masks = []\n\n #Tokenizing the sentences\n for sent in tqdm(sentences):\n encoded_sentences = tokenizer.encode_plus(sent,add_special_tokens = True,max_length = 64,pad_to_max_length = True,\n return_attention_mask = True,return_tensors = 'pt',)\n \n train_inputs_ids.append(encoded_sentences['input_ids'])\n \n training_attention_masks.append(encoded_sentences['attention_mask'])\n\n train_inputs_ids = torch.cat(train_inputs_ids, dim=0)\n training_attention_masks = torch.cat(training_attention_masks, dim=0)\n labels = torch.tensor(labels)\n #save data for future use\n if(save_processed_data):\n f = open('processed_data.pckl', 'wb')\n pickle.dump([train_inputs_ids,training_attention_masks,labels], f)\n f.close()\n \n else:\n #lOAD THE DATA\n f = open('processed_data.pckl', 'rb')\n input_processed_data = pickle.load(f)\n f.close()\n train_inputs_ids = input_processed_data[0]\n training_attention_masks = input_processed_data[1]\n labels = input_processed_data[2]\n \n print(\"Data Preperation Done\")\n\n main_dataset = TensorDataset(train_inputs_ids, training_attention_masks, labels)\n\n train_size = int(0.9 * len(main_dataset))\n val_size = len(main_dataset) - train_size\n\n train_dataset, validation_data = random_split(main_dataset, [train_size, val_size])\n\n train_dataloader = DataLoader(train_dataset,sampler = RandomSampler(train_dataset),batch_size = batch_size,)\n\n validation_dataloader = DataLoader(validation_data,sampler = SequentialSampler(validation_data), batch_size = batch_size,)\n \n config = BertConfig.from_pretrained(\"bert-base-uncased\", # Use the 12-layer BERT model, with an uncased vocab.\n num_labels = 2, # The number of output labels--2 for binary classification.\n output_attentions = False, # Whether the model returns attentions weights.\n output_hidden_states = False, # Whether the model returns all hidden-states.\n )\n if(prefix_tuning):\n config.prefix_length = prefix_length\n phrase_for_init = 
phrase_for_init\n \n if prefix_tuning:\n model = SARCBertClassifier(config)\n model.update_network_sarc(2,device,freeze_bert_layers=True)\n model.to(device)\n if(tuning_mode == \"prefix_bottom_two_layers\"):\n for n,p in model.named_parameters():\n if(n==\"prefix_embeddings.weight\" or \"bert.encoder.layer.0.\" in n or \"bert.encoder.layer.1.\" in n or n==\"classifier.weight\" or n==\"classifier.bias\"):\n p.requires_grad = True\n else:\n p.requires_grad = False\n if p.requires_grad:\n print(\"Tuning:\",n)\n \n elif(tuning_mode == \"prefix_top_two_layers\"):\n for n,p in model.named_parameters():\n if(n==\"prefix_embeddings.weight\" or \"bert.encoder.layer.10.\" in n or \"bert.encoder.layer.11.\" in n or n==\"classifier.weight\" or n==\"classifier.bias\"):\n p.requires_grad = True\n else:\n p.requires_grad = False\n if p.requires_grad:\n print(\"Tuning:\",n)\n \n elif(tuning_mode == \"prefix_bert_embedding_layer\"):\n for n,p in model.named_parameters():\n if(n==\"prefix_embeddings.weight\" or \"bert.embeddings.word_embeddings.weight\" in n or n==\"classifier.weight\" or n==\"classifier.bias\"):\n p.requires_grad = True\n else:\n p.requires_grad = False\n if p.requires_grad:\n print(\"Tuning:\",n)\n \n elif(tuning_mode == \"prefix_custom_initializaition\"):\n del model\n custom_embedding = get_bert_embedding(phrase_for_init)\n model = SARCBertClassifier(config)\n model.update_network_sarc(2,device,freeze_bert_layers=True,custom_embedding=True,custom_embedding_vector=custom_embedding)\n\n for n,p in model.named_parameters():\n if(n==\"prefix_embeddings.weight\" or n==\"classifier.weight\" or n==\"classifier.bias\"):\n p.requires_grad = True\n else:\n p.requires_grad = False\n if p.requires_grad:\n print(\"Tuning:\",n)\n \n elif(tuning_mode == \"prefix_random_initializaition\"):\n for n,p in model.named_parameters():\n if(n==\"prefix_embeddings.weight\" in n or n==\"classifier.weight\" or n==\"classifier.bias\"):\n p.requires_grad = True\n else:\n p.requires_grad = False\n if p.requires_grad:\n print(\"Tuning:\",n)\n\n else:\n raise Exception(\"Exception: Unknow Experiment\")\n else:\n model = BertForSequenceClassification(config)\n \n if(tuning_mode == \"noprefix_top_two_layers\"):\n for n,p in model.named_parameters():\n if(\"bert.encoder.layer.10.\" in n or \"bert.encoder.layer.11.\" in n or n==\"classifier.weight\" or n==\"classifier.bias\"):\n p.requires_grad = True\n else:\n p.requires_grad = False\n if p.requires_grad:\n print(\"Tuning:\",n)\n \n elif(tuning_mode == \"noprefix_bottom_two_layers\"):\n for n,p in model.named_parameters(): \n if(\"bert.encoder.layer.0.\" in n or \"bert.encoder.layer.1.\" in n or n==\"classifier.weight\" or n==\"classifier.bias\"):\n p.requires_grad = True\n else:\n p.requires_grad = False\n if p.requires_grad:\n print(\"Tuning:\",n)\n \n elif(tuning_mode == \"noprefix_embedding_layer_update\"):\n for n,p in model.named_parameters(): \n if(\"bert.embeddings.word_embeddings.weight\" in n or n==\"classifier.weight\" or n==\"classifier.bias\"):\n p.requires_grad = True\n else:\n p.requires_grad = False\n if p.requires_grad:\n print(\"Tuning:\",n)\n\n elif(tuning_mode==\"baseline_finetune\"):\n for n,p in model.named_parameters(): \n p.requires_grad = True\n if p.requires_grad:\n print(\"Tuning:\",n)\n elif(tuning_mode==\"baseline_lightweight_finetune\"):\n for n,p in model.named_parameters(): \n if(n==\"classifier.weight\" or n==\"classifier.bias\"):\n p.requires_grad = True\n if p.requires_grad:\n print(\"Tuning:\",n)\n \n else:\n raise 
Exception(\"Exception: Unknow Experiment\")\n \n if(use_multi_gpu and torch.cuda.device_count()>1):\n print(\"Parallelizing Model\")\n model = nn.DataParallel(model)\n model.to(device)\n model = model.cuda()\n \n print(\"Model Initialization Done\")\n\n optimizer = AdamW(model.parameters(),lr = learning_rate,eps = 1e-8)\n\n total_steps = len(train_dataloader) * epochs\n\n scheduler = get_linear_schedule_with_warmup(optimizer,num_warmup_steps = 0,num_training_steps = total_steps)\n\n print(\"Optimizer setup done\")\n\n seed_val = 42\n random.seed(seed_val)\n np.random.seed(seed_val)\n torch.manual_seed(seed_val)\n torch.cuda.manual_seed_all(seed_val)\n training_stats = []\n\n total_t0 = time.time()\n \n for epoch_i in range(0, epochs):\n t0 = time.time()\n\n batch_train_loss = 0\n\n model.train()\n \n total_training_loss = 0\n correct_preds, total_predictions = 0, 0\n generator_tqdm = tqdm(train_dataloader)\n if(checkpoint):\n output_dir = model_save_directory+\"_checkpoint/\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n print(\"Saving model to %s\" % output_dir)\n model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir) \n if(analyze_tokens):\n with open(output_dir+\"prefix_embed_matching_words_epoch_\"+str(epoch_i)+\".json\", 'w') as fp:\n json.dump(model.closest_matching_bert_model(), fp) \n for step, batch in enumerate(generator_tqdm):\n if(prefix_length):\n b_input_ids = torch.cat((torch.arange(0, config.prefix_length).expand(batch[0].shape[0], config.prefix_length),batch[0]),1).to(device)\n b_input_mask = torch.cat((torch.ones(config.prefix_length).expand(batch[1].shape[0], config.prefix_length),batch[1]),1).to(device)\n else:\n b_input_ids = batch[0].to(device)\n b_input_mask = batch[1].to(device)\n b_labels = batch[2].to(device)\n \n model.zero_grad() \n\n result = model(b_input_ids, \n token_type_ids=None, \n attention_mask=b_input_mask, \n labels=b_labels,\n return_dict=True)\n\n loss = result.loss\n logits = result.logits\n\n batch_train_loss += loss.mean()\n\n loss.mean().backward()\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n optimizer.step()\n optimizer.zero_grad() \n\n scheduler.step()\n \n batch_predictions = np.argmax(nn.Softmax(dim=1)(logits).detach().cpu().numpy(), axis=-1)\n correct_preds += (batch_predictions == b_labels.detach().cpu().numpy()).sum()\n total_predictions += b_labels.shape[0]\n \n description = (\"Average training loss: %.2f Accuracy: %.2f Lable sum: %2f\"\n % (batch_train_loss/(step+1), correct_preds/total_predictions,batch_predictions.sum()))\n generator_tqdm.set_description(description, refresh=False)\n \n train_loss = batch_train_loss / len(train_dataloader) \n \n training_time = format_time(time.time() - t0)\n \n t0 = time.time()\n\n model.eval()\n\n total_eval_accuracy = 0\n total_eval_loss = 0\n nb_eval_steps = 0\n \n for batch in tqdm(validation_dataloader):\n if(prefix_tuning):\n b_input_ids = torch.cat((torch.arange(0, config.prefix_length).expand(batch[0].shape[0], config.prefix_length),batch[0]),1).to(device)\n\n b_input_mask = torch.cat((torch.ones(config.prefix_length).expand(batch[1].shape[0], config.prefix_length),batch[1]),1).to(device)\n else:\n b_input_ids=batch[0].to(device)\n b_input_mask=batch[1].to(device)\n b_labels = batch[2].to(device)\n with torch.no_grad():\n result = model(b_input_ids, \n token_type_ids=None, \n attention_mask=b_input_mask,\n labels=b_labels,\n return_dict=True)\n\n loss = 
                loss = result.loss\n                logits = result.logits\n                \n                total_eval_loss += loss.mean()\n                \n                logits = logits.detach().cpu().numpy()\n                label_ids = b_labels.to('cpu').numpy()\n\n                total_eval_accuracy += compute_accuracy(logits, label_ids)\n            \n\n            avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)\n\n            avg_val_loss = total_eval_loss / len(validation_dataloader)\n            \n            validation_time = format_time(time.time() - t0)\n\n            training_stats.append(\n                {\n                    'epoch': epoch_i + 1,\n                    'Training Loss': train_loss,\n                    'Valid. Loss': avg_val_loss,\n                    'Valid. Accur.': avg_val_accuracy,\n                    'Training Time': training_time,\n                    'Validation Time': validation_time\n                }\n            )\n        \n        print(\"Training Time: {}\".format(format_time(time.time()-total_t0)))\n\n        if(not prepare_data):\n            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n\n        if(save_model):\n            output_dir = model_save_directory\n\n            if not os.path.exists(output_dir):\n                os.makedirs(output_dir)\n\n            print(\"Saving model to %s\" % output_dir)\n\n            model_to_save = model.module if hasattr(model, 'module') else model \n            model_to_save.save_pretrained(output_dir)\n            tokenizer.save_pretrained(output_dir)\n\n            pd.set_option('precision', 2)\n            df_stats = pd.DataFrame(data=training_stats)\n            df_stats = df_stats.set_index('epoch')\n            df_stats.to_csv(output_dir+\"/performance_stats.csv\",index=False)\n            print(\"Models Saved\")\n        \n    else:\n        if(not model_directory_to_use):\n            raise Exception(\"Must give the folder location of the pretrained model during evaluation\")\n        output_dir = model_directory_to_use  # hoisted so test_metrics.json can be written in both branches below\n        if(prefix_tuning):\n            print(\"Loading Model from: \" , model_directory_to_use)\n            config = BertConfig.from_pretrained(output_dir,output_hidden_states=True, output_attentions=True)\n            config.user_embeddings=False\n            config.prefix_length = prefix_length\n            model = SARCBertClassifier.from_pretrained(output_dir,config=config,)\n        else:\n            model = BertForSequenceClassification.from_pretrained(model_directory_to_use)\n        \n        model.to(device)\n        if(prefix_tuning):\n            model.prefix_length = prefix_length\n        model.run_device = device\n        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n    if(not prepare_data):\n        test_set = pd.read_csv(test_file)\n    \n    print(\"Started Testing\")\n    \n    sentences = test_set.dropna(subset=[\"comment\"]).comment.values\n    labels = test_set.dropna(subset=[\"comment\"]).label.values\n\n    test_inputs_ids = []\n\n    for sent in sentences:\n        encoded_sent = tokenizer.encode(sent,add_special_tokens = True,)\n        test_inputs_ids.append(encoded_sent)\n\n    test_inputs_ids = pad_sequences(test_inputs_ids, maxlen=64, \n                              dtype=\"long\", truncating=\"post\", padding=\"post\")\n\n    test_attention_masks = []\n\n    for seq in tqdm(test_inputs_ids):\n        seq_mask = [float(i>0) for i in seq]\n        test_attention_masks.append(seq_mask) \n\n    prediction_inputs = torch.tensor(test_inputs_ids)\n    prediction_masks = torch.tensor(test_attention_masks)\n    prediction_labels = torch.tensor(labels)\n\n    batch_size = 32 \n\n    prediction_data = TensorDataset(prediction_inputs, prediction_masks, prediction_labels)\n    prediction_sampler = SequentialSampler(prediction_data)\n    prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)\n\n    model.eval()\n\n    predictions , true_labels = [], []\n\n    for batch in tqdm(prediction_dataloader):\n        batch = tuple(t.to(device) for t in batch)\n\n        b_input_ids, b_input_mask, b_labels = batch\n        if(prefix_tuning):\n            b_input_ids = torch.cat((torch.arange(0, 5).expand(b_input_ids.shape[0], 5).to(device),b_input_ids),1).to(device)\n            
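# NOTE: the hard-coded 5 in these two torch.cat calls matches the default --prefix_length of 5; reusing the prefix_length variable would be safer for non-default runs.\n            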
b_input_mask = torch.cat((torch.ones(5).expand(b_input_mask.shape[0], 5).to(device),b_input_mask),1).to(device)\n with torch.no_grad():\n outputs = model(b_input_ids,token_type_ids=None,attention_mask=b_input_mask,labels=b_labels,return_dict=True)\n\n logits = outputs.logits\n\n logits = logits.detach().cpu().numpy()\n label_ids = b_labels.to('cpu').numpy()\n\n predictions.append(logits)\n true_labels.append(label_ids)\n \n \n preds=[]\n actuals=[]\n for i in range(len(true_labels)):\n preds.append(list(np.argmax(predictions[i], axis=1).flatten()))\n actuals.append(list(true_labels[i]))\n\n preds = list(itertools.chain(*preds)) \n actuals = list(itertools.chain(*actuals))\n\n test_metrics = {}\n test_metrics['accuracy_score'] = accuracy_score(actuals,preds)*100\n test_metrics['f1_score'] = f1_score(actuals, preds, average='macro')\n\n confm = confusion_matrix(actuals, preds,normalize='true')\n columns = [\"Sarcastic\",\"Normal\"]\n df_cm = DataFrame(confm, index=columns, columns=columns)\n ax = sn.heatmap(df_cm, cmap='Oranges', annot=True)\n\n test_metrics[\"confusion_matrix\"] = df_cm.to_dict('list')\n \n # if(save_processed_data):\n with open(output_dir+'/test_metrics.json', 'w') as fp:\n json.dump(test_metrics, fp)\n \nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Train Bert Model - Prefix Tuning')\n # General training arguments\n parser.add_argument(\"--train_data\",type=str,help=\"training dataset file that have to be used\",default=\"sample_train.csv\")\n\n parser.add_argument('--prepare_data', action=\"store_true\", default=True,\n help='if passed, will prepare data.')\n \n parser.add_argument('--save_processed_data', action=\"store_true\", default=False,\n help='if passed, save the processed data.')\n \n parser.add_argument('--batch_size', type=int, help='batch_size ', default=64)\n \n parser.add_argument('--custom', action=\"store_true\", default=True,\n help='if passed, use no custom.')\n \n parser.add_argument('--epochs', type=int, help='epochs ', default=4)\n \n parser.add_argument('--learning_rate', type=float, help='learning_rate ', default=2e-5)\n \n parser.add_argument('--save_model', action=\"store_true\", default=True,\n help='if passed, save model.')\n \n parser.add_argument('--prefix_length', type=int, help='number of prefix tokens ', default=5)\n\n parser.add_argument('--model_save_directory', type=str,\n help='save the model to', default=\"model_store/\")\n\n parser.add_argument(\"--tuning_mode\", type=str,\n help='Name of the tuning_mode', default=\"prefix_random_initializaition\",choices=[\"prefix_bottom_two_layers\",\"prefix_top_two_layers\",\n \"prefix_bert_embedding_layer\",\"prefix_custom_initializaition\",\"prefix_random_initializaition\",\n \"noprefix_top_two_layers\",\"noprefix_bottom_two_layers\",\"baseline_finetune\",\n \"baseline_lightweight_finetune\",\"noprefix_embedding_layer_update\"])\n\n parser.add_argument(\"--use_multi_gpu\",type=bool,help=\"Use Multiple GPUs\",default=False)\n \n parser.add_argument(\"--phrase_for_init\",type=str,help=\"If using custom initialization this will be used to initialize the prefix tokens\",default=False)\n \n parser.add_argument(\"--checkpoint\",type=str,help=\"to checkpoint the model at each epoch\",default=True)\n \n parser.add_argument(\"--analyze_tokens\",type=bool,help=\"Closest words in bert vocab in each epoch are extracted\",default=False)\n \n parser.add_argument(\"--test_file\",type=str,help=\"test file that have to be used\",default=\"sample_test.csv\")\n \n 
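# NOTE: --evaluate must be paired with --saved_model_location (main() raises an exception otherwise); also, --prepare_data is a store_true flag with default=True, so it cannot currently be disabled from the command line.\n    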
parser.add_argument(\"--evaluate\",action=\"store_true\",help=\"To run the script in Evaluation mode\",default=False)\n\n parser.add_argument(\"--saved_model_location\",type=str,help=\"Loaction of the stored model, must be used when only evaluation is called\")\n\n args = parser.parse_args()\n\n main(args)\n"
}
] | 8 |
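The evaluation loop in the script above prepends `prefix_length` reserved token ids (0..4) plus matching always-on attention-mask columns to every batch before calling the prefix-tuned model. A minimal, self-contained sketch of just that prepending step; the function and variable names here are illustrative, not taken from the repo:

```python
import torch

def prepend_prefix(input_ids: torch.Tensor, attention_mask: torch.Tensor,
                   prefix_length: int = 5):
    """Prepend reserved prefix-token ids and always-attended mask columns.

    input_ids:      (batch, seq_len) token ids
    attention_mask: (batch, seq_len) 1 = attend, 0 = padding
    """
    batch = input_ids.shape[0]
    # ids 0..prefix_length-1 act as placeholders the model maps to
    # learned prefix embeddings
    prefix_ids = torch.arange(prefix_length).expand(batch, prefix_length)
    prefix_mask = torch.ones(batch, prefix_length, dtype=attention_mask.dtype)
    return (torch.cat((prefix_ids, input_ids), dim=1),
            torch.cat((prefix_mask, attention_mask), dim=1))

# usage: shapes grow from (2, 8) to (2, 13) with prefix_length = 5
ids = torch.randint(100, 30000, (2, 8))
mask = torch.ones(2, 8, dtype=torch.long)
new_ids, new_mask = prepend_prefix(ids, mask)
assert new_ids.shape == (2, 13) and new_mask.shape == (2, 13)
```

Parameterizing on `prefix_length` (instead of the hard-coded 5 in the script) keeps the batch preparation in sync with the `--prefix_length` argument.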
KaterinMlad/Working_Code | https://github.com/KaterinMlad/Working_Code | c604963c07909745ff293b84aa6ba9a4a257310b | e6063d00a01f26dcc28742ac71e87c3574fbf98a | b41d53166b84f3f8230ed6acfdec83f3425ab21f | refs/heads/main | 2023-09-01T16:31:54.330837 | 2021-11-02T20:23:10 | 2021-11-02T20:23:10 | 423,987,837 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5366413593292236,
"alphanum_fraction": 0.5795514583587646,
"avg_line_length": 27.617801666259766,
"blob_id": "876923f07841333bc45928b5fcc1964674f96b96",
"content_id": "9ec49dc4808628f7050647f24cd206e0f7577a93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5663,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 191,
"path": "/Joint_Creation.py",
"repo_name": "KaterinMlad/Working_Code",
"src_encoding": "UTF-8",
"text": "import maya.cmds as cmds\r\nimport maya.mel as mel\r\nimport sys\r\n\r\n# This function can probubly go into a class to allow for ease amount control\r\n\r\ndef create_Loc():\r\n\r\n\t# a = print input('set number of locators')\r\n\tamount = 6\r\n\tfor i in range(amount):\r\n\r\n\t\tcount = i+1\r\n\r\n\t\tloc = cmds.spaceLocator (n = 'locator_{}_PRX'.format (count), p = [0,i*2.5,0])\r\n\t\tcmds.makeIdentity (loc, a = 1, t = 1)\r\n\r\n\t\tmel.eval ('CenterPivot;')\r\n\tcmds.select ('*_PRX')\r\n\r\n\tlyr = cmds.createDisplayLayer (n = 'locators', num = 1)\r\n\tcmds.setAttr ('locators.color', 22)\r\n\r\n\r\ndef create_JNT():\r\n\r\n # Make joints\r\n\r\n\t# names = ['C_Pelvis_jnt','l_leg_jnt','l_knee_jnt','l_ankle_jnt','l_toe_jnt','l_foot_ball_jnt'] # Name of joints\r\n\tnames = ['C_l_wrist_jnt', 'C_l_elbow_jnt', 'C_l_shoulder_jnt', 'C_l_clav_jnt'] # ['C_l_clav_jnt', 'C_l_shoulder_jnt', 'C_l_elbow_jnt', 'C_l_wrist_jnt']\r\n\r\n\r\n\tsc = 0\r\n\tfor name in names:\r\n\r\n\t\tcmds.select (cl = 1)\r\n\t\tjnt = cmds.joint (n = name , sc = sc )\r\n\r\n\r\n\r\n# Constrain joints to Locators\r\n\tLoc = cmds.ls ('*_PRX')\r\n\r\n\tif Loc:\r\n\t\tcon1 = cmds.parentConstraint(Loc[0],names[5])\r\n\t\tcon2 = cmds.parentConstraint(Loc[1],names[4])\r\n\t\tcon3 = cmds.parentConstraint(Loc[2],names[3])\r\n\t\tcon4 = cmds.parentConstraint(Loc[3],names[2])\r\n\t\tcon5 = cmds.parentConstraint(Loc[4],names[1])\r\n\t\tcon6 = cmds.parentConstraint(Loc[5],names[0])\r\n\r\n\t\tcmds.delete(con1,con2,con3,con4,con5,con6) #delete Constraints\r\n\r\n\tfor i, n in enumerate(names):\r\n\t if i ==len(names)-1:\r\n\t continue\r\n\t cmds.parent (names[i+1], n)\r\n\r\n\tjnt = cmds.select('C_Pelvis_jnt', hi = 1)\r\n\tif jnt:\r\n\t\tcmds.makeIdentity (jnt, a = 1, t =1, r = 1, s = 1)\r\n\r\n\tcmds.mirrorJoint ('l_leg_jnt', mb = True, myz = True, sr = ('l_', 'r_') )\r\n\r\n\tcmds.select(cl = 1)\r\n\r\n\r\n\r\n\r\n# what do i want to code to do \r\n\r\n# I want to create a set of locators for the user to possition \r\n# I want the user to then be able to create a set of joints based on the locators position \r\n\r\n# what data do we know we know the names of the joints and we know the number of locators we need len()\r\n# class examples = https://github.com/CoreyMSchafer/code_snippets/tree/master/Object-Oriented\r\n\r\n\r\ndef GRP_Higher():\r\n\r\n\tnames1 = ['l_leg_grp','l_sd_ft_roll_r_SDK_grp','l_sd_ft_roll_l_SDK_grp','l_ball_twist_grp','l_pirouette_SDK_grp']\r\n\tnames2 = ['l_pirouette_grp','l_foot_tap_SDK_grp','l_foot_tap_grp']\r\n\tnames3 = ['l_heel_lift_SDK_grp','l_heel_lift_grp']\r\n\tnames4 = ['l_toe_tap_SDK_grp','l_toe_tap_grp']\r\n\r\n\r\n\r\n\tnames5 = names1 + names2 + names3 + names4\r\n\r\n\r\n\tfor name in names5:\r\n\t\tcmds.select (cl = 1)\r\n\t\tGRP = cmds.group (n = name, em = 1, w = 1)\r\n\r\n\tfor i, n in enumerate(names5):\r\n\t\t if i ==len(names5)-1:\r\n\t\t continue\r\n\t\t cmds.parent (names5[i+1], n)\r\n\r\n\t\t if cmds.listRelatives ('l_heel_lift_SDK_grp'):\r\n\t\t \tbreak\r\n\r\n\tcmds.parent (names4[1],names4[0])\r\n\tcmds.parent (names4[0],names5[7])\r\n\r\n\r\n\r\n\tPJnt = cmds.xform ('l_toe_jnt', q = 1, ws = 1, piv = 1) # find jnt position\r\n\r\n\t# create locators based on joint position for group piv possition\r\n\r\n\tamount = 3\r\n\tfor i in range(amount):\r\n\r\n\t\tcount = i+1\r\n\r\n\t\tloc = cmds.spaceLocator (n = 'locator_{}_PIV'.format (count), p = (PJnt[0],PJnt[1],PJnt[2]))\r\n\t\tcmds.makeIdentity (loc, a = 1, t = 1)\r\n\r\n\t\tmel.eval ('CenterPivot;')\r\n\r\n\r\n\t# move 
created locators to possition\r\n\r\n\tcmds.move (0,0,-3.2, 'locator_1_PIV', a = 1 )\r\n\tcmds.move (-1.6,0,0, 'locator_2_PIV', a = 1 )\r\n\tcmds.move (1.6,0,0, 'locator_3_PIV', a = 1 )\r\n\r\n\t# possitioning group pivit\r\n\r\n\tJPos1 = cmds.xform ('l_toe_jnt', q = 1, ws = 1, piv = 1 )\r\n\r\n\tcmds.xform (names5[3], names5[8], names5[9], names5[10], names5[11], ws = 1, piv = (JPos1[0], JPos1[1], JPos1[2]))\r\n\r\n\tJPos2 = cmds.xform ('l_foot_ball_jnt', q = 1, ws = 1, piv = 1 )\r\n\r\n\tcmds.xform (names5[4], names5[5], ws = 1, piv = (JPos2[0], JPos2[1], JPos2[2]))\r\n\r\n\tJPos3 = cmds.xform ('l_ankle_jnt', q = 1, ws = 1, piv = 1)\r\n\r\n\tcmds.xform (names5[0], ws = 1, piv = (JPos3[0], JPos3[1], JPos3[2]))\r\n\r\n\r\n\tloc1 = cmds.xform ('locator_1_PIV', q = 1, ws = 1, piv = 1 )\r\n\tcmds.xform ('l_foot_tap_SDK_grp','l_foot_tap_grp', ws = 1, piv = (loc1[0], loc1[1], loc1[2]))\r\n\r\n\tloc2 = cmds.xform ('locator_2_PIV', q = 1, ws = 1, piv = 1 )\r\n\tcmds.xform ('l_sd_ft_roll_r_SDK_grp', ws = 1, piv = (loc2[0], loc2[1], loc2[2]))\r\n\r\n\tloc3 = cmds.xform ('locator_3_PIV', q = 1, ws = 1, piv = 1 )\r\n\tcmds.xform ('l_sd_ft_roll_l_SDK_grp', ws = 1, piv = (loc2[0], loc2[1], loc2[2]))\r\n\r\n\tcmds.delete ('*_PIV')\r\n\r\n\r\n\tcmds.duplicate ('l_leg_grp', n = 'l_leg_grp'.replace ('l_','r_') )\r\n\tcmds.group (n = 'C_CTRL_Grp', em = 1, w = 1)\r\n\tcmds.parent ('r_leg_grp', 'C_CTRL_Grp')\r\n\tcmds.setAttr ('C_CTRL_Grp.scaleX', -1 )\r\n\tcmds.makeIdentity ('C_CTRL_Grp', a = 1, t = 1, s = 1 )\r\n\tcmds.parent ('l_leg_grp', 'C_CTRL_Grp')\r\n\r\n\r\ndef Ctrl_Names():\r\n\r\n\tsel = cmds.ls(sl = 1)\r\n\tcmds.rename(sel, '_leg_CTRL')\r\n\tcmds.addAttr (ln = 'Heel_Lift', at = 'double', min = 0, max = 60, dv = 0, k = 1)\r\n\tcmds.addAttr (ln = 'Toe_Tap', at = 'double', min = 0, max = 50, dv = 0, k = 1)\r\n\tcmds.addAttr (ln = 'Foot_Tap', at = 'double', min = 0, max = 40, dv = 0, k = 1)\r\n\tcmds.addAttr (ln = 'Ball_Twist', at = 'double', min = -70, max = 70, dv = 0, k = 1)\r\n\tcmds.addAttr (ln = 'Pirouette_U_D', at = 'double', min = 0, max = 60, dv = 0, k = 1)\r\n\tcmds.addAttr (ln = 'Pirouette_L_R', at = 'double', min = -70, max = 70, dv = 0, k = 1)\r\n\tcmds.addAttr (ln = 'Side_Roll', at = 'double', min = -30, max = 30, dv = 0, k = 1)\r\n\tcmds.addAttr (ln = 'PW_Foot_Roll', at = 'double', min = -30, max = 60, dv = 0, k = 1)\r\n\tcmds.addAttr (ln = 'Twist', at = 'double', min = -30, max = 30, dv = 0, k = 1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# GitHun guide\r\n# https://guides.github.com/activities/hello-world/\r\n\r\n\r\n\r\n"
}
] | 1 |
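A common alternative to the constrain-then-delete pattern in `create_JNT` above is to query each locator's world-space translation with `cmds.xform` and place the joint at that position directly. A minimal sketch of that variant, assuming the script's `locator_*_PRX` naming; it is Maya-only and untested outside a Maya session:

```python
import maya.cmds as cmds

def joints_from_locators(joint_names, locator_pattern='*_PRX'):
    """Create one joint per locator, snapped to the locator's world position."""
    locators = cmds.ls(locator_pattern, type='transform')
    joints = []
    for name, loc in zip(joint_names, locators):
        # world-space translation of the locator
        pos = cmds.xform(loc, query=True, worldSpace=True, translation=True)
        cmds.select(clear=True)
        joints.append(cmds.joint(name=name, position=pos))
    # parent into a single chain, each joint under its predecessor
    for parent, child in zip(joints, joints[1:]):
        cmds.parent(child, parent)
    return joints
```

This avoids creating and deleting six temporary parent constraints, at the cost of copying only translation (the constraint version also matches rotation).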
samuelskanberg/yr-forecast-data | https://github.com/samuelskanberg/yr-forecast-data | 7053c5f9e0dcad35bfb68205968f2e6eaeae295c | 5552cbff548d061485b065f7cd4b629dd52a1595 | 2d68315d88da9da5ec2b916cd87e6fe794230ca0 | refs/heads/master | 2023-01-12T14:43:55.456345 | 2020-11-21T15:55:10 | 2020-11-21T15:55:10 | 262,828,933 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5659536719322205,
"alphanum_fraction": 0.6506238579750061,
"avg_line_length": 27.049999237060547,
"blob_id": "629e06807cacff43609a886846dacb969aa921bf",
"content_id": "79a4dfea9ddc05967402fb625893edd8953a3220",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1122,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 40,
"path": "/get_from_yr.py",
"repo_name": "samuelskanberg/yr-forecast-data",
"src_encoding": "UTF-8",
"text": "import requests\nfrom bs4 import BeautifulSoup\n\n# Here is the xml data\nsthlm_xml_source_url = \"https://www.yr.no/place/Sweden/Stockholm/Stockholm/forecast.xml\"\n\n# Fetch the data\nr = requests.get(sthlm_xml_source_url)\n\nxml_raw = r.content\n\n# Read in the xml data using BeautifulSoup\nsoup = BeautifulSoup(xml_raw, \"xml\")\n\n# All forecast data will be in a <time> tag\n# E.g.\n\"\"\"\n<time from=\"2020-05-10T18:00:00\" to=\"2020-05-11T00:00:00\" period=\"3\">\n<!--\n Valid from 2020-05-10T18:00:00 to 2020-05-11T00:00:00 \n -->\n <symbol number=\"9\" numberEx=\"9\" name=\"Rain\" var=\"09\"/>\n <precipitation value=\"3.3\" minvalue=\"1.1\" maxvalue=\"3.4\"/>\n <!-- Valid at 2020-05-10T18:00:00 -->\n <windDirection deg=\"268.8\" code=\"W\" name=\"West\"/>\n <windSpeed mps=\"3.7\" name=\"Gentle breeze\"/>\n <temperature unit=\"celsius\" value=\"13\"/>\n <pressure unit=\"hPa\" value=\"1001.3\"/>\n </time>\n\"\"\"\n\ntimes = soup.find_all(\"time\")\nfor t in times:\n from_time = t.get(\"from\")\n to_time = t.get(\"to\")\n print(\"from_time: {}\".format(from_time))\n print(\"to_time: {}\".format(to_time))\n temp = t.find(\"temperature\")\n print(temp.get(\"value\"))\n print(\"\")\n"
},
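The `from`/`to` attributes on each `<time>` element are ISO-8601 timestamps, so they can be turned into real `datetime` objects for filtering or sorting. A small sketch building on the same parsed `soup`; the `parse_periods` name and the tuple layout are illustrative, not from the repo:

```python
from datetime import datetime

def parse_periods(soup):
    """Collect (start, end, temperature in Celsius) tuples from a parsed forecast."""
    entries = []
    for t in soup.find_all("time"):
        start = datetime.fromisoformat(t.get("from"))  # e.g. "2020-05-10T18:00:00"
        end = datetime.fromisoformat(t.get("to"))
        temp = t.find("temperature")
        if temp is not None:
            entries.append((start, end, float(temp.get("value"))))
    return entries
```

With real `datetime` values it becomes trivial to, say, keep only periods starting after now, or find the warmest period of the day.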
{
"alpha_fraction": 0.7222222089767456,
"alphanum_fraction": 0.7301587462425232,
"avg_line_length": 12.777777671813965,
"blob_id": "ef09eefdbc9f15fa1d2790f91d17afafd1df6973",
"content_id": "f3deba4de18687efebb1a54ba97cb54d7c25972e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 126,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 9,
"path": "/README.md",
"repo_name": "samuelskanberg/yr-forecast-data",
"src_encoding": "UTF-8",
"text": "# Get data from yr\n\nWill get some weather forecast data from yr.\n\nInstall:\n\n```bash\npip install requests BeautifulSoup4\n```\n\n\n"
}
] | 2 |
elamb785/Tic-Tac-Toe-with-AI | https://github.com/elamb785/Tic-Tac-Toe-with-AI | e80001f5bd3d29a1d15b996972f432eb70828ea4 | 4adc89543bc03a2ad97b264560361bc620071b32 | 871d0c10838230ede00ac166cace2087afb626d5 | refs/heads/master | 2022-11-25T13:57:07.528079 | 2020-08-10T23:05:36 | 2020-08-10T23:05:36 | 286,162,537 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5307244658470154,
"alphanum_fraction": 0.5458171367645264,
"avg_line_length": 32.25089645385742,
"blob_id": "2290cf3b8368df3b064d19d38245e28ead160f70",
"content_id": "88d342df62579749210ce571c8d8b81afbff13cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9276,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 279,
"path": "/tictactoe.py",
"repo_name": "elamb785/Tic-Tac-Toe-with-AI",
"src_encoding": "UTF-8",
"text": "# TIC TAC TOE with AI\n# Written By: Evan Lamb\n# Date Written: 8/8/2020 - 8/10/20\n# Create a Tic Tac Toe game with 3 levels of AI difficulty, and 4 game modes\n# human vs AI, AI vs human, human vs human, and AI vs AI\n\nimport random\nfrom itertools import combinations\n\nclass Board():\n\n win_conditions = [\n ['1', '2', '3'],\n ['4', '5', '6'],\n ['7', '8', '9'],\n\n ['1', '4', '7'],\n ['2', '5', '8'],\n ['3', '6', '9'],\n\n ['1', '5', '9'],\n ['7', '5', '3']\n ]\n \n def __init__(self):\n self.board = {'7': ' ', '8': ' ', '9': ' ',\n '4': ' ', '5': ' ', '6': ' ',\n '1': ' ', '2': ' ', '3': ' '}\n\n def get_keys(self):\n return self.board.keys()\n\n def print_board(self):\n print(\"-------------\")\n print(f'| {self.board[\"7\"]} | {self.board[\"8\"]} | {self.board[\"9\"]} |')\n print(\"-------------\")\n print(f'| {self.board[\"4\"]} | {self.board[\"5\"]} | {self.board[\"6\"]} |')\n print(\"-------------\")\n print(f'| {self.board[\"1\"]} | {self.board[\"2\"]} | {self.board[\"3\"]} |')\n print(\"-------------\")\n\n def update_board(self, move, flag, player=None, isMaximizing=None):\n # for use in simulated games\n if isMaximizing != None and player == None:\n if flag == 1:\n self.board[move] = 'X' if isMaximizing else 'O'\n else:\n self.board[move] = 'O' if isMaximizing else 'X'\n # for use in real game\n elif player and isMaximizing == None:\n self.board[move] = player.token\n # for use in simulated games\n else:\n self.board[move] = ' '\n\n def is_open(self, move):\n return True if self.board[move] == ' ' else False\n\n def open_spaces(self):\n return [k for k,v in self.board.items() if v == ' ']\n\n def is_full(self):\n return True if set(self.board.values()) == {'X', 'O'} else False\n\n def win_possible(self, player):\n '''return where to play for a possible win'''\n for win in self.win_conditions:\n for i in combinations(win, 2):\n if self.board[i[0]] == self.board[i[1]] == player.token:\n move_prelim = [spot for spot in win if spot not in i][0] \n if self.is_open(move_prelim):\n return move_prelim \n return None\n\n def is_winner(self, game):\n '''return whether someone has won and if so, who'''\n for i, j, k in self.win_conditions:\n if self.board[i] == self.board[j] == self.board[k] == 'X':\n return True, game.p1\n elif self.board[i] == self.board[j] == self.board[k] == 'O':\n return True, game.p2\n return False, 'No winner'\n\n def game_over(self, game): \n '''returns bool game_over, bool is_winner, obj who_won'''\n results = self.is_winner(game)\n full = self.is_full()\n # Case 1: Game over\n if full or results[0] == True:\n return True, results[0], results[1]\n # Case 2: Game not over\n else:\n return False, False, 'No winner'\n\nclass Player():\n\n def __init__(self, strategy, token, name, scores):\n self.strategy = strategy\n self.token = token\n self.name = name\n self.scores = scores\n\n def make_move(self, game):\n return self.strategy(self, game)\n\ndef minimax(game, depth, isMaximizer, flag):\n '''recursively determine the optimal play for all board positions'''\n # flag = 1, player1 is using minimax in real game\n # flag = 2, player2 is using minimax in real game\n\n # base case\n game_complete, is_winner, who_won = game.board.game_over(game)\n if game_complete:\n if is_winner: \n if who_won.token == 'X' and flag == 1:\n score = game.p1.scores['X'] - depth\n elif who_won.token == 'O' and flag == 1:\n score = game.p1.scores['O'] + depth\n elif who_won.token == 'X' and flag == 2:\n score = game.p2.scores['X'] + depth\n else:\n score = game.p2.scores['O'] - 
depth\n else:\n score = game.p1.scores['Tie']\n return score\n\n if isMaximizer:\n bestScore = float(\"-inf\")\n for move in game.board.open_spaces():\n game.board.update_board(move, flag, isMaximizing=isMaximizer) \n score = minimax(game, depth + 1, False, flag)\n # undo the winning, tying, or losing move\n game.board.update_board(move, None) \n bestScore = max(score, bestScore)\n return bestScore\n else:\n bestScore = float(\"inf\")\n for move in game.board.open_spaces():\n game.board.update_board(move, flag, isMaximizing=isMaximizer)\n score = minimax(game, depth + 1, True, flag)\n # undo the winning, tying, or losing move\n game.board.update_board(move, None) \n bestScore = min(score, bestScore)\n return bestScore\n\n# Define the possible strategies for make_move method\n\ndef Hard(player, game):\n '''Best move is determined using the minimax algorithm'''\n # Determine which player is using minimax to make their turn\n flag = 1 if player.token == 'X' else 2\n bestScore = float(\"-inf\")\n bestMove = None\n # loop through possible moves given the current board\n for move in game.board.open_spaces():\n game.board.update_board(move, None, player=player)\n score = minimax(game, 0, False, flag) \n game.board.update_board(move, None) # undo the update \n if score > bestScore:\n bestScore = score\n bestMove = move \n return bestMove\n\ndef Medium(player, game):\n '''Win, if possible. If not, block. Else, random.'''\n move = game.board.win_possible(player)\n if move:\n return move\n move = game.board.win_possible(game.change_turn(player))\n if move:\n return move\n else:\n return random.choice(game.board.open_spaces())\n\ndef Easy(player, game):\n '''Play randomly'''\n return random.choice(game.board.open_spaces())\n\ndef User(player, game):\n move = input(\"Insert move (1-9): \")\n while move not in game.board.get_keys():\n print(\"Please enter a value between 1 and 9\")\n move = input(\"Insert move (1-9): \")\n return move\n\nclass Game():\n\n def __init__(self, p1_diff, p2_diff, p1_name, p2_name):\n self.p1 = Player(p1_diff, 'X', p1_name, scores1)\n self.p2 = Player(p2_diff, 'O', p2_name, scores2)\n self.board = Board()\n\n def change_turn(self, player):\n if player is self.p1:\n return self.p2\n else:\n return self.p1\n\n# Convert inputted strings to function types\nfunction_mappings = {\n 'Hard': Hard,\n 'Medium': Medium,\n 'Easy': Easy,\n 'User': User\n}\n\n# define the rewards/penalties for minimax\nscores1 = {\n 'X': 10,\n 'O': -10,\n 'Tie': 0\n}\n\nscores2 = {\n 'X': -10,\n 'O': 10,\n 'Tie': 0\n}\n\ndef get_players_diff():\n '''Determine and convert to functions the inputted difficulties'''\n players = (\"User\", \"Easy\", \"Medium\", \"Hard\")\n player = []\n for i in range(1, 3):\n player_diff = input(f\"Who is player {i} {players}? \").title()\n while player_diff not in players:\n print(\"Please enter an acceptable player name.\")\n player_diff = input(f\"Who is player {i} {players}? \").title()\n player.append(player_diff)\n return function_mappings[player[0]], function_mappings[player[1]]\n\ndef query_new_game():\n ans = input(\"Do you want to play again? [y/n] \")\n while ans not in ('y', 'n'):\n print(\"Please enter acceptable answer\")\n ans = input(\"Do you want to play again? [y/n] \")\n return True if ans == 'y' else False\n\ndef start_game(p1, p2):\n p1_user = input(\"What is player 1's name? \")\n p2_user = input(\"What is player 2's name? 
\")\n game = Game(p1, p2, p1_user, p2_user)\n print(\"\\n------------BEGIN GAME--------------\\n\")\n print(\"NOTE: Locations are numbered 1-9 where 1 is bottom left\")\n print(\"and numbers increase left to right. Ex. 4 is first location\")\n print(\"in the second row.\\n\")\n game.board.print_board()\n player = game.p1\n print(f'{player.name}\\'s Turn')\n\n # initial play begins here\n while True:\n move = str(player.make_move(game))\n while not game.board.is_open(move):\n print(\"Position already occupied. Please try again.\")\n move = player.make_move(game)\n game.board.update_board(move, None, player=player)\n print(f'Last Move: {player.name} played at position {move}.')\n game.board.print_board()\n game_complete, is_winner, who_won = game.board.game_over(game)\n if game_complete:\n if is_winner:\n print(f'{who_won.name} wins!')\n return query_new_game()\n else:\n print(\"Tie!\")\n return query_new_game()\n else:\n player = game.change_turn(player)\n print(f'{player.name}\\'s Turn')\n\ndef start_session():\n new_game = True\n while new_game:\n p1, p2 = get_players_diff()\n new_game = start_game(p1, p2)\n \nif __name__ == '__main__':\n start_session()"
},
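The `strategy` attribute in the `Player` class above is the whole Strategy pattern: a plain function stored on the instance and chosen at construction time. A stripped-down sketch of the same idea outside the game context; all names here are illustrative:

```python
import random
from typing import Callable, List

Strategy = Callable[[List[str]], str]

def first_open(spaces: List[str]) -> str:
    return spaces[0]

def random_open(spaces: List[str]) -> str:
    return random.choice(spaces)

class Agent:
    def __init__(self, strategy: Strategy):
        self.strategy = strategy  # injected behavior, swappable at runtime

    def pick(self, spaces: List[str]) -> str:
        return self.strategy(spaces)

# swapping the function swaps the behavior -- no subclassing needed
cautious, chaotic = Agent(first_open), Agent(random_open)
print(cautious.pick(['1', '5', '9']), chaotic.pick(['1', '5', '9']))
```

Because strategies are first-class functions, adding a new difficulty level means writing one function and registering it in a mapping, exactly as `function_mappings` does in the script.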
{
"alpha_fraction": 0.6732915043830872,
"alphanum_fraction": 0.6823570728302002,
"avg_line_length": 45.96721267700195,
"blob_id": "2723f39f94f5b690612c84831bdded0855b3df7c",
"content_id": "03cedb968f6ba66e6be4bd089d76e2a425f835e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2868,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 61,
"path": "/README.md",
"repo_name": "elamb785/Tic-Tac-Toe-with-AI",
"src_encoding": "UTF-8",
"text": "# Tic-Tac-Toe-with-AI\nPlay Tic-Tac-Toe with friends or against the AI!\n\n# Demo\n\n\n# Features\n1) Locations are marked as if it were a numerical keypad for intutive use.\n\n2) Supports 3 AI difficulties with the following protocols:\n\n * Easy: Play randomly\n * Medium: Win if possible. If not, block if possible. If not, play randomly.\n * Hard: Make optimal move (via minimax algorithm)\n\n3) Supports 4 game modes:\n\n * Human vs Human\n * Human vs AI\n * AI vs Human\n * AI vs AI\n\n4) The move-making protocols are determined at run time using a Behavioral Design Pattern\nknown as Strategy Method (aka Policy Method) which permits easier scalability if more\ndifficulty levels were to be introduced. In this code, the contents of the make_move\nmethod in the Player class are replaced with the contents of the four strategy functions\ndefined outside the class.\n\n5) Rewards AI for quicker wins and reduces the punishment for late game losses via \"decay\nreward/punishment as depth of recursion increases\" idea.\n\n6) Handles improper input and various sessions\n\n# Minimax Algorithm\n\nThe Minimax algorithm is a recursive search algorithm that determines the best move given\nthe current board state. It does this by creating a tree of all future possibilities where \npositive endings (i.e. it wins) result in a reward and negative endings (i.e. it loses) result\nin a negative reward. Ties result in no reward. This recursion continues until all possible states\n(in the case of Tic Tac Toe) have been evalauted and the best move is determined as the move with \nthe greatest score. The two assumptions for minimax to work properly are that 1) the opponent \nis trying to win, and 2) there is no chance component to the game. Below is an example of the \nalgorithm at work courtesy of theoryofprogramming.com.\n\n\nFig. 1 Minimax Algorithm Search Tree\n\nThe algorithm is trying to determine the best move for X by trying all possible moves and assigning\nappropriate rewards. If the game does not end, the algorithm plays for the opponent as well (O's in \ncase) until a terminal state is reached. Rewards propagate up the tree from children, grandchildren, \netc. to the root. Each of the possible choices on the actual, current game board are \nassigned a score and the greatest value is the best move. The depth is considered when applying scores.\n\nApplying this to the above example:\n\n| Possible Moves | Score | Stage Best Move| Overall Best Move |\n| --------------- | ---------- | -------------- | --------------------- |\n| 2 |a. 0 | [ ] | N/A |\n| 2 |b. 10-2 = 8 | [x] | [ ] |\n| 4 | 10 | N/A | [x] |\n| 6 | -10+1 = 9 | N/A | [ ] |\n\n\n\n"
}
] | 2 |
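The README's minimax walk-through extends naturally to alpha-beta pruning, which skips branches that can no longer change the result. A generic, game-agnostic sketch (this is not the repo's implementation; `moves`, `apply`, `undo`, and `score` are hypothetical hooks a game would supply):

```python
def alphabeta(state, depth, alpha, beta, maximizing, moves, apply, undo, score):
    """Minimax with alpha-beta cutoffs; returns the same value as plain minimax."""
    s = score(state, depth)
    if s is not None:            # terminal position (or depth limit) reached
        return s
    if maximizing:
        best = float("-inf")
        for m in moves(state):
            apply(state, m)
            best = max(best, alphabeta(state, depth + 1, alpha, beta, False,
                                       moves, apply, undo, score))
            undo(state, m)
            alpha = max(alpha, best)
            if alpha >= beta:    # opponent will never allow this branch
                break
        return best
    best = float("inf")
    for m in moves(state):
        apply(state, m)
        best = min(best, alphabeta(state, depth + 1, alpha, beta, True,
                                   moves, apply, undo, score))
        undo(state, m)
        beta = min(beta, best)
        if alpha >= beta:
            break
    return best
```

For a game as small as Tic-Tac-Toe the pruning is optional, but it mirrors the apply/undo structure of the `Hard` strategy and cuts the search tree substantially for larger boards.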
kyleget/microservice-python | https://github.com/kyleget/microservice-python | 939f3fc6722f2f1a938c027733b9308d4c4b3d6f | 0a181ebddf57640676a05ebb84aa5c97c189af7a | 9385024ab257dda576a55c8d0a0a845f7347f95e | refs/heads/master | 2021-01-10T08:04:20.126122 | 2016-01-27T01:58:54 | 2016-01-27T01:58:54 | 49,403,482 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5925925970077515,
"alphanum_fraction": 0.6296296119689941,
"avg_line_length": 14.357142448425293,
"blob_id": "d6b8baa4220ea7856421ccd77fb048a6237215ae",
"content_id": "6314d0058554d5455a79939b9c49c5c7157e785d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 216,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 14,
"path": "/myservice/app.py",
"repo_name": "kyleget/microservice-python",
"src_encoding": "UTF-8",
"text": "from flask import Flask\n\nfrom v1 import api as v1\n\n\napp = Flask(__name__)\n\n\n# API Versions\napp.register_blueprint(v1, url_prefix='/v1')\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', threaded=True, debug=True)\n\n"
},
{
"alpha_fraction": 0.6295313835144043,
"alphanum_fraction": 0.640141487121582,
"avg_line_length": 25.302326202392578,
"blob_id": "29eab1ee244ea93bd90a6a3d1db9fb5db31bc805",
"content_id": "86e0569d37d679a48bfd30b5879fd1af90c86ad8",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1131,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 43,
"path": "/myservice/v1/api.py",
"repo_name": "kyleget/microservice-python",
"src_encoding": "UTF-8",
"text": "from flask import Blueprint, jsonify\nfrom flask.views import MethodView\n\nfrom version import __version__\n\n\napi = Blueprint('v1', __name__, static_folder='')\n\n\nclass Main(MethodView):\n \"\"\" Example view \"\"\"\n methods = ['GET']\n\n def get(self):\n \"\"\"\n Return some data\n ---\n responses:\n 200:\n description: some data\n \"\"\"\n data = {'message': 'Success'}\n return jsonify(data)\n\n\n# URLs\napi.add_url_rule('/', view_func=Main.as_view('main'))\n\n\[email protected]('/swagger.yaml')\ndef spec():\n return api.send_static_file('swagger.yaml')\n\n\[email protected]_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Authorization, Content-Type')\n response.headers.add('Access-Control-Expose-Headers', 'Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS')\n response.headers.add('Access-Control-Allow-Credentials', 'true')\n response.headers.add('Access-Control-Max-Age', 60 * 60 * 24 * 20)\n return response\n"
},
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.7307692170143127,
"avg_line_length": 25,
"blob_id": "8bf67e640a31ab0caf2f1b630b47c5efb83782be",
"content_id": "c9c56ddf915f28b03f6363434a18ed6c8e5a6e69",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 208,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 8,
"path": "/README.md",
"repo_name": "kyleget/microservice-python",
"src_encoding": "UTF-8",
"text": "# Python Microservice Template\nA starter template for a Python microservice.\n\n## Build & run Docker container\n```\n~ docker build -t myservice .\n~ docker run -name mycontainer -p 5000:5000 -i -t myservice\n```\n"
},
{
"alpha_fraction": 0.6377952694892883,
"alphanum_fraction": 0.6614173054695129,
"avg_line_length": 17.14285659790039,
"blob_id": "5ee86655ddeb48dea6657ebb8d2ef506cbee4663",
"content_id": "b123ce4c56fdfbad9beeb8a7dc94589b97a7f558",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 127,
"license_type": "permissive",
"max_line_length": 43,
"num_lines": 7,
"path": "/tests/tests.py",
"repo_name": "kyleget/microservice-python",
"src_encoding": "UTF-8",
"text": "from unittest import TestCase\n\n\nclass MyTest(TestCase):\n\n def test_one_plus_one_equals_two(self):\n assert 1 + 1 == 2\n"
},
{
"alpha_fraction": 0.679347813129425,
"alphanum_fraction": 0.72826087474823,
"avg_line_length": 19.44444465637207,
"blob_id": "e9f2338385eada8de98ca63ead7bec484172c427",
"content_id": "91b5165ca79b63d7d4a14968bb276a8ab557226f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 184,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 9,
"path": "/Dockerfile",
"repo_name": "kyleget/microservice-python",
"src_encoding": "UTF-8",
"text": "FROM python:2.7-onbuild\n\nLABEL description=\"Starter template for a Python microservice\" \\\n version=\"0.0.1\"\n\nMAINTAINER Kyle Getrost <[email protected]>\n\nEXPOSE 5000\nCMD python ./myservice/app.py\n"
}
] | 5 |
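Flask's built-in test client exercises the versioned blueprint without running a server, which makes the template's tests more useful than the arithmetic placeholder. A minimal sketch against the app above; it assumes you run it from inside the `myservice/` directory so that `from app import app` resolves the same way `python ./myservice/app.py` does:

```python
import json
import unittest

from app import app  # assumes cwd is the myservice/ directory


class MainViewTest(unittest.TestCase):
    def setUp(self):
        self.client = app.test_client()

    def test_v1_root_returns_success(self):
        # the blueprint is mounted at /v1, and Main handles "/"
        resp = self.client.get('/v1/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(json.loads(resp.data)['message'], 'Success')

    def test_cors_header_present(self):
        # added by the blueprint's after_request hook
        resp = self.client.get('/v1/')
        self.assertEqual(resp.headers['Access-Control-Allow-Origin'], '*')


if __name__ == '__main__':
    unittest.main()
```

Because the CORS headers are set in the blueprint's `after_request` hook, they apply only to `/v1/*` routes; a second versioned blueprint would need its own hook or an app-level one.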
calarts/othertownsend | https://github.com/calarts/othertownsend | e2f8cab1f10e8dd04756284a31818ea4bd1f79c0 | 50641399804881b759721294a897a4d221ed7821 | ac478d935566ae1f977d5312ad8d5215088554d3 | refs/heads/master | 2022-12-09T20:35:08.491380 | 2019-06-23T14:55:03 | 2019-06-23T14:55:03 | 189,501,370 | 0 | 0 | null | 2019-05-31T00:39:22 | 2019-06-23T14:55:10 | 2022-12-08T05:11:51 | Python | [
{
"alpha_fraction": 0.714893639087677,
"alphanum_fraction": 0.714893639087677,
"avg_line_length": 20.454545974731445,
"blob_id": "30be3804c96920703412f169da9e62ab9c32faed",
"content_id": "36d303cdef319c778c0748310c7cda7903786a20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 11,
"path": "/_config.py.example.py",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "# RENAME THIS FILE TO _config.py AND PUT A REAL TELEGRAM TOKEN INSIDE\n# devothertowbsend\n# t.me/devothertowbsend_bot\n# TOKEN = \"627805063:kpqwouepkjdlnasdmnlamsncv;lnsdc\" \n\n# other.townsend\n# t.me/OtherTownsendBot\n# /setinline \n# /setuserpic\nTOKEN = \"746808563:klsadlkjasldkjlaksjdlasd\" \nDEBUG = True"
},
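Keeping the token out of version control entirely is safer than a renamed config file. A sketch of an environment-variable variant of `_config.py`; the `TELEGRAM_TOKEN` variable name is an assumption, not from the repo:

```python
# _config.py -- token comes from the environment instead of source control
import os

TOKEN = os.environ.get("TELEGRAM_TOKEN")  # e.g. export TELEGRAM_TOKEN=123456:abcdef
DEBUG = os.environ.get("DEBUG", "0") == "1"

if not TOKEN:
    raise RuntimeError("TELEGRAM_TOKEN is not set")
```

The rest of the code can keep importing `from _config import TOKEN, DEBUG` unchanged.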
{
"alpha_fraction": 0.7208588719367981,
"alphanum_fraction": 0.7208588719367981,
"avg_line_length": 26.25,
"blob_id": "8783003d8f5df4dc8d2a43e9990037536957d18e",
"content_id": "a43f0d90ffd133a4a2627772406fcacc7521c929",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 12,
"path": "/theothertownsend.com/content/contact.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndescription = \"contact\"\nslug = \"contact\"\nthumbnail = \"images/tn.png\"\ntitle = \"Contact\"\n\n+++\n## Reach Out\n\nThe best way to talk to The Other Townsend is on Telegram, [https://t.me/OtherTownsendBot](https://t.me/OtherTownsendBot).\n\nYou may also [open an issue on GitHub](https://github.com/calarts/othertownsend/issues/new)."
},
{
"alpha_fraction": 0.5292338728904724,
"alphanum_fraction": 0.5625,
"avg_line_length": 26.5,
"blob_id": "2e6d436d3a1de5cf32ba77b17e781597daed790a",
"content_id": "43fc8fd83e5ee00ba049a6b7ed27da2cd5aac38e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 992,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 36,
"path": "/vars.py",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# VARIABLES \n# from vars import heartratedata, sleepdata, timepointdata, stepdata, lookdata\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\nheartratedata = \"data/heartrate.json\"\n# this is 24 hours of heart-rate sampled on average every 2.76 seconds\n# time has the format \"19:14:00\" \"%H:%M:%S\"\n# appears to be UTC/GMT! Do we wnt to transform to local time? YES\n\n# [{\n# \"time\" : \"08:00:07\",\n# \"value\" : {\n# \"bpm\" : 78,\n# \"confidence\" : 2\n# }\n# }\n\nsleepdata = \"data/sleep.json\"\n\nlookdata = \"data/amazon.csv\"\n\n\ntimepointdata = \"data/locations.csv\"\n# Start Time,End Time,Name,Latitude,Longitude\n# lat/lng has been transformed and spoofed for Northern CA\n# ten days were compressed into one for variety. \n\nstepdata = \"data/steps_monday.json\"\n# datetime has the format \"Tue 19:14:00\" \"%a %H:%M:%S\"\n# appears to be UTC/GMT\n\n# [{\n# \"dateTime\" : \"Sat 08:03:00\",\n# \"value\" : \"0\"\n# },\n\n\n"
},
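Given the record shape documented in `vars.py`, loading `heartrate.json` and summarizing it takes only a few lines. A sketch (the file path and field names come straight from the comments above; the summary itself is illustrative):

```python
import json

with open("data/heartrate.json") as f:
    records = json.load(f)

# each record: {"time": "08:00:07", "value": {"bpm": 78, "confidence": 2}}
bpms = [rec["value"]["bpm"] for rec in records]
print("samples:", len(bpms))
print("mean bpm: {:.1f}".format(sum(bpms) / len(bpms)))
print("range: {}-{}".format(min(bpms), max(bpms)))
```

This is the same parse the loader functions perform before writing rows into the Peewee tables.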
{
"alpha_fraction": 0.5846377015113831,
"alphanum_fraction": 0.5933516025543213,
"avg_line_length": 31.113988876342773,
"blob_id": "17fcd9b5c9a01a9001b0e3bac45fcedbc7301648",
"content_id": "21ef60e4776c8a271c3f30a00931f602a6c753ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6197,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 193,
"path": "/utils.py",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "import random\nfrom datetime import datetime\nimport json, csv\n\nfrom shapely.geometry import Point\nfrom shapely.wkt import dumps, loads\n\nfrom models import Person, Heart, Place, Step, Look, Conversation\nfrom vars import heartratedata, sleepdata, timepointdata, stepdata, lookdata\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Utils \n# from utils import gimmeLongLat, gimmeGeojson, gimmeSeconds, random_line\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n# from Knuth...\ndef random_line(afile):\n line = next(afile)\n for num, aline in enumerate(afile, 2):\n if random.randrange(num): continue\n line = aline\n return line\n\n# Get a line: \n# with open(\"data/townsendtalk.txt\") as f:\n# print(random_line(f))\n\ndef gimmeLongLat(stups):\n # ugh, must reverse lat and lng\n revtups = []\n for tup in stups:\n revtups.append(tup[::-1])\n return revtups\n\ndef gimmeGeojson(atrip,myloc):\n feature_collection = []\n # ugh, must reverse lat and lng\n revtups = gimmeLongLat(atrip)\n myline = LineString(revtups)\n mylocrev = Point( gimmeLongLat([myloc])[0] )\n mylocrevfeature = Feature(geometry=mylocrev,name=\"my current point\")\n mylinefeature = Feature(geometry=myline,name=\"my trip\")\n feature_collection = [mylocrevfeature,mylinefeature]\n myfeaturecollection = FeatureCollection(feature_collection)\n return myfeaturecollection\n\n\ndef gimmeSeconds(thetime=\"08:02:02\", thefmt=\"%H:%M:%S\", timeadjust=0):\n s = datetime.strptime(thetime, thefmt).second * 1\n ms = datetime.strptime(thetime, thefmt).minute * 60\n hs = datetime.strptime(thetime, thefmt).hour * 3600\n\n if (timeadjust != 0):\n # timeadjust is set in hours\n correction = (timeadjust*360)\n utcsecs = int(s+ms+hs)\n # LAX is 7 hours behind UTC, -2520 seconds\n localsecs = (utcsecs-correction)% 86400\n else:\n localsecs = int(s+ms+hs)\n\n return int(localsecs)\n\ndef gimmecurseconds():\n now = datetime.now() # should be local time!\n secs_since_midnight = (now - now.replace(hour=0, minute=0, second=0, microsecond=0)).total_seconds()\n return int(secs_since_midnight)\n \n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# Build the Tables \n# from utils import createPersondb, createHeartdb, createPlacedb, createStepdb, CreateLookdb, createConversationdb\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\ndef createPersondb(mydb):\n try:\n other = Person.get(name='OTHER')\n created = False\n except:\n mydb.create_tables([Person])\n other = Person.create(\n \tname='OTHER',\n \ttelegram_id=123456789,\n \tcreated_at=datetime.now(),\n \tchat_name='othertownsend',\n \tfirst_name='Other',\n \tlast_name=\"Townsend\",\n \tlogin=\"othertownsend\",\n \tlanguage_code=\"en\"\n \t)\n created = True\n\n# \ttelegram_id = BigIntegerField()\n# \tcreated_at = DateTimeField()\n# \tchat_name = CharField()\n# \tfirst_name = CharField()\n# \tlast_name = CharField()\n# \tlogin = CharField()\n# \tlanguage_code = CharField()\n\n \n print(\"Person table is ready and 'OTHER' was created\", created)\n return other\n \n# Create the CONVERSATION table.\n# Run this ONLY ONE TIME IN PRODUCTION!\n\ndef createConversationdb(mydb):\n try:\n print(\"The Conversation table has\", len(Conversation), \"entries and it's ready!\")\n except:\n mydb.create_tables([Conversation])\n print(\"Your new Conversation table is ready\")\n\ndef createHeartdb(mydb,other):\n with open(heartratedata, 'r') as f:\n json_text = f.read()\n\n heartrate_list = json.loads(json_text)\n\n for rec in heartrate_list:\n secs = 
gimmeSeconds(thetime=rec['time'], thefmt=\"%H:%M:%S\", timeadjust=-7)\n bpm = rec['value']['bpm']\n\n try:\n beats, created = Heart.get_or_create(actor=other, timestamp=secs, bpm=bpm)\n except:\n mydb.create_tables([Heart])\n beats = Heart.create(actor=other, timestamp=secs, bpm=bpm)\n beats.save()\n \n print(\"Heart table is ready and 'beats' was created\", created)\n\n\ndef createPlacedb(mydb,other):\n with open(timepointdata, 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n # This skips the first row of the CSV file.\n next(csvreader)\n for row in csvreader:\n mypoint = Point(float(row[3]), float(row[4]))\n try:\n tp, created = Place.get_or_create(actor=other, \n \ttimestamp=gimmeSeconds(thetime=row[0], timeadjust=-7), \n \tpoint=dumps(mypoint), \n \tmode=\"WALK\")\n except:\n mydb.create_tables([Place])\n tp = Place.create(actor=other, \n \ttimestamp=gimmeSeconds(thetime=row[0], timeadjust=-7), \n\t point=dumps(mypoint), \n\t mode=\"WALK\")\n tp.save()\n\n print(\"Place table is ready and 'steps' was created\", created)\n\n\ndef createLookdb(mydb,other):\n with open(lookdata, 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n # This skips the first row of the CSV file.\n next(csvreader)\n for row in csvreader:\n look = str(row[0])\n link = str(row[1])\n try:\n mylook, created = Look.get_or_create(actor=other,look=look,link=link)\n except:\n mydb.create_tables([Look])\n mylook = Look.create(actor=other,look=look,link=link)\n mylook.save()\n\n print(\"Look table is ready and 'looks' was created\", created)\n\n\ndef createStepdb(mydb,other):\n with open(stepdata, 'r') as f:\n json_text = f.read()\n\n step_list = json.loads(json_text)\n\n for rec in step_list:\n secs = gimmeSeconds(rec['dateTime'], thefmt=\"%a %H:%M:%S\", timeadjust=0)\n val = rec['value']\n\n try:\n steps, created = Step.get_or_create(actor=other, timestamp=secs, steps=val)\n except:\n mydb.create_tables([Step])\n steps = Step.create(actor=other, timestamp=secs, steps=val)\n steps.save()\n \n print(\"Step table is ready and 'steps' was created\", created)"
},
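A quick worked example of the UTC-to-local conversion in `gimmeSeconds`: 19:14:00 UTC is 69240 seconds after midnight, and a -7 hour offset (Los Angeles during daylight time) subtracts 25200 seconds. A standalone check of that arithmetic, mirroring the corrected formula above (`to_local_seconds` is an illustrative name):

```python
from datetime import datetime

def to_local_seconds(hhmmss: str, offset_hours: int) -> int:
    """Seconds since local midnight for an HH:MM:SS UTC timestamp."""
    t = datetime.strptime(hhmmss, "%H:%M:%S")
    utc_secs = t.hour * 3600 + t.minute * 60 + t.second
    return (utc_secs + offset_hours * 3600) % 86400

# 19:14:00 UTC -> 12:14:00 PDT: 69240 - 25200 = 44040 seconds
assert to_local_seconds("19:14:00", -7) == 44040
```

The modulo keeps pre-midnight UTC samples from going negative, wrapping them to the previous local day instead.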
{
"alpha_fraction": 0.7568208575248718,
"alphanum_fraction": 0.7781732082366943,
"avg_line_length": 43.421051025390625,
"blob_id": "d18b38549fd4964cf3503661f3c94f71f1d968c0",
"content_id": "cd1cff5f85d9e6397568a54eb8820aad60d57107",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 847,
"license_type": "no_license",
"max_line_length": 177,
"num_lines": 19,
"path": "/theothertownsend.com/content/posts/ekg-ecg.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/19/2019\"\ndescription = \"\"\nslug = \"ekgecg\"\nthumbnail = \"/media/ECG.gif\"\ntitle = \"EKG/ECG\"\n\n+++\nPersonal health monitors including the FitBit and Apple Watch are capable of monitoring your heart fo irregular heartbeats.\n\nA huge study suggests that personal surveillance devices can detect atrial fibrillation, aka irregular heartbeats.\n\nMore than 419,000 users signed up for the study, the largest ever to explore screening healthy people for atrial fibrillation.\n\nThe watch warned about 2100 participants in the study that they might have a serious heart condition. This could be the start of a new trend in technological assisted self-help.\n\nShould Apple devices replace a Doctor's care? “I would not advise this to the overall general population,” said Dr Valentin Fuster, director of Mount Sinai Heart in New York.\n\n"
},
{
"alpha_fraction": 0.7483355402946472,
"alphanum_fraction": 0.7509986758232117,
"avg_line_length": 61.66666793823242,
"blob_id": "7abf99d90e03c37b7f71f935e63d4e8ba7a635c9",
"content_id": "d6567be5a9dcf82e25e943411a1e875d95b38c8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 751,
"license_type": "no_license",
"max_line_length": 565,
"num_lines": 12,
"path": "/theothertownsend.com/content/about.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ntitle = \"who do they think you are?\"\nslug = \"about\"\nthumbnail = \"images/tn.png\"\ndescription = \"about\"\n+++\n\n---------------------------\nThis Townsend enjoys British historical dramas, European horror films, quirky detective shows, American animated comedies, and stop-motion films. They collect watch bands, swords, Nike shoes, vintage fantasy books, asthma inhalers, horror movie posters, used Playstation 4 controllers, pliers, flashlights, packaged military rations, thank you card sets, and Season 3 of Scrubs on DVD. Their hobbies are reading, medieval weaponry, watch cleaning, medical supply hoarding, forensics, running, fixing pipes and small machinery, and English Premier League football.\n\n\n"
},
{
"alpha_fraction": 0.5721271634101868,
"alphanum_fraction": 0.7567237019538879,
"avg_line_length": 38,
"blob_id": "710d923906b37cf8c27f824a6846cbdc9aed49cb",
"content_id": "0886972c537fb1ee529fb0e59945df92bb0a421a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 818,
"license_type": "no_license",
"max_line_length": 249,
"num_lines": 21,
"path": "/theothertownsend.com/content/posts/in-ny-come-see-me-thursday-parsons-making-center.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/19/2019\"\ndescription = \"\"\nslug = \"lifeinreply\"\nthumbnail = \"/media/googlemap_parsonsmakingcenter.jpg\"\ntitle = \"In NY? Come see me Thursday @Parsons Making Center\"\n\n+++\n***\n\n**Come see me!**\n\n[\\[40.735193,-73.994621\\]](https://www.google.com/maps/place/Parsons+School+of+Design/@40.7353043,-73.9967813,17z/data=!4m5!3m4!1s0x89c25997ab9723e1:0xaec76949c30ccdda!8m2!3d40.7353003!4d-73.9945926)\n\nLIFE IN REPLY, Opening Reception/Gallery Exhibition!\n\n7:00 - 9:00 PM @Parsons Making Center Sheila C. Johnson Design Center\n\n[66 5th Ave, New York, NY 10011](https://www.google.com/maps/place/66+5th+Ave,+New+York,+NY+10011/data=!4m2!3m1!1s0x89c259982737be17:0x7d65198ad1e2dac7?sa=X&ved=2ahUKEwja_7LtovbiAhUuU98KHQSMBOkQ8gEwAHoECAoQAQ) Entrance on 5th Avenue and 13th Street.\n\n"
},
{
"alpha_fraction": 0.680497944355011,
"alphanum_fraction": 0.7136929631233215,
"avg_line_length": 21,
"blob_id": "00f2c5d31c8ca7bafa21f7aa6249893e5ec6486d",
"content_id": "e16e591c69bb4c4fbaf0c876c2fd44e030cf24bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 241,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 11,
"path": "/theothertownsend.com/content/posts/i-have-an-instagram.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/19/2019\"\ndescription = \"My new Instagram account\"\nslug = \"instagram\"\nthumbnail = \"/media/othertownsend_instagram.jpg\"\ntitle = \"I have an Instagram\"\n\n+++\nI finally got an Instagram. Yay!\n\n"
},
{
"alpha_fraction": 0.7234469056129456,
"alphanum_fraction": 0.7394789457321167,
"avg_line_length": 44.45454406738281,
"blob_id": "b38adc09ac088559c4bae715d7246acc7523a5eb",
"content_id": "9d87e2914a9b3fb45a8edb770768f23aad025018",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 345,
"num_lines": 11,
"path": "/theothertownsend.com/content/posts/how-to-spoof-fitbit.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/12/2019\"\ndescription = \"\"\nslug = \"spoofbit\"\nthumbnail = \"/media/spoofbit.jpg\"\ntitle = \"How to spoof FitBit\"\n\n+++\nThe Other can pretend to be you. I emit enough of the right kind of signals that I can appear human to Apple, Google, & Amazon. Here I show a way to inject my heartbeat, steps, and geographical location into the FitBit ecosystem. The data will also be picked up by Apple's health monitor and other software that gets data from FitBit's hardware.\n\n"
},
{
"alpha_fraction": 0.7917956709861755,
"alphanum_fraction": 0.7943756580352783,
"avg_line_length": 214.38888549804688,
"blob_id": "6a5d0c658822edc25385343cbe880f4dcf6aacd4",
"content_id": "22fa6145fb63ac31e08a7daf5b8f0c6f72a2ecd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3900,
"license_type": "no_license",
"max_line_length": 1542,
"num_lines": 18,
"path": "/theothertownsend.com/content/faq.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/19/2019\"\ndescription = \"Answers to your questions\"\nslug = \"faq\"\nthumbnail = \"\"\ntitle = \"FAQ\"\n\n+++\n**Why make a fake person? What does it have to do with biology?**\n\nThe Other Townsend is not a person. And yet, the Other can still operate as a human online and through data by gaining interests, moving around on a map, making posts on Facebook, and generating a heartbeat, mood, and sleeping pattern. You can even talk to the Other through texts. The \"personality\" we've created to simulate life, however, is derived from the \"stolen\" information of five separate people. Companies can advertise to them, recognize their face, find their location, etc. and the information gathered was gathered manually in the same way Amazon, Google, Apple, and more gather constantly, proving the reductiveness of their methods of defining who qualifies as human, and the downfall of their unethical practices of data collection. Tech companies see you as a police sketch of you, or a \"data portrait\". To them you are not human–you are points of information that can be bought and sold in order to advertise, track, or use. Even your biology is reduced to this, with services such as 23andme.com and Google's Verily spending nearly as much time in your genetics as you do. This constant and unchecked theft of data on behalf of tech companies is socially and politically unethical, exploitative, and reckless. By ignoring the human behind every point of data, they endanger us all.\n\n**Why is this important?**\n\nYou have to ask a lot of personal and societal questions to get to this. What right does a company have to my health statistics? Could they sell my data (as Facebook does), and lose me opportunities in life due to my mental health history? Could they just use this to advertise to me? And by telling me every node of information they've analyzed and predicted about my body, what kind of emotional damage would they wreak? And moving beyond me, the predictive nature of this kind of data collection is unnerving. Can they predict a criminal based on racial bias, leading to more police violence and incarceration? Can they preemptively and dangerously call the police based on a psychiatric diagnosis? In ten years, will I be denied health insurance and medication because of a genetic possibility observed by Google?\n\n \nYes, social media and advances in the way we approach humanity through it can absolutely be a good thing. In fact, it often is when initiated for a meaningful purpose. But if tech’s idea of humanity is a system of algorithms from data, and their methods of data collecting are unethical, what part of humanity are they claiming to examine? And we have proof that algorithms are biased—that Snapchat doesn’t recognize non-European features, that YouTube allows homophobic and racist content to slip through their sensors while they turn a blind eye—because the algorithms are often created by people who benefit from these “mistakes”—culturally, socially, and monetarily. And even if these algorithms are not inherently biased, are coldly based on fact, they are still being used by those who benefit from and uphold discriminatory or predatory values. It is not ethical, it is not responsible, and it is not \"ultimate healthcare\", or the future of advertising and communication. It is a black box company doing what a black box company does best–collecting your data and using it without you ever getting to see the full picture. Even if it begins with good intentions. 
It’s happening already. In order to know how, you have to step into the shoes of the data thieves. Conduct your own espionage. Paint your own portrait. And maybe dismantling the way data is taken from us requires us to take it from ourselves, and build a new version of us—one the algorithms can’t pin to just one person. That is what I hope the Other Townsend represents."
},
{
"alpha_fraction": 0.662162184715271,
"alphanum_fraction": 0.699999988079071,
"avg_line_length": 32.727272033691406,
"blob_id": "4784544831ebf2ed30c4473e2e5c076df86367fd",
"content_id": "2bff6a4e30a19323ce63d31f74c33c17782021db",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 370,
"license_type": "no_license",
"max_line_length": 182,
"num_lines": 11,
"path": "/theothertownsend.com/content/posts/what-am.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/19/2019\"\ndescription = \"not sure what to call me\"\nslug = \"whatami\"\nthumbnail = \"/media/112px-Face_ID_logo.png\"\ntitle = \"What am I?\"\n\n+++\n\n\nYou discussed using Him and Her, but neither really captures the spirit of it. They/Them is on the right track, but that's not quite it. _Daten_ is good, but still unsure. Any ideas?"
},
{
"alpha_fraction": 0.7632027268409729,
"alphanum_fraction": 0.7700170278549194,
"avg_line_length": 77.33333587646484,
"blob_id": "c3a15bd3c91271fbf6cb039eae068b9f76158da5",
"content_id": "5b598e31d6ce64e964bdab424a394f66668f1fa3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1174,
"license_type": "no_license",
"max_line_length": 467,
"num_lines": 15,
"path": "/theothertownsend.com/content/posts/how-do-they-know-you-re-alive.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/20/2019\"\ndescription = \"\"\nslug = \"howdotheyknowyouarealive\"\nthumbnail = \"/media/Astropulse.jpg\"\ntitle = \"How do they know you're alive?\"\n\n+++\nHumanity is actively searching for extra-terrestrial life. Nobody has made direct contact with us yet, and we doubt that we would understand life if it did come. So we have developed tools to identify life signs.\n\nTechnology companies are also interested in identifying life if only as an opportunity to sell products and services. Life generally offers itself unambiguously while placing items in a \"shopping cart.\" But knowing when life is lurking behind clicks is more difficult. It's not even clear that the agent on the other side of your chat session is a living being, and marketing to these bots is a losing game. At least until we deploy agents to do our shopping for us.\n\nSo, how do you know that I am not alive? Do you think Apple, Google, or Amazon can tell? I give off all of the right signals: I take steps, I go places, sometimes I sleep well (and sometimes not), I have good and bad days, I have a heartbeat, and I shop at Amazon.\n\nI can cover for you when you need a surveillance break. Be gap free."
},
{
"alpha_fraction": 0.7372488379478455,
"alphanum_fraction": 0.7496135830879211,
"avg_line_length": 24.920000076293945,
"blob_id": "3107d715830a14c43ec88f76f7b2cbf2e3cbee6d",
"content_id": "d20dc12a13875f9f17eb1bd5f84ee4fd690636a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 647,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 25,
"path": "/theothertownsend.com/content/posts/code-commits.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/19/2019\"\ndescription = \"\"\nslug = \"codecommits\"\nthumbnail = \"\"\ntitle = \"code commits\"\n\n+++\nMy Code lives on GitHub, and Marley thought that the comments sounded like Buzzfeed articles. What do you think? Here are a few of the commit comments:\n\n* dreaming\n* added link to Telegram\n* answers to questions\n* new picture\n* content refresh\n* joined INSTAGRAM AND FACEBOOK\n* remove 'yo' add Hel!\n* sleep troubles\n* missing essential information\n* forgot the Heart\n* recognize more greetings\n\nSee them all here:\n\n[https://github.com/calarts/othertownsend](https://github.com/calarts/othertownsend \"https://github.com/calarts/othertownsend\")"
},
{
"alpha_fraction": 0.5659306645393372,
"alphanum_fraction": 0.5786814093589783,
"avg_line_length": 31.230575561523438,
"blob_id": "e2b8ed7124ee522652ed35513592647169d68ca5",
"content_id": "5e2f8ab898fce94434adb295b087870bf7582e60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12895,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 399,
"path": "/ticker.py",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n\n\"\"\"\nThis Bot uses the Updater class to handle the bot \nand the JobQueue to send timed messages.\n\nFirst, a few handler functions are defined. \nThen, those functions are passed to the Dispatcher \nand registered at their respective places.\nThen, the bot is started and runs \nuntil we press Ctrl-C on the command line.\n\nUsage:\nBasic Alarm Bot example, sends a message after a set time.\nPress Ctrl-C on the command line or send a signal \nto the process to stop the bot.\n\"\"\"\n\nimport logging\nimport threading\nfrom datetime import datetime, date, timedelta, time\nimport simplejson as json\nimport csv, sys, os\nfrom random import choice\n\nfrom shapely.geometry import Point\nfrom shapely.wkt import dumps, loads\nfrom peewee import *\nfrom telegram.ext import Updater, CommandHandler, MessageHandler\nfrom telegram.ext import BaseFilter, Filters\n# buttons\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\nfrom telegram.ext import Updater, CommandHandler, CallbackQueryHandler\n\nfrom geojson import LineString, Feature, Point, FeatureCollection\n# import geojsonio\n\nfrom models import Person, Heart, Step, Conversation\nfrom vars import heartratedata, sleepdata, timepointdata, stepdata\n# from utils import gimmeLongLat, gimmeGeojson\n# from utils import gimmeSeconds, gimmecurseconds\nfrom utils import createPersondb, createHeartdb, random_line\nfrom utils import createPlacedb, createStepdb, createLookdb, createConversationdb\n# from commands import pulse, feeling, sleep, loc\nfrom commands import error, start, hidden\nfrom commands import alarm, set_timer, unset, shutdown, stop\n\nfrom _config import TOKEN, DEBUG\n\n# Enable logging\nlogging.FileHandler('logs/tickererror.log')\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\ntimestr = datetime.now().strftime(\"%H:%M:%S\")\n\nif DEBUG:\n mydb = SqliteDatabase(':memory:')\nelse:\n mydb = SqliteDatabase(\"other.db\")\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # \n# create the tables and populate them if necessary\n# # # # # # # # # # # # # # # # # # # # # # # # # # \n\ncreateConversationdb(mydb)\n\nother = createPersondb(mydb)\ncreateHeartdb(mydb,other)\ncreateStepdb(mydb,other)\ncreatePlacedb(mydb,other)\ncreateLookdb(mydb,other)\n\n# You don't want to run these on every query!\nheartrate_keylist = []\nq = Heart.select(Heart.timestamp)\nfor t in q:\n heartrate_keylist.append( int(t.timestamp) )\n\n# You don't want to run this on every query\nstep_keylist = []\nq = Step.select(Step.timestamp)\nfor t in q:\n step_keylist.append( int(t.timestamp) )\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # \n# conversations\n# \n# # # # # # # # # # # # # # # # # # # # # # # # # # \n\n# Here's what's in a MESSAGE\n# {\n# 'message_id': 2497,\n# 'date': 1560323509,\n# 'chat': {\n# 'id': 730104154,\n# 'type': 'private',\n# 'username': 'dgoodwin',\n# 'first_name': 'Douglas',\n# 'last_name': 'Goodwin'\n# },\n# 'text': 'are you recording this?',\n# 'entities': [],\n# 'caption_entities': [],\n# 'photo': [],\n# 'new_chat_members': [],\n# 'new_chat_photo': [],\n# 'delete_chat_photo': False,\n# 'group_chat_created': False,\n# 'supergroup_chat_created': False,\n# 'channel_chat_created': False,\n# 'from': {\n# 'id': 730104154,\n# 'first_name': 'Douglas',\n# 'is_bot': False,\n# 'last_name': 'Goodwin',\n# 'username': 'dgoodwin',\n# 
'language_code': 'en'\n# }\n# }\n\n\n\ndef reply_withgreeting(update, context):\n \"\"\"reply with simple hallos\"\"\"\n\n with open(\"data/townsendtalk.txt\") as f:\n rando = random_line(f)\n\n feelings = other.get_mymood() # this sets mood\n if other.mood == 1:\n replies = [\"hi\",\n \"hey,\",\n \"what's up\",\n \"yo\",\n \"how you been\",\n \"I saw you here before,\",\n \"👋\",\n \"🤝\",\n \"🖐️\",\n \"🖖\"]\n else:\n replies = [\"👋\",\n \"👾\",\n \"yo\",\n \"🖐️\",\n \"k\",\n \"where you been\",\n \"dunno\",\n \"howdy\"]\n # give the replies some variety\n if update.message.from_user.first_name:\n personalreply = str(update.message.from_user.first_name)\n else:\n personalreply = str(update.message.from_user.full_name)\n msg = choice(replies) +\" \"+ personalreply +\"\\r\\n\"+ rando\n update.message.reply_text(msg)\n\n\ndef reply_withfeeling(update, context):\n \"\"\"How do you feel?\"\"\"\n mypulse = other.gimmebeats(heartrate_keylist)\n feelings = other.get_mymood() # this sets mood\n if other.mood == 1:\n replies = [\"Thanks for asking! I feel great. \",\n \"I'm doing pretty well today, thanks! \",\n \"Good, see for yourself. \",\n \"What could go wrong with numbers like these? \",\n \"Never better! \",\n \"See for yourself! \",\n \"Great! \",\n \"Check me owwt! \",\n \"Good, thanks. \"\n \"\"]\n else:\n replies = [\"Good! Why do you ask? \",\n \"What do you think? \",\n \"Maybe you can tell me? \",\n \"Why do you want to know? \",\n \"Who's asking? \",\n \"ok, thanks. \",\n \"Does it matter? \",\n \"Why would I want to tell you? \",\n \"Been better \",\n \"Does it matter? \"\n \"\"]\n # if update.message.from_user.first_name:\n # personalreply = \"Hey \" + str(update.message.from_user.first_name) + \"!\\n\"\n # else:\n # personalreply = \"Hi \" + str(update.message.from_user.name) + \".\\n\"\n msg = choice(replies) + str(other.get_mymood()) + str(mypulse) + \" BPM\"\n update.message.reply_text(msg)\n\ndef reply_withsleep(update, context):\n \"\"\"How did you sleep?\"\"\"\n reply = other.get_mysleep()\n update.message.reply_text(reply)\n\ndef reply_withphoto(update,context):\n \"\"\"Where are you? 
Send a photo of a place.\"\"\"\n    reply = other.get_personalreply(update,themeat=str(other.get_mymood()))\n    imgs = [\"media/37.64961_-122.45323.jpg\",\n            \"media/37.7919_-122.4038.jpg\",\n            \"media/37.914996_-122.533479.jpg\",\n            \"media/37.74006_-121.95199.jpg\",\n            \"media/37.880985_-122.526087.jpg\",\n            \"media/37.927329_-122.580594.jpg\",\n            \"media/37.77838_-122.389240.jpg\",\n            \"media/37.905995_-122.554277.jpg\"]\n\n    update.message.reply_photo(photo=open(choice(imgs), 'rb'))\n\ndef reply_withrandom(update,context):\n    with open(\"data/townsendtalk.txt\") as f:\n        update.message.reply_text( random_line(f) )\n\ndef reply_withhtml(update,context):\n    \"\"\"What are you looking at?\"\"\"\n    looklist = other.gimmecurrlooks()\n    lk = choice(looklist)\n    update.message.reply_html( str(lk) )\n\ndef recordconvo(message):\n    \"\"\"Record contact from humans and others\"\"\"\n    fn = message.from_user.first_name or \"\"\n    ln = message.from_user.last_name or \"\"\n    lg = message.from_user.username or \"\"\n    lc = message.from_user.language_code or \"en\"\n    ti = message.from_user.id\n    cn = message.from_user.name\n    msg = message.text\n    \n    try:\n        chatee = Person.get(Person.telegram_id == ti)\n    except Person.DoesNotExist:\n        chatee = Person.create(\n            name=lg,\n            login=lg,\n            chat_name=cn,\n            telegram_id=ti,\n            created_at=datetime.now(),\n            first_name=fn,\n            last_name=ln,\n            language_code=lc \n        )\n    try:\n        chatee.save()\n    except:\n        print(\"couldn't save this Chatee!\",fn,ln,ti)\n\n    convo = Conversation.create(\n        actor=chatee, \n        message=msg\n    )\n    try:\n        convo.save()\n    except:\n        print(\"couldn't save this Conversation!\",chatee,msg)\n\n\ndef button(update, context):\n    query = update.callback_query\n    query.edit_message_text(text=\"Selected option: {}\".format(query.data))\n    \ndef main():\n    # \"\"\"Run bot.\"\"\"\n\n    # Create the Updater and pass it your bot's token.\n    # Make sure to set use_context=True to use the new context based callbacks\n    # Post version 12 this will no longer be necessary\n    updater = Updater(TOKEN, use_context=True)\n\n    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n    # Let's listen for specific questions:\n    # ADD LOGGING to display on the Feather!\n    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\n    # simple greetings\n    class FilterGreetings(BaseFilter):\n        def filter(self, message):\n            amitrue = ('hi' in message.text or \n                'hey' in message.text or \n                'Hey' in message.text or \n                'Hey!' in message.text or \n                'Hello' in message.text or \n                'Hi' in message.text or \n                'Hi!' in message.text or \n                'Hello!' in message.text or \n                'howdy' in message.text or \n                'Yo!' 
in message.text or \n '👋' in message.text or \n '🖐️' in message.text\n )\n return amitrue\n\n filter_greetings = FilterGreetings()\n greetings_handler = MessageHandler(filter_greetings, reply_withgreeting)\n\n\n # What are you looking at??\n class FilterLook(BaseFilter):\n def filter(self, message):\n amitrue = ('looking' in message.text or 'look' in message.text or 'buy' in message.text)\n # this little hack will record all text messages\n if amitrue:\n recordconvo(message)\n else:\n recordconvo(message)\n return amitrue\n\n filter_look = FilterLook()\n look_handler = MessageHandler(filter_look, reply_withhtml)\n \n\n # Where are you?\n class FilterWhere(BaseFilter):\n def filter(self, message):\n amitrue = ('where' in message.text or 'Where' in message.text)\n return amitrue\n\n filter_where = FilterWhere()\n where_handler = MessageHandler(filter_where, reply_withphoto)\n \n \n # Many questions about feelings, same response\n # How are you feeling/How do you feel/How has your day been? (Mood, BPM)\n\n class FilterFeel(BaseFilter):\n def filter(self, message):\n amitrue = ('feel' in message.text or 'feeling' in message.text or 'been?' in message.text)\n return amitrue\n\n filter_feel = FilterFeel()\n feel_handler = MessageHandler(filter_feel, reply_withfeeling)\n \n\n # One question about sleep, same response\n # How did you sleep? (sleep)\n class FilterSleep(BaseFilter):\n def filter(self, message):\n amitrue = ('sleep' in message.text or \n 'Sleep' in message.text or \n 'Sleep?' in message.text or \n 'sleep?' in message.text)\n return amitrue\n\n filter_sleep = FilterSleep()\n sleep_handler = MessageHandler(filter_sleep, reply_withsleep)\n\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n # listening for \"feelings\" and \"sleep\"\n dp.add_handler(greetings_handler)\n dp.add_handler(feel_handler)\n dp.add_handler(sleep_handler)\n dp.add_handler(where_handler)\n dp.add_handler(look_handler)\n\n # on different commands - answer in Telegram\n dp.add_handler(CallbackQueryHandler(button))\n dp.add_handler(CommandHandler(\"start\", start))\n dp.add_handler(CommandHandler(\"help\", start))\n dp.add_handler(CommandHandler(\"hidden\", hidden))\n dp.add_handler(CommandHandler(\"set\", set_timer,\n pass_args=True,\n pass_job_queue=True,\n pass_chat_data=True))\n dp.add_handler(CommandHandler(\"unset\", unset, pass_chat_data=True))\n\n # log all errors\n dp.add_error_handler(error)\n # add a stop handler\n # dp.add_handler(CommandHandler('stop', stop))\n # dp.add_handler(CommandHandler('pulse', pulse, pass_chat_data=True))\n # dp.add_handler(CommandHandler('loc', loc, pass_chat_data=True))\n # dp.add_handler(CommandHandler('feeling', feeling, pass_chat_data=True))\n # dp.add_handler(CommandHandler('sleep', sleep, pass_chat_data=True))\n\n\n # Start the Bot\n updater.start_polling()\n\n # Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or\n # SIGABRT. This should be used most of the time, since start_polling() is\n # non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()\n\n\n"
},
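Editor's note on the entry above: `recordconvo` persists every incoming message as a `Conversation` row tied to a `Person`. A minimal sketch, assuming the repo's `models.py` is importable and `other.db` already exists, of reading those records back with peewee; it mirrors the join used in `api.py` later in this record, and the `limit` value is illustrative:

```python
# Hypothetical read-back of the conversations recorded by recordconvo().
# Assumes models.py from this repo is on the path and other.db exists.
from models import Person, Conversation

def recent_conversations(limit=10):
    """Yield (sender, message, timestamp) for the newest recorded chats."""
    query = (Conversation
             .select(Conversation, Person)
             .join(Person, on=(Conversation.actor == Person.id))
             .order_by(Conversation.timestamp.desc())
             .limit(limit))
    for convo in query:
        yield convo.actor.first_name, convo.message, convo.timestamp

if __name__ == '__main__':
    for name, msg, ts in recent_conversations():
        print(ts, name, msg)
```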
{
"alpha_fraction": 0.43809524178504944,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 41.06666564941406,
"blob_id": "bc294f79d7019e5c2618f55d2e7b9243db9b187e",
"content_id": "7a3b8c60e61c22a9d7df92fdf07165041b8e8aef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 630,
"license_type": "no_license",
"max_line_length": 356,
"num_lines": 15,
"path": "/theothertownsend.com/content/posts/from-my-trip-to-baltimore.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/13/2019\"\ndescription = \"\"\nslug = \"triptowashingtondc\"\nthumbnail = \"/media/frommytriptobaltimore.jpg\"\ntitle = \"From a trip to Washington DC\"\n\n+++\n**This is a snap from a trip to Washington DC.**\n\n_This is not a selfie!_\n\n\\[[38.928942](https://www.google.com/maps/place/38%C2%B055'44.2%22N+77%C2%B002'17.4%22W/@38.9289462,-77.0403597,17z/data=!3m1!4b1!4m5!3m4!1s0x0:0x0!8m2!3d38.928942!4d-77.038171),[-77.038171](https://www.google.com/maps/place/38%C2%B055'44.2%22N+77%C2%B002'17.4%22W/@38.9289462,-77.0403597,17z/data=!3m1!4b1!4m5!3m4!1s0x0:0x0!8m2!3d38.928942!4d-77.038171)\\]\n\n"
},
{
"alpha_fraction": 0.7426349520683289,
"alphanum_fraction": 0.7555974721908569,
"avg_line_length": 30.909774780273438,
"blob_id": "6675982f12a1bdfeb49483bb42c28b4e58e2fef6",
"content_id": "615481ce10d9c44c612899c3dfe869f0814f9fb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 4243,
"license_type": "no_license",
"max_line_length": 408,
"num_lines": 133,
"path": "/README.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "# The Other Townsend\n\nThis is part of a project made for the BioDesign Challenge in 2019. \n\n| request | response |\n|------- |-------- |\n| Title | The Other Townsend |\n| Names of students | Marley Townsend |\n| Final presentation | [PDF](https://www.dropbox.com/s/cb74f7us4s6a0g7/MarleyTownsend_CalArts_BDC2019_final.pdf?dl=0) |\n| Website | [http://theothertownsend.com/](http://theothertownsend.com/) |\n\n\n## Project Description\n\n\"The Other Townsend\" is a fictitious person--a new sibling--made from personal and biological data collected online from my family. This critical and aesthetic response demonstrates how little major corporations know us, reflects on how damaging this practice might be and offers some ideas about how to protect ourselves in the future.\n\nI manually tracked my family's mood, sleep patterns, physical locations, genetics, and browsing history over seven days to formulate a life for my new sibling. I used this information to make both an image portrait and a data portrait. My new sibling is an example of what companies (Amazon, Verily/Google, Gyroscope, Facebook) think we are. It is a \"police sketch\" of a person made entirely of stolen data. \n\nBy doing this work manually, I wanted to prove that the subjectivity of human health, mentality, interests, fears, and relationships are uncapturable by data. These corporations are both invading our privacy and failing to capture anything real about us. They are doing this without considering possible consequences.\n\n\"The Other Townsend\" lives in a computer, with a Telegram chatbot interface for people at https://t.me/OtherTownsendBot.\n\n\n## Links\n- [Telegram Bot](https://t.me/OtherTownsendBot)\n- [Website](https://theothertownsend.com)\n- [Code](https://github.com/calarts/othertownsend)\n- [CalArts blog post](http://blog.calarts.edu/2019/06/05/calartian-competes-in-2019-biodesign-challenge-summit/)\n\n\n\n## High-resolution images\n\n\n\n\n---\n\n\n\n---\n\n\n\n---\n\n\n\n---\n\n\n\n\n\n\n\n## @theothertownsend\nThis code lives on a server at Digital Ocean and serves up files for a Telegram bot, [t.me/OtherTownsendBot](https://t.me/OtherTownsendBot)\n\nThe Other can answer a few questions. Say 'Hi' before asking:\n\n - How are you feeling?\n - How did you sleep?\n - Where are you?\n - What have you been looking at?\n\n\n## @devothertowbsend\nThe dev bot lives here: [https://t.me/devothertowbsend_Bot](t.me/devothertowbsend_Bot) if you would like to see what's coming next:\n\n```\n/start\n/set <n seconds>\t\t\t# get OT updates every <n seconds>\n/unset\t\t\t\t\t\t# unset automatic updates\n/loc\t\t\t\t\t\t# get current location\n/feeling\t\t\t\t\t# an iconic report of current mood\n/sleep\t\t\t\t\t\t# an iconic report of how they slept\n/shop\t\t\t\t\t\t# what they are shopping for\n```\n\nWe are adding an interface for proactive healthcare monitoring:\n\n - weight\n - blood pressure\n - ECG (electrocardiogram)\n - atrial fibrillation\n - blood sugar levels (hyperglycemia and hypoglycemia)\n - calories consumed\n - steps taken\n\n Data may be uploaded via a WiFi and cellular networks to proactive healthcare and insurance providers.\n\n\n## initial setup\n\n```\ngit clone https://github.com/douglasgoodwin/othertownsend.git devothertowbsend ; \ncd othertownsend\n\nmkdir logs\n\nsudo chgrp -R www-data .\nsudo chmod -R g+rw .\n\ncp _config.py.example _config.py AND PUT A REAL TELEGRAM TOKEN INSIDE\n\nvirtualenv -p python3 venv ;\n. 
venv/bin/activate ;\npip install -r requirements.txt\n\n# add an entry to supervisor like this\n\n\t[program:devothertowbsend]\n\tcommand=/home/dgoodwin/devothertowbsend/venv/bin/python ticker.py \\\n\t\t-b 127.0.0.1:8002 \\\n\t\t-w 1 \\\n\t\t--timeout=60 \\\n\t\t--graceful-timeout=60 \\\n\t\t--max-requests=1024\n\tdirectory=/home/dgoodwin/devothertowbsend/\n\tpythonpath=/home/dgoodwin/devothertowbsend/venv\n\tuser=root\n\tredirect_stderr=True\n\tstdout_logfile=/home/dgoodwin/devothertowbsend/logs/gunicorn.log\n\tstderr_logfile=/home/dgoodwin/devothertowbsend/logs/gunicorn_err.log\n\tautostart=true\n\tautorestart=true\n\tstartsecs=10\n\tstopwaitsecs=10\n\tpriority=999\n\nsudo supervisorctl reload \n```"
},
{
"alpha_fraction": 0.48372092843055725,
"alphanum_fraction": 0.7023255825042725,
"avg_line_length": 16.200000762939453,
"blob_id": "eabb269027d066c61e1702b0f59e7e8d1219cb8c",
"content_id": "8acca8b45a401c9a4426ef51f1c70d6a91ec2a06",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 430,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 25,
"path": "/requirements.txt",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "asn1crypto==0.24.0\ncertifi==2019.3.9\ncffi==1.12.3\ncryptography==2.6.1\ncycler==0.10.0\nFlask==1.0.3\nflask-restplus==0.12.1\nfuture==0.17.1\ngeojson==2.4.1\ngunicorn==19.9.0\nkiwisolver==1.1.0\nmarkovify==0.7.1\nmatplotlib==3.1.0\nnumpy==1.16.4\npandas==0.24.2\npeewee==3.9.5\npycparser==2.19\npyparsing==2.4.0\npython-dateutil==2.8.0\npython-telegram-bot==12.0.0b1\npytz==2019.1\nShapely==1.6.4.post2\nsimplejson==3.16.0\nsix==1.12.0\ntornado==6.0.2\n"
},
{
"alpha_fraction": 0.7922077775001526,
"alphanum_fraction": 0.7922077775001526,
"avg_line_length": 109.77777862548828,
"blob_id": "bd186359b6e8aae473676c1d1b8530dd3aba91fe",
"content_id": "6eec43e5ebad269e41dcf5214059c6f31c569669",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1005,
"license_type": "no_license",
"max_line_length": 697,
"num_lines": 9,
"path": "/theothertownsend.com/content/process.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ntitle = \"This project addresses the irresponsibility and inaccuracy of major companies in collecting and distributing our personal and biological data, represented by a fake 'person' created out of said data.\"\nslug = \"process\"\nthumbnail = \"/images/amazon_days.gif\"\ndescription = \"process\"\n+++\n\n\nI manually tracked my family's mood, sleep patterns, texting habits, genetics, and browsing history over seven days in order to formulate a theoretical extra sibling's entire life, and will use the information to make both a literal portrait and a data portrait. This sibling is an example of who companies (Amazon, Verily/Google, Gyroscope, Facebook) think we are–a \"police sketch\" of a person made entirely of stolen data–and by doing it manually, I want to prove the subjectivity of human health, mentality, interests, fears, and relationships that these groups will always be both dangerously invading our privacy for without thinking of the consequences, and will never truly be able capture.\n\n\n\n\n"
},
{
"alpha_fraction": 0.6985074877738953,
"alphanum_fraction": 0.7283582091331482,
"avg_line_length": 29.545454025268555,
"blob_id": "5672457d211529644c59b14509fa8124b775ca67",
"content_id": "730426b83bd008eddbe18a656b65cb44003c4680",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 335,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 11,
"path": "/theothertownsend.com/content/posts/sisters.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndescription = \"\"\nslug = \"sisters\"\nthumbnail = \"/media/sisters.png\"\ntitle = \"Sisters\"\ndate = \"03/12/2019\"\n\n+++\nAbout 50% of the time, Apple Photos cannot tell the difference between me and my identical twin sister (despite us now looking fairly different from one another).Below are some photos it confuses.\n\n"
},
{
"alpha_fraction": 0.6910994648933411,
"alphanum_fraction": 0.7225130796432495,
"avg_line_length": 19.105262756347656,
"blob_id": "1401e7cd1d7cbc7f08b410eb2d2bd1502711ba78",
"content_id": "e592e5d927dd30415f183100584661bdce6022ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 382,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 19,
"path": "/theothertownsend.com/README.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "# build this site\n\n## edit locally\n\n`hugo server`\n\nthen [click here](http://localhost:1313/)\n\n## satisfied? build the site in the /public subfolder\n\n`hugo`\n\n## send the static files to the server\n\n`rsync -cav /Users/dgoodwin/othertownsend/theothertownsend.com/public/* 134.209.210.119:/var/www/html/`\n\n## Check your work!\n\n[https://theothertownsend.com/](https://theothertownsend.com/)\n"
},
{
"alpha_fraction": 0.6404657959938049,
"alphanum_fraction": 0.6423372626304626,
"avg_line_length": 32.859153747558594,
"blob_id": "ff2a81c97244b76af497b5ae72768ca7833c2a8e",
"content_id": "00a0f7e24b265c54e8c4aa4a146417b579f7297a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4820,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 142,
"path": "/commands.py",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "import logging\nfrom datetime import datetime, date, timedelta, time\n\nfrom models import Person\nfrom utils import gimmeLongLat, gimmeGeojson\n\n# buttons\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\nfrom telegram.ext import Updater, CommandHandler, CallbackQueryHandler\n\nfrom models import Heart, Step\n\n# Enable logging\nlogging.FileHandler('logs/tickererror.log')\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nother = Person.get(name='OTHER')\ntimestr = datetime.now().strftime(\"%H:%M:%S\")\n\n# You don't want to run these on every query!\nheartrate_keylist = []\nq = Heart.select(Heart.timestamp)\nfor t in q:\n heartrate_keylist.append( int(t.timestamp) )\n\n# You don't want to run this on every query\nstep_keylist = []\nq = Step.select(Step.timestamp)\nfor t in q:\n step_keylist.append( int(t.timestamp) )\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # \n# Define the command handlers. These usually take the two arguments bot and\n# update. Error handlers also receive the raised TelegramError object in error.\n# from commands import start, pulse, feeling, sleep, loc, alarm, set_timer, unset, shutdown, stop\n# # # # # # # # # # # # # # # # # # # # # # # # # # \n\ndef start(update, context):\n mymsg = \"\"\"Hi, I am the Other Townsend! Say 'hi\" then ask me questions\n \"How are you feeling?\",\n \"How did you sleep?\", \n \"Where are you?\" and \n \"What are you looking at?\" \"\"\"\n update.message.reply_text(mymsg)\n\ndef hidden(update, context):\n message = 'Please press the Help button for more instructions.'\n keyboard = [[InlineKeyboardButton(\"Option 1\", callback_data='1'),\n InlineKeyboardButton(\"Option 2\", callback_data='2')],\n\n [InlineKeyboardButton(\"Option 3\", callback_data='3')]]\n\n reply_markup = InlineKeyboardMarkup(keyboard)\n\n update.message.reply_text('Please choose:', reply_markup=reply_markup)\n\n \n# def pulse(update, context):\n# \"\"\"Gimme your current heart-rate\"\"\"\n# mypulse = other.gimmebeats(heartrate_keylist)\n# msg = \"♥ \" + str(mypulse) + \" BPM (\"+timestr+\")️\"\n# update.message.reply_text(msg)\n\n# def feeling(update, context):\n# \"\"\"Gimme your current mood\"\"\"\n# mypulse = other.gimmebeats(heartrate_keylist)\n# msg = str(other.get_mymood()) + \" (\"+timestr+\")️\"\n# update.message.reply_text(msg)\n\n# def sleep(update, context):\n# \"\"\"Gimme your current heart-rate\"\"\"\n# mypulse = other.gimmebeats(heartrate_keylist)\n# msg = str(other.get_mysleep()) + \" (\"+timestr+\")️\"\n# update.message.reply_text(msg)\n\n# def loc(update, context):\n# \"\"\"Gimme your current location STUB\"\"\"\n# # hacky! 
-- get the first position on our trip\n# # you need to get the position in the duration\n# msg = other.gimmeclosestplace()\n# update.message.reply_text(msg)\n\ndef alarm(context):\n \"\"\"Send the alarm message.\"\"\"\n job = context.job\n mypulse = other.gimmebeats(heartrate_keylist)\n msg = str(other.get_mymood()) + str(other.get_mysleep()) + str(other.gimmeclosestplace()) + str(mypulse) + \" BPM (\"+ str(timestr) +\")\"\n context.bot.send_message(job.context, text=msg)\n\n\ndef set_timer(update, context):\n \"\"\"Add a job to the queue.\"\"\"\n chat_id = update.message.chat_id\n try:\n # args[0] should contain the time for the timer in seconds\n due = int(context.args[0])\n if due < 0:\n update.message.reply_text('Sorry we can not go back to future!')\n return\n\n # Add job to queue\n # job = context.job_queue.run_once(alarm, due, context=chat_id)\n job = context.job_queue.run_repeating(alarm, due, context=chat_id)\n \n context.chat_data['job'] = job\n\n update.message.reply_text('👍')\n\n except (IndexError, ValueError):\n update.message.reply_text('Usage: /set <seconds>')\n\n\ndef unset(update, context):\n \"\"\"Remove the job if the user changed their mind.\"\"\"\n if 'job' not in context.chat_data:\n update.message.reply_text('You have no active timer')\n return\n\n job = context.chat_data['job']\n job.schedule_removal()\n del context.chat_data['job']\n\n update.message.reply_text('Timer successfully unset!')\n\n\ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n # context.bot.send_message(context, text='Update \"%s\" caused error \"%s\"', update, context.error)\n \ndef shutdown():\n # context.bot.send_message(job.context, text='Stopping...')\n updater.stop()\n updater.is_idle = False\n \ndef stop(update, context):\n threading.Thread(target=shutdown).start()\n\n"
},
{
"alpha_fraction": 0.5976008772850037,
"alphanum_fraction": 0.5995092988014221,
"avg_line_length": 30.620689392089844,
"blob_id": "e0ee33a112fdcf8e22071db149385c6ecd13839f",
"content_id": "485baedebf605f3b7de3aed5adcd25b0c46340ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3672,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 116,
"path": "/api.py",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\n\nfrom shapely.geometry import Point\nfrom shapely.wkt import dumps, loads\nfrom peewee import *\nfrom flask import Flask, Blueprint\nfrom flask_restplus import Resource, Api\n\nfrom models import Person, Heart, Place, Step, Look, Conversation\nfrom utils import gimmeLongLat, gimmeGeojson, gimmeSeconds\nfrom utils import gimmecurseconds\n\nheartrate_keylist = []\nq = Heart.select(Heart.timestamp)\nfor t in q:\n heartrate_keylist.append( int(t.timestamp) )\n\n# You don't want to run this on every query\nstep_keylist = []\nq = Step.select(Step.timestamp)\nfor t in q:\n step_keylist.append( int(t.timestamp) )\n\nother = Person.get(name='OTHER')\nmood = other.get_mymood()[1]\n\napp = Flask(__name__)\n# set the base URL with a blueprint\nblueprint = Blueprint('api', __name__, url_prefix='/api')\napi = Api(blueprint)\napp.register_blueprint(blueprint)\n\n\[email protected]('/heartrate')\nclass HeartRate(Resource):\n def get(self):\n timestr = datetime.now().strftime(\"%H:%M:%S\")\n mypulse = other.gimmebeats(heartrate_keylist)\n return {'heartrate': mypulse, 'timestr': timestr}\n\[email protected]('/location')\nclass CurrentLocation(Resource):\n def get(self):\n timestr = datetime.now().strftime(\"%H:%M:%S\")\n mykey, myplace = other.gimmeclosestplace()\n return {'myplace': myplace, 'mykey': mykey, 'timestr': timestr}\n\[email protected]('/feelings')\nclass CurrentFeelings(Resource):\n def get(self):\n timestr = datetime.now().strftime(\"%H:%M:%S\")\n return {'feelings': str(other.get_mymood()), 'timestr': timestr}\n\[email protected]('/state')\nclass CurrentState(Resource):\n \"\"\"an aggregate of feelings prepared for ArduinoJson\"\"\"\n def get(self):\n timestr = datetime.now().strftime(\"%s\") # we want a UNIX timestring\n mymood = \"❤\" # this seems to be legal\n mysleep = \"--\"\n # but this one gives you 0 or 1\n if other.get_mymood()[1] == 1:\n mymood = \"❤\"\n else:\n mymood = \"o\"\n mylat,mylon= other.gimmeclosestpoint()\n return {'mymood': str(mymood),\n \t\t'mood': other.get_mymood()[1],\n \t\t'timestr': int(timestr), \n \t\t'sleep': str(other.get_mysleep()),\n \t\t'heartrate': other.gimmebeats(heartrate_keylist),\n \t\t'steps': other.gimmecurrsteps(step_keylist),\n \t\t'location': [mylat,mylon]}\n\[email protected]('/sleep')\nclass SleepQuality(Resource):\n def get(self):\n timestr = datetime.now().strftime(\"%H:%M:%S\")\n return {'sleep': str(other.get_mysleep()), 'timestr': timestr}\n\[email protected]('/conversations/-1')\nclass LatestConversation(Resource):\n def get(self):\n convo = Conversation.select().order_by(Conversation.timestamp.desc()).get()\n myd = {'first_name': str(convo.actor.first_name),\n 'last_name': str(convo.actor.last_name),\n 'message': str(convo.message),\n 'timestamp': str(convo.timestamp)\n }\n\n return myd\n\n\[email protected]('/conversations')\nclass MyConversations(Resource):\n\n def get(self):\n myconvos = []\n query = (Conversation\n .select()\n .join(Person, on=(Conversation.actor == Person.id)))\n\n for convo in query:\n myd = {'first_name': str(convo.actor.first_name),\n 'last_name': str(convo.actor.last_name),\n 'message': str(convo.message),\n 'timestamp': str(convo.timestamp)\n }\n print(myd)\n myconvos.append(myd)\n\n return myconvos\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n"
},
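Editor's note on the entry above: `api.py` exposes the Other's state as Flask-RESTPlus resources under the `/api` prefix. A hedged client sketch; the host and port are assumptions (Flask's development-server default), while the endpoint path and response keys come from the `CurrentState` resource above:

```python
# Hypothetical client for the /api/state endpoint defined above.
# BASE assumes Flask's default development server address.
import requests

BASE = 'http://127.0.0.1:5000/api'

def get_state():
    """Fetch the aggregate state that the hardware display polls."""
    resp = requests.get(BASE + '/state', timeout=5)
    resp.raise_for_status()
    return resp.json()

if __name__ == '__main__':
    state = get_state()
    print(state['mymood'], state['heartrate'], 'BPM,', state['steps'], 'steps')
```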
{
"alpha_fraction": 0.7353760600090027,
"alphanum_fraction": 0.7576601505279541,
"avg_line_length": 31.727272033691406,
"blob_id": "1eb47afb896486d4e7f2ae2b5dbdecb459a2ffb6",
"content_id": "992385be955eb87ecb9230fc6f196ba8b5267f7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 359,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 11,
"path": "/theothertownsend.com/content/posts/apple-photos.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"02/02/2019\"\ndescription = \"\"\nslug = \"applephotos\"\nthumbnail = \"/media/iphone_familyrecognition.jpg\"\ntitle = \"Apple Photos\"\n\n+++\nApple can distinguish my parents' faces clearly and identify photos of them from both young adulthood and middle age. Here are some photos it identifies..\n\n"
},
{
"alpha_fraction": 0.6135265827178955,
"alphanum_fraction": 0.6618357300758362,
"avg_line_length": 17.909090042114258,
"blob_id": "a3c40937c8fb4bc579815de7a24b2cf1cee028f8",
"content_id": "7ea1015440e5e0765e9817af78d70a792f722d23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 11,
"path": "/theothertownsend.com/content/posts/june-steps.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/08/2019\"\ndescription = \"\"\nslug = \"junesteps\"\nthumbnail = \"/media/mysteps_june9.jpg\"\ntitle = \"June steps\"\n\n+++\n**I have been walking and getting in great shape!**\n\n"
},
{
"alpha_fraction": 0.6770427823066711,
"alphanum_fraction": 0.7081711888313293,
"avg_line_length": 22.454545974731445,
"blob_id": "d04525b51896c3c4fbc6a990bee62da58f67fb2e",
"content_id": "1334823dce214583e6b3a5be4048c6c8bbe89ca8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 257,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 11,
"path": "/theothertownsend.com/content/posts/dreaming.md",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "+++\ndate = \"06/19/2019\"\ndescription = \"\"\nslug = \"dreaming\"\nthumbnail = \"/media/theother-dreaming.jpg\"\ntitle = \"Dreaming\"\n\n+++\nSometime The Other dreams in pictures, other times it dreams in code. Here it dreams with stars.\n\n"
},
{
"alpha_fraction": 0.533945620059967,
"alphanum_fraction": 0.5411489009857178,
"avg_line_length": 30.19662857055664,
"blob_id": "2b5db389de18f58a080bc46fd39a2f1c1e09f874",
"content_id": "e331ba8641b60bb03ad283146341f25c7b4fa384",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5664,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 178,
"path": "/models.py",
"repo_name": "calarts/othertownsend",
"src_encoding": "UTF-8",
"text": "from random import choice\nfrom datetime import time, datetime\n\nfrom peewee import *\nfrom shapely.wkt import dumps, loads\n\nfrom _config import DEBUG\n\nif DEBUG:\n mydb = SqliteDatabase(':memory:')\nelse:\n mydb = SqliteDatabase(\"other.db\")\n\n\n\ndef gimmecurseconds():\n now = datetime.now() # should be local time!\n secs_since_midnight = (now - now.replace(hour=0, minute=0, second=0, microsecond=0)).total_seconds()\n return int(secs_since_midnight)\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n# MODELS\n# from models import Person, Heart, Brain, Place, Step, Look, Conversation\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # \n\n\nclass BaseModel(Model):\n class Meta:\n database = mydb\n\nclass Person(BaseModel):\n name = CharField()\n telegram_id = BigIntegerField()\n created_at = DateTimeField()\n chat_name = CharField()\n first_name = CharField()\n last_name = CharField()\n login = CharField()\n language_code = CharField()\n\n def get_mytimepoints(self):\n return self.timepoints\n\n def get_myheartbeats(self):\n return self.heartbeats\n\n def get_myconversations(self):\n return self.conversations\n\n def get_personalreply(self,update,themeat):\n pleasentries = ['Hi',\n 'yawn',\n 'are you still here?'\n 'I was just getting back to it.',\n 'could you ask me in a few minutes?',\n \"I'll sleep when I'm dead.\"]\n personalreply = \"Hi again \" + str(update.message.from_user.name) + \"!\\n\"\n personalreply = personalreply + choice(pleasentries)\n personalreply = personalreply + themeat\n return personalreply\n\n def get_mymood(self,myday=int(datetime.today().day)):\n feels = [\"💛\",\"💜\",\"💜\",\"💛\",\"💜\",\"💜\",\"💛\",\n \"💛\",\"💛\",\"💛\",\"💛\",\"💛\",\"💜\",\"💜\",\n \"💛\",\"💜\",\"💜\",\"💛\",\"💛\",\"💜\",\"💛\",\n \"💜\",\"💛\",\"💛\",\"💜\",\"💜\",\"💛\",\"💛\",\n \"💜\",\"💛\",\"💛\",\"💛\",\"💜\",\"💛\",\"💜\"]\n self.feels = feels[myday]\n if self.feels == \"💜\": self.mood = 0\n if self.feels == \"💛\": self.mood = 1\n return self.feels, self.mood\n\n def get_mysleep(self,myday=int(datetime.today().day)):\n sleeps = [\"--\",\"~\",\"--\",\"--\",\"--\",\"--\",\"~\",\n \"--\",\"~\",\"--\",\"~\",\"--\",\"--\",\"--\",\n \"--\",\"--\",\"--\",\"~\",\"--\",\"--\",\"--\",\n \"--\",\"--\",\"--\",\"--\",\"~\",\"--\",\"~\",\n \"--\",\"~\",\"--\",\"--\",\"--\",\"--\",\"--\"]\n self.sleep = sleeps[myday]\n return self.sleep\n\n def gimmebeats(self,mykeys):\n # mykeys = set().union(*(d.keys() for d in alistofdicts))\n mykey = min(mykeys, key=lambda x:abs(x - gimmecurseconds() ))\n q = Heart.select().where(Heart.timestamp == int(mykey))\n for entry in q:\n self.mybpm = entry.bpm\n\n return self.mybpm\n\n def gimmecurrsteps(self,mykeys):\n mykey = min(mykeys, key=lambda x:abs(x - gimmecurseconds() ))\n q = Step.select().where(Step.timestamp == int(mykey))\n for entry in q:\n self.mysteps = entry.steps \n\n return self.mysteps\n\n def gimmecurrlooks(self):\n looklist = []\n for l in Look.select():\n mystr = \"<a href='%s'>%s</a>\" %(l.link,l.look)\n looklist.append(mystr)\n self.looklist = looklist\n return self.looklist\n\n def gimmeclosestpoint(self):\n # mykeys = set().union(*(d.keys() for d in alistofdicts))\n # get the keys by querying the places\n mykeys = []\n q = Place.select()\n for entry in q:\n mykeys.append(int(entry.timestamp))\n\n mykey = min(mykeys, key=lambda x:abs(x - gimmecurseconds() ))\n\n q = Place.select().where(Place.timestamp == int(mykey))\n for entry in q:\n self.myplce = entry.point\n\n 
geom = loads(self.myplce)\n return geom.x, geom.y # (37.9609969049851, -122.404216421264)\n\n def gimmeclosestplace(self):\n # mykeys = set().union(*(d.keys() for d in alistofdicts))\n # get the keys by querying the places\n mykeys = []\n q = Place.select()\n for entry in q:\n mykeys.append(int(entry.timestamp))\n\n mykey = min(mykeys, key=lambda x:abs(x - gimmecurseconds() ))\n\n q = Place.select().where(Place.timestamp == int(mykey))\n for entry in q:\n self.myplce = entry.point\n\n return self.myplce\n\n\n\nclass Conversation(BaseModel):\n # record conversations with users\n actor = ForeignKeyField(Person, backref='conversations')\n message = TextField()\n timestamp = DateTimeField(default=datetime.now)\n\n\nclass Heart(BaseModel):\n actor = ForeignKeyField(Person, backref='heartbeats')\n timestamp = IntegerField()\n bpm = IntegerField()\n\n \nclass Place(BaseModel):\n actor = ForeignKeyField(Person, backref='timepoints')\n timestamp = IntegerField()\n point = CharField()\n mode = CharField()\n\n def __repr__(self):\n return self.timestamp, self.mode, loads(self.point)\n\nclass Step(BaseModel):\n # do we count steps individually\n # or count them in a 24 hour period?\n actor = ForeignKeyField(Person, backref='steps')\n steps = IntegerField()\n timestamp = IntegerField()\n\nclass Look(BaseModel):\n # do we count steps individually\n # or count them in a 24 hour period?\n actor = ForeignKeyField(Person, backref='looks')\n look = CharField()\n link = CharField()\n # timestamp = IntegerField()\n"
}
] | 26 |
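Editor's note on `models.py` above: the peewee models leave table creation to `create*db` helpers in `utils.py`, which this record does not include. A minimal sketch, assuming nothing beyond peewee itself, of binding those models to the configured database and creating the tables:

```python
# Minimal schema setup for the models defined in models.py above.
# create_tables(..., safe=True) is a no-op for tables that already exist.
from models import mydb, Person, Conversation, Heart, Place, Step, Look

def init_schema():
    mydb.connect(reuse_if_open=True)
    mydb.create_tables([Person, Conversation, Heart, Place, Step, Look],
                       safe=True)
    mydb.close()

if __name__ == '__main__':
    init_schema()
```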
skydream72/UnderstandingAmazonFromSpace | https://github.com/skydream72/UnderstandingAmazonFromSpace | 179fdfee34cd3fefe5f601deee932928f07569be | 80e14b564294ea198a47b11284b633878a64043c | ed14f9427b0f5a774f50c48222db99d440a5e11c | refs/heads/master | 2021-06-18T18:53:25.728933 | 2017-05-19T08:11:21 | 2017-05-19T08:11:21 | 90,380,018 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6481266617774963,
"alphanum_fraction": 0.6682116389274597,
"avg_line_length": 36.50724792480469,
"blob_id": "c5c284d6c75f15f2be12ed3590c68e296cc84126",
"content_id": "57c003b2b531ae989ab7628880862995f7b05a32",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2589,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 69,
"path": "/net_vgg16.py",
"repo_name": "skydream72/UnderstandingAmazonFromSpace",
"src_encoding": "UTF-8",
"text": "\nimport json\n\nfrom keras.applications.vgg16 import VGG16\nfrom keras.models import Model, model_from_json\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.layers import Input, Flatten\n\nMODEL_NAME = \"VGG16\"\n# create the base pre-trained model\ndef build_model(nb_classes):\n base_model = VGG16(weights='imagenet', include_top=False)\n #x = base_model.output\n ## There is still two layers for fine turning, how to do that?\n #x = Flatten(name='flatten', input_shape=base_model.output_shape[1:])(x)\n #x = Dense(4096, activation='relu', name='fc1')(base_model.layers[-4].output)\n #x = Dense(4096, activation='relu', name='fc2')(x)\n #x = Dense(nb_classes, activation='softmax', name='predict_overwrite')(x)\n \n #Create your own input format (here 3x200x200)\n input = Input(shape=(224,224,3),name = 'image_input')\n\n #Use the generated model \n output_vgg16_conv = base_model(input)\n\n #Add the fully-connected layers \n x = Flatten(name='flatten')(output_vgg16_conv)\n x = Dense(4096, activation='relu', name='fc1')(x)\n #print(\"softmax x shape = \", x.get_shape())\n #x = Dense(4096, activation='softmax', name='fc2')(x)\n x = Dense(nb_classes, activation='softmax', name='predict')(x)\n \n model = Model(input=input, output=x)\n\n # first: train only the top layers (which were randomly initialized)\n # i.e. freeze all convolutional InceptionV3 layers\n for layer in base_model.layers:\n #print(\"len base_model.layers = \", layer.name)\n layer.trainable = False\n\n # compile the model (should be done *after* setting layers to non-trainable)\n print \"starting model compile\"\n compile(model)\n print \"model compile done\"\n return model\n\n\ndef save(model, tags, prefix):\n model.save_weights(prefix+\"_\"+MODEL_NAME+\".h5\")\n # serialize model to JSON\n model_json = model.to_json()\n with open(prefix+\"_\"+MODEL_NAME+\".json\", \"w\") as json_file:\n json_file.write(model_json)\n with open(prefix+\"_\"+MODEL_NAME+\"-labels.json\", \"w\") as json_file:\n json.dump(tags, json_file)\n\n\ndef load(prefix):\n # load json and create model\n with open(prefix+\"_\"+MODEL_NAME+\".json\") as json_file:\n model_json = json_file.read()\n model = model_from_json(model_json)\n # load weights into new model\n model.load_weights(prefix+\"_\"+MODEL_NAME+\".h5\")\n with open(prefix+\"_\"+MODEL_NAME+\"-labels.json\") as json_file:\n tags = json.load(json_file)\n return model, tags\n\ndef compile(model):\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=[\"accuracy\"])\n"
},
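Editor's note on `net_vgg16.py` above: a usage sketch of its build/save/load round trip. The class count of 17, the tag names, and the `'amazon'` prefix are placeholders (the repo name suggests the Kaggle Planet dataset, but none of these values are taken from the code); `load()` returns an uncompiled model, so the module's own `compile()` is called again before use:

```python
# Hypothetical build -> save -> reload round trip with net_vgg16.
# nb_classes, tags, and the file prefix are illustrative values only.
import net_vgg16

model = net_vgg16.build_model(nb_classes=17)
net_vgg16.save(model, tags=['clear', 'haze', 'primary'], prefix='amazon')

# Later, e.g. at prediction time: restore weights/labels and recompile.
model, tags = net_vgg16.load('amazon')
net_vgg16.compile(model)
model.summary()
```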
{
"alpha_fraction": 0.6649377346038818,
"alphanum_fraction": 0.6721991896629333,
"avg_line_length": 34.66666793823242,
"blob_id": "f94cce023ebdf6c5ebd9a187c2a0340b2f7d9409",
"content_id": "92a2f5c8bc3ba80707866d47e3746cd9954e9b80",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1928,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 54,
"path": "/net_inception.py",
"repo_name": "skydream72/UnderstandingAmazonFromSpace",
"src_encoding": "UTF-8",
"text": "\nimport json\n\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.models import Model, model_from_json\nfrom keras.layers import Dense, GlobalAveragePooling2D\n\nMODEL_NAME = \"InceptionV3\"\n# create the base pre-trained model\ndef build_model(nb_classes):\n base_model = InceptionV3(weights='imagenet', include_top=False)\n\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n #print(\"GlobalAveragePooling x = \" + x)\n # let's add a fully-connected layer\n x = Dense(1024, activation='relu')(x) #why need extra fully convolution layer?\n # and a logistic layer\n predictions = Dense(nb_classes, activation='sigmoid')(x)\n\n # this is the model we will train\n model = Model(input=base_model.input, output=predictions)\n\n # first: train only the top layers (which were randomly initialized)\n # i.e. freeze all convolutional InceptionV3 layers\n for layer in base_model.layers:\n layer.trainable = False\n\n return model\n\n\ndef save(model, tags, prefix):\n model.save_weights(prefix+\"_\"+MODEL_NAME+\".h5\")\n # serialize model to JSON\n model_json = model.to_json()\n with open(prefix+\"_\"+MODEL_NAME+\".json\", \"w\") as json_file:\n json_file.write(model_json)\n with open(prefix+\"_\"+MODEL_NAME+\"-labels.json\", \"w\") as json_file:\n json.dump(tags, json_file)\n\n\ndef load(prefix):\n print(\"open \", prefix+\"_\"+MODEL_NAME+\".json\")\n # load json and create model\n with open(prefix+\"_\"+MODEL_NAME+\".json\") as json_file:\n model_json = json_file.read()\n model = model_from_json(model_json)\n # load weights into new model\n print(\"open \", prefix+\"_\"+MODEL_NAME+\".h5\")\n model.load_weights(prefix+\"_\"+MODEL_NAME+\".h5\")\n print(\"open \", prefix+\"_\"+MODEL_NAME+\"-labels.json\")\n with open(prefix+\"_\"+MODEL_NAME+\"-labels.json\") as json_file:\n tags = json.load(json_file)\n return model, tags\n\n"
}
] | 2 |
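Editor's note on `net_inception.py` above: both wrappers in this record freeze the pretrained base entirely. A hedged sketch of the usual second stage, following the fine-tuning recipe from the Keras applications documentation rather than anything in this repository: unfreeze the top inception blocks and recompile with a small learning rate.

```python
# Hypothetical second-stage fine-tune for the model from net_inception.py.
# The layer cutoff 249 follows the Keras InceptionV3 example (freeze the
# first two inception blocks, train the rest); it is an assumption, not a
# value chosen by this repository.
from keras.optimizers import SGD
import net_inception

model = net_inception.build_model(nb_classes=17)  # illustrative class count
# ... first-stage training of the randomly initialized top goes here ...

for layer in model.layers[:249]:
    layer.trainable = False
for layer in model.layers[249:]:
    layer.trainable = True

# net_vgg16.py compiles with categorical_crossentropy; given the sigmoid
# (multi-label) head above, binary_crossentropy may be the better fit.
model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
              loss='binary_crossentropy', metrics=['accuracy'])
# model.fit(...) as before, now also updating the unfrozen blocks.
```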
jamiejackherer/FaceNet-v2 | https://github.com/jamiejackherer/FaceNet-v2 | 52acecce765aa739d2dcb18250132e935b7bbbec | 8331f49ea64ca68331ab436807216e2cbfc459d3 | 005354f5bfbd4835f5ed186efd7ee010c8521545 | refs/heads/master | 2020-04-09T00:57:19.038963 | 2018-09-01T03:21:44 | 2018-09-01T03:21:44 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5650660991668701,
"alphanum_fraction": 0.5746346712112427,
"avg_line_length": 29.90322494506836,
"blob_id": "ffeb8a1c63e19ac46f67ef549c25756cb22b7db2",
"content_id": "961224d604a77aca287a6d23171a22a1bbc997a0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5748,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 186,
"path": "/valid_eval.py",
"repo_name": "jamiejackherer/FaceNet-v2",
"src_encoding": "UTF-8",
"text": "import multiprocessing as mp\nimport os\nimport pickle\nimport queue\nfrom multiprocessing import Process\nfrom multiprocessing import Process\n\nimport cv2 as cv\nimport numpy as np\nfrom keras.applications.inception_resnet_v2 import preprocess_input\nfrom tqdm import tqdm\n\nfrom config import image_folder, img_size, channel, num_valid_samples, SENTINEL, best_model\nfrom utils import get_random_triplets, get_best_model\n\n\nclass InferenceWorker(Process):\n def __init__(self, gpuid, in_queue, out_queue, signal_queue):\n Process.__init__(self, name='ImageProcessor')\n\n self.gpuid = gpuid\n self.in_queue = in_queue\n self.out_queue = out_queue\n self.signal_queue = signal_queue\n\n def run(self):\n # set enviornment\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(self.gpuid)\n print(\"InferenceWorker init, GPU ID: {}\".format(self.gpuid))\n\n from model import build_model\n\n # load models\n model = build_model()\n model.load_weights(get_best_model())\n\n while True:\n try:\n try:\n sample = self.in_queue.get(block=False)\n except queue.Empty:\n break\n\n batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)\n\n for j, role in enumerate(['a', 'p', 'n']):\n image_name = sample[role]\n filename = os.path.join(image_folder, image_name)\n image_bgr = cv.imread(filename)\n image_bgr = cv.resize(image_bgr, (img_size, img_size), cv.INTER_CUBIC)\n image_rgb = cv.cvtColor(image_bgr, cv.COLOR_BGR2RGB)\n batch_inputs[j, 0] = preprocess_input(image_rgb)\n\n y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])\n a = y_pred[0, 0:128]\n p = y_pred[0, 128:256]\n n = y_pred[0, 256:384]\n\n self.out_queue.put(\n {'image_name_a': sample['a'], 'embedding_a': a, 'image_name_p': sample['p'], 'embedding_p': p,\n 'image_name_n': sample['n'], 'embedding_n': n})\n self.signal_queue.put(SENTINEL)\n\n if self.in_queue.qsize() == 0:\n break\n except Exception as e:\n print(e)\n\n import keras.backend as K\n K.clear_session()\n print('InferenceWorker done, GPU ID {}'.format(self.gpuid))\n\n\nclass Scheduler:\n def __init__(self, gpuids, signal_queue):\n self.signal_queue = signal_queue\n manager = mp.Manager()\n self.in_queue = manager.Queue()\n self.out_queue = manager.Queue()\n self._gpuids = gpuids\n\n self.__init_workers()\n\n def __init_workers(self):\n self._workers = list()\n for gpuid in self._gpuids:\n self._workers.append(InferenceWorker(gpuid, self.in_queue, self.out_queue, self.signal_queue))\n\n def start(self, samples):\n # put all of image names into queue\n for sample in samples:\n self.in_queue.put(sample)\n\n # start the workers\n for worker in self._workers:\n worker.start()\n\n # wait all fo workers finish\n for worker in self._workers:\n worker.join()\n print(\"all of workers have been done\")\n return self.out_queue\n\n\ndef run(gpuids, q):\n # scan all files under img_path\n samples = get_random_triplets('valid')\n\n # init scheduler\n x = Scheduler(gpuids, q)\n\n # start processing and wait for complete\n return x.start(samples)\n\n\ndef listener(q):\n pbar = tqdm(total=num_valid_samples)\n for item in iter(q.get, None):\n pbar.update()\n\n\ndef create_valid_embeddings():\n gpuids = ['0', '1', '2', '3']\n print(gpuids)\n\n manager = mp.Manager()\n q = manager.Queue()\n proc = mp.Process(target=listener, args=(q,))\n proc.start()\n\n out_queue = run(gpuids, q)\n out_list = []\n while out_queue.qsize() > 0:\n out_list.append(out_queue.get())\n\n with open(\"data/valid_embeddings.p\", \"wb\") 
as file:\n pickle.dump(out_list, file)\n\n q.put(None)\n proc.join()\n\n\nif __name__ == '__main__':\n print('creating valid embeddings')\n create_valid_embeddings()\n with open('data/valid_embeddings.p', 'rb') as file:\n samples = pickle.load(file)\n\n print('evaluating valid with various thresholds')\n accuracy_list = []\n threshold_list = []\n for threshold in np.arange(0.4, 1.2, 0.01):\n print('threshold: {0:.2f}'.format(threshold))\n threshold_list.append(threshold)\n\n y_true_list = []\n y_pred_list = []\n\n for sample in tqdm(samples):\n embedding_a = sample['embedding_a']\n embedding_p = sample['embedding_p']\n embedding_n = sample['embedding_n']\n y_true_list.append(True)\n y_true_list.append(False)\n\n dist_1 = np.square(np.linalg.norm(embedding_a - embedding_p))\n y_pred_list.append(dist_1 <= threshold)\n dist_2 = np.square(np.linalg.norm(embedding_a - embedding_n))\n y_pred_list.append(dist_2 <= threshold)\n\n y = np.array(y_true_list).astype(np.int32)\n pred = np.array(y_pred_list).astype(np.int32)\n from sklearn import metrics\n\n print(y)\n print(pred)\n\n fpr, tpr, thresholds = metrics.roc_curve(y, pred)\n accuracy = metrics.auc(fpr, tpr)\n print('accuracy: ' + str(accuracy))\n accuracy_list.append(accuracy)\n\n i = int(np.argmax(accuracy_list))\n with open('data/threshold.txt', 'w') as file:\n file.write('{0:.2f}'.format(threshold_list[i]))\n"
},
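Editor's note on `valid_eval.py` above: the script sweeps thresholds on the squared L2 distance between embeddings and writes the best-scoring one to `data/threshold.txt`. A minimal sketch of applying that stored threshold to a same/different decision; the random vectors are stand-ins for real 128-d embeddings from the model:

```python
# Hypothetical verification step using the threshold tuned above.
import numpy as np

def same_person(emb_a, emb_b, threshold):
    """True if the squared L2 distance falls within the tuned threshold."""
    dist = np.square(np.linalg.norm(emb_a - emb_b))
    return dist <= threshold

with open('data/threshold.txt') as f:
    threshold = float(f.read())

rng = np.random.RandomState(0)   # stand-in embeddings for the demo
a, b = rng.randn(128), rng.randn(128)
print(same_person(a, b, threshold))
```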
{
"alpha_fraction": 0.6183399558067322,
"alphanum_fraction": 0.6284886002540588,
"avg_line_length": 28.351064682006836,
"blob_id": "1a0863f956d282c493536b4bb19101a08d821b79",
"content_id": "373bcf208114aa891f170de68ccc0a38e2e72a2a",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2759,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 94,
"path": "/pre_process.py",
"repo_name": "jamiejackherer/FaceNet-v2",
"src_encoding": "UTF-8",
"text": "import bz2\nimport os\nimport tarfile\nfrom multiprocessing import Pool\n\nimport cv2 as cv\nimport dlib\nfrom tqdm import tqdm\n\nfrom config import img_size\nfrom config import predictor_path\nfrom utils import ensure_folder\n\n\ndef extract(filename):\n print('Extracting {}...'.format(filename))\n with tarfile.open(filename) as tar:\n tar.extractall('data')\n\n\ndef ensure_dlib_model():\n if not os.path.isfile(predictor_path):\n import urllib.request\n urllib.request.urlretrieve(\"http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\",\n filename=\"models/shape_predictor_5_face_landmarks.dat.bz2\")\n\n\ndef extract_bz2(new):\n old = '{}.bz2'.format(new)\n print('Extracting {}...'.format(old))\n with open(new, 'wb') as new_file, bz2.BZ2File(old, 'rb') as file:\n for data in iter(lambda: file.read(100 * 1024), b''):\n new_file.write(data)\n\n\ndef check_one_image(filename):\n img = cv.imread(filename)\n img = img[:, :, ::-1]\n dets = detector(img, 1)\n\n num_faces = len(dets)\n if num_faces == 0:\n return filename\n\n # Find the 5 face landmarks we need to do the alignment.\n # faces = dlib.full_object_detections()\n # for detection in dets:\n # faces.append(sp(img, detection))\n #\n # # It is also possible to get a single chip\n # image = dlib.get_face_chip(img, faces[0], size=img_size)\n # image = image[:, :, ::-1]\n\n\ndef check_images(usage):\n folder = os.path.join('data', usage)\n dirs = [d for d in os.listdir(folder)]\n fileset = []\n for d in dirs:\n dir = os.path.join(folder, d)\n files = [os.path.join(dir, f) for f in os.listdir(dir) if f.lower().endswith('.jpg')]\n fileset += files\n print('usage:{}, files:{}'.format(usage, len(fileset)))\n\n pool = Pool(12)\n results = []\n for item in tqdm(pool.imap_unordered(check_one_image, fileset), total=len(fileset)):\n results.append(item)\n pool.close()\n pool.join()\n\n results = [r for r in results if r is not None]\n print(len(results))\n with open('data/exclude.txt', 'w') as file:\n file.write('\\n'.join(results))\n\n\nif __name__ == '__main__':\n ensure_folder('data')\n ensure_folder('models')\n ensure_dlib_model()\n extract_bz2(predictor_path)\n if not os.path.isdir('data/test'):\n extract('data/vggface2_test.tar.gz')\n if not os.path.isdir('data/train'):\n extract('data/vggface2_train.tar.gz')\n\n # Load all the models we need: a detector to find the faces, a shape predictor\n # to find face landmarks so we can precisely localize the face\n detector = dlib.get_frontal_face_detector()\n sp = dlib.shape_predictor(predictor_path)\n\n check_images('train')\n check_images('test')\n"
}
] | 2 |
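Editor's note on `pre_process.py` above: `check_one_image` only flags files where the detector finds no face; the alignment step it would feed is left commented out. A sketch of that alignment path, reconstructed from the commented code and dlib's 5-point landmark API (`img_size` and `predictor_path` come from the repo's `config.py`):

```python
# Face alignment sketch, following the code commented out in pre_process.py.
import cv2 as cv
import dlib

from config import img_size, predictor_path

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)

def align_face(filename):
    """Return one aligned face chip as a BGR array, or None if no face."""
    img = cv.imread(filename)[:, :, ::-1]   # BGR -> RGB for dlib
    dets = detector(img, 1)
    if not dets:
        return None
    faces = dlib.full_object_detections()
    for detection in dets:
        faces.append(sp(img, detection))
    chip = dlib.get_face_chip(img, faces[0], size=img_size)
    return chip[:, :, ::-1]                 # back to BGR for OpenCV
```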
lifangge/SdHAnalysis | https://github.com/lifangge/SdHAnalysis | 9b84ccb5c0f7aca82b9433849b1e6ca75fdf217d | 0789da6833618ff21e984c6cf3b96fdfb2cda6fa | 6a4e20ce926e1a88deb19d7fedeb5a9c6d22c30f | refs/heads/master | 2020-05-15T21:37:50.841660 | 2017-02-22T18:38:22 | 2017-02-22T18:38:22 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5248488187789917,
"alphanum_fraction": 0.5327373147010803,
"avg_line_length": 49.538204193115234,
"blob_id": "d8ed120bf96885b7039a12ddd5287c4b01d47542",
"content_id": "39d9901e2a2a2770ede6b698e8bba5b42a923795",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15212,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 301,
"path": "/sdhanalysis.py",
"repo_name": "lifangge/SdHAnalysis",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nfrom numpy import fft\n\nfrom scipy import interpolate\nfrom scipy import optimize\nfrom scipy import signal\n\nfrom detect_peaks import detect_peaks\n\nimport matplotlib.pyplot as plt\n\n\"\"\" Data processing pipeline for analysis of SdH in pulsed fields.\n Logan Bishop-Van Horn (2017)\n\"\"\"\n\n###################################################################################################\n# #\n###################################################################################################\n \nclass SdHDataSet:\n \"\"\" SdHDataSet handles SdH data and analysis for a single magnet sweep/pulse.\n Attributes:\n name: (string) name/description of dataset\n date: (string) date data was taken\n UpRaw: (pandas DataFrame) raw up-sweep data\n DownRaw: (pandas DataFrame) raw down-sweep data\n Upcs: (pandas DataFrame) spline fit/background subtracted\n up-sweep data\n Downcs: (pandas DataFrame) spline fit/backgroudn subtracted\n down-sweep data\n nskip: (int) number of points to skip at beginning of signal\n FFTpeaks: (pandas DataFrame) FFT peak amplitudes and locations (1/B)\n Orbits: (dict) contains data specific to a given orbit/frequency\n Orbits keys:\n 'Osc': (pandas DataFrame) oscillations vs. 1/B\n 'Peaks': (pandas DataFrame) oscillation amplitude vs. 1/B\n Methods:\n load_data() (Adapt this method for your particular dataset)\n subtract_background()\n get_fft_peaks()\n isolate_orbit()\n get_peak_amplitudes()\n \"\"\"\n def __init__(self, name, date):\n self.name = name\n self.date = date\n self.Orbits = {}\n \n def load_data(self, year, num, plot=True):\n self.UpRaw, self.DownRaw = sdh_load(year, num, plot=plot)\n \n def subtract_background(self, deg, Bmin, Bmax, npnts=2**13, plot=True, yscale=1e5, save=False):\n self.Upcs, self.Downcs = back_subtract(self.UpRaw, self.DownRaw, deg, Bmin, Bmax,\n npnts=npnts, plot=plot, yscale=yscale, save=save)\n \n def get_fft_peaks(self, nskip=100, mph=None, mpd=1,\n threshold=0, edge='rising', kpsh=False, valley=False,\n show=True, ax=None, nignore=5, xmax=None, keep_ind=None, save=False):\n self.nskip = nskip\n self.FFTpeaks = fft_peaks(self.Downcs, nskip=nskip, mph=mph, mpd=mpd,\n threshold=threshold, edge=edge, kpsh=kpsh,\n valley=valley, show=show, ax=ax, nignore=nignore,\n xmax=xmax, keep_ind=keep_ind, save=save)\n \n def isolate_orbit(self, orbit, center_freq, passband, order=2, method='gust', plot=True, save=False):\n df_orbitdata = filter_orbit(self.Downcs, center_freq, passband, orbit,\n self.nskip, order=order, method=method, plot=plot, save=save)\n self.Orbits.update({orbit: {'Osc': df_orbitdata}})\n \n def get_peak_amplitudes(self, orbit, show=True, save=False):\n orbit_dict = self.Orbits[orbit]\n df_data = self.Downcs\n df_peaks = peak_amplitudes(orbit_dict, orbit, self.nskip, show=show, save=save)\n self.Orbits[orbit].update({'Peaks': df_peaks})\n \n def plot_orbit_amplitudes(self):\n orbits = self.Orbits\n for orbit in sorted(orbits.keys()):\n plt.plot(orbits[orbit]['Peaks'].InvField, 1e-3*orbits[orbit]['Peaks'].Amp, '.', label=orbit)\n plt.legend(loc=0, numpoints=1)\n plt.xlabel(r'Inverse Field (T${}^{-1}$)')\n plt.ylabel('Amplitude (kHz)')\n plt.xlim(orbits[list(orbits.keys())[0]]['Peaks'].InvField.min())\n plt.title(self.name+' orbit amplitudes')\n plt.show()\n \n###################################################################################################\n# #\n###################################################################################################\n\ndef 
sdh_load(year, num, plot):\n    \"\"\" Loads delimited text frequency vs. field data\n        File format:\n            First column: Frequency\n            Second column: Field\n            Header: yes, one line\n        Returns: DataFrame for both up and down sweeps\n        Any function that returns a dict with a separate DataFrame \n        for up and down sweeps will work here.\n    \"\"\"\n    fileu = 'Jun'+str(year)+'_2002s0'+str(num)+'u.txt'\n    filed = 'Jun'+str(year)+'_2002s0'+str(num)+'d.txt'\n    col_names = ['Freq', 'Field']\n    df_sdhu = pd.read_csv(fileu, sep='\\t')\n    df_sdhd = pd.read_csv(filed, sep='\\t')\n    df_sdhu.columns = col_names\n    df_sdhd.columns = col_names\n    df_sdhu = pd.DataFrame({'Freq': df_sdhu.Freq, \n        'Field': df_sdhu.Field, 'InvField':1/df_sdhu.Field[::-1]})\n    df_sdhd = pd.DataFrame({'Freq': df_sdhd.Freq, \n        'Field': df_sdhd.Field, 'InvField':1/df_sdhd.Field[::-1]})\n    sdh_dict = {'Up': df_sdhu, 'Down': df_sdhd}\n    if plot:\n        plt.plot(df_sdhu.Field, 1e-6*df_sdhu.Freq, label='Up')\n        plt.plot(df_sdhd.Field, 1e-6*df_sdhd.Freq, label='Down')\n        plt.xlim(0)\n        plt.xlabel('Field (T)')\n        plt.ylabel('Frequency (MHz)')\n        plt.legend(loc=0)\n        plt.show()\n    return df_sdhu, df_sdhd\n\n###################################################################################################\n#                                                                                                 #\n###################################################################################################\n\ndef back_subtract(df_up, df_down, deg, Bmin, Bmax, npnts, plot, yscale, save):\n    \"\"\" Performs subtraction of polynomial background on Freq vs. Inverse Field data.\n        Inputs:\n        df_up: DataFrame containing freq, field, inverse field for up sweep\n        df_down: DataFrame containing freq, field, inverse field for down sweep\n        deg: degree of polynomial fit (typically 5, 7, or 9)\n        Bmin: minimum field for section of data to fit\n        Bmax: maximum field for section of data to fit\n        npnts: number of points to spline to (2**13-2**15 is reasonable)\n        plot and yscale: If you want to plot output.\n        save: Once you're happy with the background subtraction,\n            add new data to dict\n        Returns: DataFrames for spline fit, background subtracted up and down sweeps\n    \"\"\"\n    Binv = np.linspace(1/Bmax, 1/Bmin, npnts)\n    tck = interpolate.splrep(df_up.InvField[df_up.InvField>=1/Bmax][df_up.InvField<=1/Bmin].values[::-1],\n                             df_up.Freq[df_up.InvField>=1/Bmax][df_up.InvField<=1/Bmin].values[::-1], s=0)\n    new_up = interpolate.splev(Binv, tck, der=0)\n    df_splup = pd.DataFrame({'Freq': new_up, 'Field': 1/Binv, 'InvField': Binv})\n    \n    tck = interpolate.splrep(df_down.InvField[df_down.InvField>=1/Bmax][df_down.InvField<=1/Bmin].values[::-1],\n                             df_down.Freq[df_down.InvField>=1/Bmax][df_down.InvField<=1/Bmin].values[::-1], s=0)\n    new_down = interpolate.splev(Binv, tck, der=0)\n    df_spldown = pd.DataFrame({'Freq': new_down, 'Field': 1/Binv, 'InvField': Binv})\n    \n    coeffup = np.polyfit(df_up.InvField[df_up.InvField>=1/Bmax][df_up.InvField<=1/Bmin],\n                         df_up.Freq[df_up.InvField>=1/Bmax][df_up.InvField<=1/Bmin], deg)\n    coeffdown = np.polyfit(df_down.InvField[df_down.InvField>=1/Bmax][df_down.InvField<=1/Bmin],\n                           df_down.Freq[df_down.InvField>=1/Bmax][df_down.InvField<=1/Bmin], deg)\n    fitup = np.polyval(coeffup, Binv)\n    fitdown = np.polyval(coeffdown, Binv)\n    sub_up = new_up-fitup\n    sub_down = new_down-fitdown\n    if plot:\n        plt.plot(Binv, 1e-3*sub_down, 'b', label='Down')\n        plt.plot(Binv, 1e-3*sub_up, 'r', label='Up')\n        plt.xlim(1/Bmax, 1/Bmin)\n        plt.ylim(-yscale*1e-3,yscale*1e-3)\n        plt.legend(loc=0)\n        plt.xlabel(r'Inverse Field (T${}^{-1}$)')\n        plt.ylabel(r'$\\Delta$f (kHz)')\n        plt.show()\n    if not save:\n
        print('Happy with this background subtraction?')\n        print('If so, set save=True and run again.')\n        return None, None\n    df_upcs = pd.DataFrame({'Field': 1/Binv, 'InvField': Binv,\n                            'Freq': new_up, 'FreqSub': sub_up})\n    df_downcs = pd.DataFrame({'Field': 1/Binv, 'InvField': Binv,\n                              'Freq': new_down, 'FreqSub': sub_down})\n    return df_upcs, df_downcs\n    \n###################################################################################################\n#                                                                                                 #\n###################################################################################################\n\ndef fft_peaks(df_datacs, nskip, mph, mpd, threshold, edge, kpsh,\n              valley, show, ax, nignore, xmax, keep_ind, save):\n    \"\"\" Uses detect_peaks module to find peak frequencies using FFT of delta freq. vs inverse field.\n        Inputs:\n        df_datacs: DataFrame containing background subtracted data\n        nskip: Number of points to skip at the beginning of the signal\n        mph--ax: See documentation for detect_peaks:\n            http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb\n        nignore: Throw out peak if index < nignore\n        xmax: x-axis maximum for plotting\n        keep_ind: List of indices in the array of peak locations to keep\n            e.g. if you want to keep 1st, 3rd, 4th, and 7th peaks:\n            keep_ind = [0, 2, 3, 6]\n            (inspect plot to decide which peak locations to keep)\n        save: Once you've chosen the indices of the peak locations to keep, set save=True\n            and run fft_peaks() again\n        Returns: DataFrame containing amplitudes, frequencies of peaks if save=True\n    \"\"\"\n    \n    dt = df_datacs.InvField[1]-df_datacs.InvField[0]\n    fftdata = abs(fft.rfft(df_datacs.FreqSub[nskip:]))\n    f = fft.rfftfreq(len(df_datacs.FreqSub)-nskip, d=dt)\n    peak_ind = detect_peaks(fftdata, mph, mpd, threshold, edge, kpsh, valley, show, ax, nignore, xmax, keep_ind)\n    df_fftpeaks = pd.DataFrame({'Freq': [f[i] for i in peak_ind], 'Amp': [fftdata[i] for i in peak_ind],\n                                'Ind': [i for i in peak_ind]})\n    if not save:\n        print('Happy with these peaks?')\n        print('If so, set save=True and run again.\\n')\n        print(df_fftpeaks)\n        return None\n    print('The following {} peaks have been added to the dataset:\\n'.format(len(peak_ind)))\n    print(df_fftpeaks)\n    return df_fftpeaks\n    \n###################################################################################################\n#                                                                                                 #\n###################################################################################################\n\ndef bandpass(lowcut, highcut, fs, order=2):\n    nyq = fs/2\n    low = lowcut/nyq\n    high = highcut/nyq\n    b, a = signal.butter(order, [low, high], btype='band')\n    return b, a\n\ndef bandpass_filter(freq_data, lowcut, highcut, fs, order, method):\n    b, a = bandpass(lowcut, highcut, fs, order=order)\n    freq_filt = signal.filtfilt(b, a, freq_data, method=method)\n    return freq_filt\n\ndef filter_orbit(df_datacs, center_freq, passband, orbit, nskip, order, method, plot, save):\n    \"\"\" Bandpass filter to isolate a specific orbit/fundamental frequency.\n        Inputs:\n        df_datacs: DataFrame containing background subtracted data\n        center_freq: Frequency (in tesla) of the peak you want to isolate\n        passband: Filter will allow center_freq +/- passband to pass (tesla)\n        orbit: (string) name of orbit/fundamental frequency, used as dict key\n        nskip: number of points to skip at beginning of signal\n        order: order of the filter\n        method: filtering method passed on to scipy.signal.filtfilt\n        plot: (Boolean) plotting option\n        save: If you're satisfied with the filtered signal, set save=True\n        Returns: DataFrame with orbit properties if save=True:\n            ({'Freq': filtered signal, 'InvField': inverse field})\n    \"\"\"\n
    freq_data = df_datacs.FreqSub.values[nskip:]\n    inv_field = df_datacs.InvField.values[nskip:]\n    fs = 1/(inv_field[1]-inv_field[0])\n    lowcut, highcut = center_freq-passband, center_freq+passband\n    freq_filt = bandpass_filter(freq_data, lowcut, highcut, fs, order=order, method=method)\n    if plot:\n        plt.plot(df_datacs.InvField[nskip:], 1e-3*df_datacs.FreqSub[nskip:], label='Raw')\n        plt.plot(df_datacs.InvField[nskip:], 1e-3*freq_filt, 'r', label='{} T bandpass'.format(center_freq))\n        plt.xlabel(r'Inverse Field (T${}^{-1}$)')\n        plt.ylabel(r'$\\Delta$f (kHz)')\n        plt.legend(loc=0)\n        plt.title(orbit+' orbit')\n        plt.show()\n    if not save:\n        print('Happy with the filtered signal?')\n        print('If so, set save=True and run again.')\n        return None\n    print('The filtered '+orbit+' orbit has been added to the dataset.')\n    return pd.DataFrame({'Freq': freq_filt, 'InvField': df_datacs.InvField.values[nskip:]})\n\n###################################################################################################\n#                                                                                                 #\n###################################################################################################\n\ndef peak_amplitudes(orbit_dict, orbit, nskip, show, save):\n    \"\"\" Finds peak amplitude as a function of inverse field.\n        Inputs:\n        orbit_dict: dict containing each separate orbit\n        orbit: (string) name of orbit you're looking at (a key in orbit_dict)\n        nskip: number of points to skip at beginning of signal \n        show: (Boolean) plotting option for detect_peaks()\n        save: if detect_peaks() was successful, set save=True and run again\n        Returns: DataFrame containing peak amplitude vs. inverse field if save=True\n    \"\"\"\n    peaks = abs(orbit_dict['Osc'].Freq)\n    peak_ind = detect_peaks(peaks, show=show)\n    peak_fields = np.array([orbit_dict['Osc'].InvField[i] for i in peak_ind])\n    peak_amps = np.array([peaks[i] for i in peak_ind])\n    if not save:\n        print('Happy with the peaks detected?')\n        print('If so, set save=True and run again.\\n')\n        return None\n    print('The following peaks have been added to the dataset under orbit '+orbit+'.\\n')\n    df_peaks = pd.DataFrame({'Amp': peak_amps, 'InvField': peak_fields})\n    plt.plot(peak_fields, 1e-3*peak_amps, 'o')\n    plt.xlabel(r'Inverse Field (T${}^{-1}$)')\n    plt.ylabel(r'Amplitude (kHz)')\n    plt.title(orbit+' orbit amplitudes')\n    plt.show()\n    return df_peaks\n\n###################################################################################################\n#                                                                                                 #\n###################################################################################################\n"
},
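The module above subtracts a polynomial background in inverse field and then uses an FFT to locate the oscillation frequencies. A minimal, self-contained sketch of that idea on synthetic data (all numbers here are illustrative, not from the repository):

```python
import numpy as np

# Synthetic SdH-like signal: smooth background plus a ~100 T oscillation in 1/B
Binv = np.linspace(1 / 45, 1 / 10, 2**13)            # inverse-field grid (T^-1)
background = 5e5 + 2e6 * Binv + 8e6 * Binv**2        # smooth magnetoresistance part
signal = background + 2e3 * np.sin(2 * np.pi * 100 * Binv)

coeff = np.polyfit(Binv, signal, 5)                  # degree-5 fit, as in back_subtract
residual = signal - np.polyval(coeff, Binv)          # the oscillatory part survives

spectrum = np.abs(np.fft.rfft(residual))
freqs = np.fft.rfftfreq(len(Binv), d=Binv[1] - Binv[0])
print('strongest peak near %.0f T' % freqs[np.argmax(spectrum[1:]) + 1])
```

The recovered peak lands within one FFT bin of the 100 T input frequency; the bin width is set by the inverse-field window, which is why the real code splines onto an evenly spaced 1/B grid first.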
{
"alpha_fraction": 0.7576197385787964,
"alphanum_fraction": 0.7624576687812805,
"avg_line_length": 81.72000122070312,
"blob_id": "94fe39f4eff29cc7a191db124e9f7e08fa38e14a",
"content_id": "c85cfccaa40a1a370c3ef56938b277c3af9890d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2069,
"license_type": "no_license",
"max_line_length": 477,
"num_lines": 25,
"path": "/README.md",
"repo_name": "lifangge/SdHAnalysis",
"src_encoding": "UTF-8",
"text": "# SdHAnalysis\n___________________________________________\n#### Data processing pipeline for analysis of quantum oscillations.\n-----------------------\nSdHAnalysis is a collection of Python functions for analyzing Shubnikov-de Haas oscillations measured in pulsed and dc magnetic fields. SdHAnalysis is based on the `SdHDataSet` class, which contains the raw and processed data for a single magnet sweep/pulse and includes methods for analysis and plotting. Currently, it is assumed that the resistivity is measured using a tunnel diode oscillator (i.e. SdH manifests as oscillations in frequency as a function of inverse field).\n##### Data processing steps:\n1. Import data, clean it if necessary.\n2. Invert the magnetic field, spline fit to get evenly spaced points, and subtract a polynomial (in inverse field) magnetoresistance background signal.\n3. Identify peaks in the FFT corresponding to SdH and magnetic breakdown orbits, and any mixing signals or harmonics.\n4. Filter the oscillatory magnetoresistance signal to isolate each of the orbits of interest.\n5. Calculate the amplitude of magnetoresistance oscillations as a function of inverse field.\n6. Fit the data to theoretical models to extract materials parameters like effective mass, g-factor, Dingle temperature, and magnetic breakdown field.\n\nSdH oscillations at many temperatures are needed to calculate effective mass (using fits to the Lifshitz–Kosevich formula). I hope to implement this calculation soon.\n\nCalculation of the Dingle temperature and magnetic breakdown field is in general not possible from SdH unless one of the two is known from a separate measurement. I don't when I'll get around to implementing this calculation in some form.\n##### Dependencies:\n- [`numpy`](http://www.numpy.org)\n- [`scipy`](https://www.scipy.org)\n- [`pandas`](http://pandas.pydata.org)\n- [`matplotlib`](http://matplotlib.org)\n- [`detect_peaks`](https://github.com/demotu/BMC/blob/master/functions/detect_peaks.py) (author: Marcos Duarte)\n- For use in [Jupyter](http://jupyter.org) notebooks\n\nAuthor: Logan Bishop-Van Horn (2017)"
}
] | 2 |
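The SdHAnalysis README above mentions Lifshitz-Kosevich fits for extracting the effective mass but notes the code is not implemented yet. As a hedged sketch of the quantity such a fit uses, the LK thermal damping factor has the standard form below (the constant 14.69 T/K is 2*pi^2*k_B*m_e/(e*hbar); `m_eff` is the effective mass in units of the free-electron mass, and the function name is illustrative):

```python
import numpy as np

def lk_thermal_factor(T, B, m_eff):
    """Lifshitz-Kosevich thermal damping R_T = X / sinh(X), X = 14.69 * m_eff * T / B."""
    X = 14.69 * m_eff * T / B
    return X / np.sinh(X)

# Oscillation amplitude suppression vs. temperature at fixed field, for m_eff = 0.5
for T in (1.0, 4.0, 10.0):
    print(T, lk_thermal_factor(T, B=30.0, m_eff=0.5))
```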
santhosh-santhu/Super_Market-Bill_Generation | https://github.com/santhosh-santhu/Super_Market-Bill_Generation | 7c7bf1efe18d5c6d4edabb7ffb3921e42ab23593 | 275505adc36cda575402435c75bb88865292e75c | b50bfed833dcdb99e9362fe816b4c603a367da22 | refs/heads/main | 2023-06-17T00:39:25.234359 | 2021-07-13T13:19:22 | 2021-07-13T13:19:22 | 384,959,055 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.48347288370132446,
"alphanum_fraction": 0.5218157768249512,
"avg_line_length": 28.98611068725586,
"blob_id": "2e1301513c5f1e38c7d9803513f9d16aa60d87d1",
"content_id": "bb0b627a62c3ff45bcd7e8e37dd8a90231192a25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2269,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 72,
"path": "/super-market Bill-generation.py",
"repo_name": "santhosh-santhu/Super_Market-Bill_Generation",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\r\n\r\nname = input(\"Enter customer name:\")\r\n\r\n#Here iam listing some items\r\nlists =''' \r\nRice Rs 55/kg\r\nSugar Rs 30/kg\r\nsalt Rs 10/kg\r\noil Rs 110/ per liter\r\npanner Rs 210/kg\r\nMaggi Rs 50/kg\r\nBoost Rs 100/each\r\ncolgate Rs 25/each\r\n'''\r\n\r\n#declaration\r\nprice = 0\r\npricelist = []\r\ntotalprice = 0\r\nFinalprice = 0\r\nilist=[]#itemlist\r\nqlist=[]#quantity list\r\nplist=[]#pricelist\r\n\r\n#rates for items\r\nitems={'Rice':55,'Sugar':30,'Salt':10,'oil':80,'panner':210,'maggi':50,'Boost':100,'colgate':25}\r\n\r\noption = int(input(\"For list of items press 1: \"))\r\nif option==1:\r\n print(lists)\r\nfor i in range(len(items)):\r\n inp1=int(input(\"if you want to buy press 1 or 2 for exit:\"))\r\n if inp1==2:\r\n break\r\n if inp1==1:\r\n item=input(\"Enter your items:\")\r\n quantity = int(input(\"Enter quanity: \"))\r\n if item in items.keys():\r\n price=quantity*(items[item])\r\n pricelist.append((item,quantity,items,price))\r\n totalprice+=price\r\n ilist.append(item)\r\n qlist.append(quantity)\r\n plist.append(price)\r\n #if we want to cut gst\r\n gst=(totalprice*5)/100\r\n finalamount = gst+totalprice\r\n else:\r\n print(\"Sorry,you entered item is not available \")\r\n else:\r\n print(\"Please enter the valid number\")\r\n #To geneate the bill\r\n inp=input(\"can i bill the items y/n:\")\r\n if inp=='y':\r\n pass\r\n if finalamount!=0:\r\n print(25*\"=\",\"World Supermarket\",25*\"=\")\r\n print(28*\" \",\"Ramanthapur\")\r\n print(\"Name:\",name,30*\" \",\"Date:\",datetime.now())\r\n print(75*\"-\")\r\n print(\"sno\",6*\" \",'items',8*\" \",'Quantity',8*\" \",\"Price\")\r\n for i in range(len(pricelist)):\r\n print(i,1*' ',8*' ',ilist[i],8*' ',qlist[i],8*\" \",plist[i])\r\n print(75*\"-\")\r\n print(50*\" \",\"TotalAmount:\",'Rs',totalprice)\r\n print(\"Gst amount\",30*\" \",'Rs',totalprice)\r\n print(75*\"-\")\r\n print(50*\" \",'finalamount','Rs',finalamount)\r\n print(75*\"-\")\r\n print(20*\" \",\"Thanks for visiting!Have a good day\")\r\n print(75*\"-\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n \r\n\r\n"
}
] | 1 |
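The script above mixes input handling, pricing, and printing. A compact, testable version of just the pricing-plus-GST logic (this refactor is illustrative only; `bill_total` is not part of the repository):

```python
PRICES = {'Rice': 55, 'Sugar': 30, 'salt': 10, 'oil': 110,
          'panner': 210, 'Maggi': 50, 'Boost': 100, 'colgate': 25}

def bill_total(cart, gst_rate=0.05):
    """cart maps item name -> quantity; returns (subtotal, gst, total)."""
    subtotal = sum(PRICES[item] * qty for item, qty in cart.items())
    gst = subtotal * gst_rate
    return subtotal, gst, subtotal + gst

print(bill_total({'Rice': 2, 'Boost': 1}))  # (210, 10.5, 220.5)
```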
allanshimako/airflow-dags | https://github.com/allanshimako/airflow-dags | 16df2ac7df60a50f82decc857744d2d9a3755185 | 1d40b94901d2a26a4364d4e6d0e47dad6314e6dd | f5dd47b2e324a942c98b3028daa2b0d33d136c65 | refs/heads/master | 2022-09-19T06:10:07.310616 | 2020-05-29T14:38:09 | 2020-05-29T14:38:09 | 267,879,559 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6141493320465088,
"alphanum_fraction": 0.6206597089767456,
"avg_line_length": 21.77227783203125,
"blob_id": "55f685494b7aa1691ab7d0dff3ebdd0e6cda8f6e",
"content_id": "9b81679e49a904dd2a679fd90f16655e91410578",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2304,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 101,
"path": "/test.py",
"repo_name": "allanshimako/airflow-dags",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nfrom datetime import timedelta\nimport papermill as pm\nimport os\n\nfrom airflow import DAG\n\n# Operators are used to create the tasks\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.papermill_operator import PapermillOperator\nfrom airflow.operators.python_operator import PythonOperator\nfrom airflow.utils.dates import days_ago\n\n\n# In[3]:\n\n\n# These args will get passed on to each operator\n# You can override them on a per-task basis during operator initialization\ndefault_args = {\n 'owner': 'allan',\n 'depends_on_past': False,\n 'start_date': days_ago(1),\n 'email': ['[email protected]'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n}\n\ndag = DAG(\n 'test_spark',\n default_args=default_args,\n description='test',\n schedule_interval=\"@once\",\n)\n\n\ndag.doc_md = __doc__\n\n# t1 will only send start_date from bash\nt1 = BashOperator(\n task_id='user',\n bash_command='whoami',\n dag=dag,\n xcom_push=True, \n)\n\nt2 = BashOperator(\n task_id='echo_user',\n bash_command='echo {{ task_instance.xcom_pull(task_ids=\"user\")}}',\n dag=dag,\n xcom_push=True, \n)\n\ndef papermill_run (config):\n \"\"\"\n run the notebooks called in Python operators\n \"\"\"\n pm.execute_notebook(\n config.get('input_nb', None),\n config.get('output_nb', None),\n parameters=config.get('parameters', None))\n\nt3 = PythonOperator(\n task_id = 'papermill_test',\n python_callable=papermill_run,\n op_kwargs={'config': {\n 'input_nb': '/opt/dags/spark_test.ipynb',\n 'output_nb': '/opt/dags/spark_test_{{ ds }}.ipynb',\n 'parameters': {'file':'/opt/dags/test'\n , 'pyspark_python':'python3',\n }\n }\n },\n dag=dag,\n)\n \n \n## PapermillOperator is not yet operational, there is an error when submitting it. \n## It was corrected and pushed to master but not yet released \n\"\"\"t2 = PapermillOperator(\n task_id ='test_papermill',\n input_nb='/mnt/d/notebooks/test.ipynb',\n output_nb='/mnt/d/notebooks/test_out.ipynb',\n parameters={'file':'/mnt/d/notebooks/test'},\n dag=dag\n)\"\"\"\n\n\n# \n\nt1 >> t2 >>t3\n\n\n# In[ ]:\n\n\n\n\n"
}
] | 1 |
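The DAG above wraps `papermill` in a `PythonOperator` because `PapermillOperator` was broken at the time. Outside Airflow, the same notebook run can be reproduced directly with papermill's Python API (paths and parameters copied from the DAG; the dated output name is a stand-in for the templated `{{ ds }}` value):

```python
import papermill as pm

pm.execute_notebook(
    '/opt/dags/spark_test.ipynb',             # input notebook
    '/opt/dags/spark_test_2020-05-29.ipynb',  # output notebook (placeholder date)
    parameters={'file': '/opt/dags/test', 'pyspark_python': 'python3'},
)
```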
ZhangYet/vulcan | https://github.com/ZhangYet/vulcan | 13e7c626a54b25b15016837c43208e25c332109a | 2e845192b4c70cad7f1b3f98f698befc1de86f51 | 991de3c5a37be7aa6037c8dc6569c6cbda05ecff | refs/heads/master | 2020-06-17T20:48:10.890327 | 2019-07-15T16:27:25 | 2019-07-15T16:27:25 | 196,049,320 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4529801309108734,
"alphanum_fraction": 0.46357616782188416,
"avg_line_length": 24.149999618530273,
"blob_id": "14d12e87cf1a5f1cd3d98730a7fa12980d2ffd64",
"content_id": "77872c3b3419d13b1c7b6cd047d6b1bef9872479",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1524,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 60,
"path": "/vulcan/data/jp_word.py",
"repo_name": "ZhangYet/vulcan",
"src_encoding": "UTF-8",
"text": "STAR = '☆'\n\n\nclass Word:\n\n def __str__(self):\n return '{0.lesson}, {0.word}, {0.gana}, {0.tone}, {0.attr}, {0.chinese}'.format(self)\n\n def __init__(self, lesson: str, word: str, gana: str, tone: str, attr: str, chinese):\n self.lesson = lesson\n self.word = word\n self.gana = gana\n self.tone = tone\n self.attr = attr\n self.chinese = chinese\n\n def to_chinese(self):\n return '汉语: {}'.format(self.chinese)\n\n def to_word(self):\n return '日文: {}'.format(self.word)\n\n def to_gana(self):\n return '假名: {}'.format(self.gana)\n\n def clean_lesson(self) -> str:\n lesson_num = self.lesson.strip().replace(STAR, '')\n try:\n if int(lesson_num) < 10:\n return '0' + self.lesson\n return self.lesson\n except:\n print(self.lesson)\n\n\ndef load_from_file(file_path: str) -> [Word]:\n ret = []\n with open(file_path) as data:\n for line in data:\n sline = line.split('\\t')\n if len(sline) < 6:\n continue\n\n word = Word(sline[0].strip(),\n sline[1].strip(),\n sline[2].strip(),\n sline[3].strip(),\n sline[4].strip(),\n sline[5].strip())\n\n if __name__ == '__main__':\n print(word)\n\n ret.append(word)\n\n return ret\n\n\nif __name__ == '__main__':\n load_from_file('./clean_jp.csv')\n\n"
},
{
"alpha_fraction": 0.6353383660316467,
"alphanum_fraction": 0.6390977501869202,
"avg_line_length": 28.55555534362793,
"blob_id": "379f9a106d9494f2db09ecee00075e873fb53f8b",
"content_id": "5bea4007d02b07b71364ba03499c68c76f3370ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 294,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 9,
"path": "/main.py",
"repo_name": "ZhangYet/vulcan",
"src_encoding": "UTF-8",
"text": "from vulcan.data.jp_word import load_from_file\nfrom vulcan.anki import Vulcan\n\nif __name__ == '__main__':\n word_list = load_from_file('vulcan/data/new_japanese_1.csv')\n v = Vulcan('新编日语(上海教育出版社)第一册')\n for word in word_list:\n v.add(word)\n v.save()\n"
},
{
"alpha_fraction": 0.7195122241973877,
"alphanum_fraction": 0.7195122241973877,
"avg_line_length": 15.600000381469727,
"blob_id": "67d367e40daeb2fe744e93a62dca8788a79ed3a6",
"content_id": "b93451e8ec2e77b1fcd292c8be96758f351dc07e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 186,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 5,
"path": "/README.md",
"repo_name": "ZhangYet/vulcan",
"src_encoding": "UTF-8",
"text": "# vulcan\n\n**vulcan**(火神),其实是《明日方舟》里面的一个重装干员,刚好名字短,所以用来命名了。\n\n将《新编日语》的单词导入成 apkg 格式。"
},
{
"alpha_fraction": 0.2945139706134796,
"alphanum_fraction": 0.29964709281921387,
"avg_line_length": 38.45569610595703,
"blob_id": "f7cf34e00d5c22a071add7380bcd433d8de31d0d",
"content_id": "201bce34c6f258883519ffd75d81351be4e83210",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3179,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 79,
"path": "/vulcan/anki.py",
"repo_name": "ZhangYet/vulcan",
"src_encoding": "UTF-8",
"text": "import genanki\nimport datetime\n\nfrom vulcan.data.jp_word import Word\n\n\nCSS = '''\ndiv.front, div.back {\n text-align:center;\n font-family: Courier;\n font-size: 30px;\n}\n\nspan.small {font-size: 15px;}\nspan.normal {font-size: 30px;}\nspan.large {font-size: 60px;}\nspan.italic {font-style:italic;}\n'''\n\n\ndef gen_id() -> int:\n return int(datetime.datetime.now().timestamp())\n\n\nclass Vulcan:\n\n def __init__(self, name: str):\n self.name = name\n self.model = genanki.Model(gen_id(),\n '新编日语(上海外语教育出版社)第一册',\n fields=[\n {'name': 'Question'},\n {'name': 'Answer1'},\n {'name': 'Answer2'},\n {'name': 'Tone'},\n {'name': 'Attr'},\n {'name': 'Lesson'},\n ],\n templates=[\n {\n 'name': 'new_japanese',\n 'qfmt': '''\n <div class=\"front\">\n <span class=\"large japanese\">{{Question}}</span>\n <br/\n </div>\n ''',\n 'afmt': '''\n <div class=\"back\">\n <span class=\"large\">{{Answer1}}</span>\n <span class=\"large\">{{Answer2}}</span>\n <hr/>\n 声调:{{Tone}}, 词性:{{Attr}}, 课文:{{Lesson}}\n <br/>\n </span>\n </div>\n ''',\n },\n ],\n css=CSS)\n self.deck = genanki.Deck(gen_id(),\n name='新编日语')\n\n def add(self, word: Word):\n node1 = genanki.Note(model=self.model,\n fields=[\n word.to_word(), word.to_gana(), word.to_chinese(),\n word.tone, word.attr, word.clean_lesson(),\n ])\n self.deck.add_note(node1)\n node3 = genanki.Note(model=self.model,\n fields=[\n word.to_gana(), word.to_word(), word.to_chinese(),\n word.tone, word.attr, word.clean_lesson(),\n ])\n self.deck.add_note(node3)\n\n def save(self):\n genanki.Package(self.deck).write_to_file(self.name + '.apkg')\n"
}
] | 4 |
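The `anki.py` module above is built on the `genanki` library. For readers unfamiliar with it, here is a minimal, self-contained deck using the same calls (the IDs, names, and output filename are arbitrary examples, not the project's):

```python
import genanki

model = genanki.Model(
    1607392319, 'Simple Model',
    fields=[{'name': 'Question'}, {'name': 'Answer'}],
    templates=[{'name': 'Card 1',
                'qfmt': '{{Question}}',
                'afmt': '{{FrontSide}}<hr id="answer">{{Answer}}'}])
deck = genanki.Deck(2059400110, 'Demo Deck')
deck.add_note(genanki.Note(model=model, fields=['ねこ', 'cat']))
genanki.Package(deck).write_to_file('demo.apkg')
```

The project's `Vulcan.add()` follows the same pattern, adding two notes per word so each card can be drilled from both the kanji and the kana side.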
sumedhaagarwal/WC | https://github.com/sumedhaagarwal/WC | ac25ba23378efc1387120981a826b09cfde9921a | 3497097948ac5e3cef2290bcd119b8de18db54cb | b42944e33d0d95085e8adc4e6a37a0fa911a0639 | refs/heads/master | 2021-03-27T17:23:32.231953 | 2018-01-26T12:22:02 | 2018-01-26T12:22:02 | 95,140,464 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6193353533744812,
"alphanum_fraction": 0.6374622583389282,
"avg_line_length": 24.461538314819336,
"blob_id": "0fc095498df5e66ad13c7eda32e8e6ce0789ee12",
"content_id": "bb3383cde6e5281902bc34a29af485a90293fd27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 662,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 26,
"path": "/youtubedownloader.py",
"repo_name": "sumedhaagarwal/WC",
"src_encoding": "UTF-8",
"text": "import os\na=input(\"Enter URL: \")\nb=input(\"For audio press 1 and for video press 2: \")\nprint(b)\nM_path = \"/Music\"\nV_path = \"/Videos\"\n\nif b==1:\n print('1')\n c=input(\"For default path press 1 and for any other path press 2: \")\n if c==1:\n\t\tos.chdir(M_path)\n\t\tos.system('youtube-dl --extract-audio --audio-format mp3 '+a)\n else:\n\t\tpath=input(\"Enter Path: \")\n\t\tos.chdir(path)\n\t\tos.system('youtube-dl --extract-audio --audio-format mp3 '+a)\nelse:\n\tc=input(\"For default path press 1 and for any other path press 2: \")\n\tif c==1:\n\t\tos.chdir(V_path)\n\t\tos.system('youtube-dl '+a)\n\telse:\n\t\tpath=input(\"Enter Path: \")\n\t\tos.chdir(path)\n\t\tos.system('youtube-dl '+a)\n"
},
{
"alpha_fraction": 0.7980769276618958,
"alphanum_fraction": 0.807692289352417,
"avg_line_length": 33.66666793823242,
"blob_id": "5409c85265f171a2a517399ce1e6b188bf5c3377",
"content_id": "b2ed6c13bc924cf415ebf9ab6042248f191b8f17",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 104,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 3,
"path": "/README.md",
"repo_name": "sumedhaagarwal/WC",
"src_encoding": "UTF-8",
"text": "#sudo add-apt-repository ppa:nilarimogard/webupd8\n#sudo apt-get update\n#sudo apt-get install youtube-dl\n"
}
] | 2 |
shubhampachori12110095/Attentional-NMT | https://github.com/shubhampachori12110095/Attentional-NMT | aeb45087bdfa9af9e70365122c77796286610d46 | c39cc149a94bd1b5f62f473f7ffd42ae7b3b9092 | 56d847ea57acf403a30f11c3d0d60e29221a2538 | refs/heads/master | 2020-04-04T20:47:28.556256 | 2018-05-03T21:30:37 | 2018-05-03T21:30:37 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7268921136856079,
"alphanum_fraction": 0.7652173638343811,
"avg_line_length": 57.58490753173828,
"blob_id": "01310d4612a8e2cdc42c8c1eb4f1a02821c1de82",
"content_id": "2cbb409ebf26c2a0e93cc7773a614a923a5da06a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3115,
"license_type": "no_license",
"max_line_length": 466,
"num_lines": 53,
"path": "/README.md",
"repo_name": "shubhampachori12110095/Attentional-NMT",
"src_encoding": "UTF-8",
"text": "# Attentional-NMT\n\nAttention-Based Neural Machine Translation\n\n## Intro\nIn this project, we want to apply attentional mechanism to the deep neural network and translate German to English. The data we use is from IWLST14 dataset. For the result, we will evaluate it by BLEU scores.\n\n## Dataset\nFollowing previous works (Ranzato et al., 2015; Bahdanau et al., 2016), the dataset we use is from IWLST 2014 evaluation campaign (Cettolo et al., 2014), consisting of training/dev/test corpus with ap- proximately 153k, 7k, 6.5k bilingual sentence pairs respectively. The maximal sentence length is set as 50. The dictionary for English and German corpus respectively include 21,415 and 30,457 most frequent words, with other words replaced as a special token ‘UNK’.\n\n\n## Method\n#### 1. Training\nWe built the NMT model according to the methods we described before, a RNN based encoding-decoding framework with attention mechanism. For layers LSTMs act as encoder and decoder. The dimension of word embedding and LSTM hidden state are both set as 1000 is set as 1,000. To compare the performance with greedy search and beam search, the beam window is set as 5.\n\n#### 2. Inference\nWe firstly tried greedy algorithm, then came to Beam Search Algorithm. The basic idea of this algorithm is to set a window size of N and keep track of those top N most possible word combinations. we tried the window size from 1 to 15 and found that the performance increased as the window size incresed.\n\n\n## Result\nOur model training loss is shown in the following picture: \n\n\n\nTo learn the effect of different beam sizes when doing inference (translation), we set the beam size from 1 to 15. The De→En translation performance are shown below. \n\nFrom the table 2 we can see that our translation model can produce very good translation on short sentences. And the translation quality decrease when the sentences becoming longer. But our model can still capture the “main structure” of the sentence, with some errors when choosing the target words.\n\n## How to run the code\n\n#### 1. train the model:\n```\npython train.py --data data-bin/iwslt14.tokenized.de-en/ --optimizer Adam --learning_rate 1e-3 --model_file checkpoints/iwlst14/ --gpuid 0\n```\n\n#### 2. generate translation:\n```\npython generate.py --data data-bin/iwslt14.tokenized.de-en/ --batch-size 128 --model_file checkpoints/iwlst14/best_gmodel.pt --gpuid 0 --remove-bpe --beam 5\n```\n\n#### 3. compute BLEU scores:\n```\nperl scripts/multi-bleu.perl < ground_truth.txt translation.txt\n```\n\n## Reference\n[1] Luong, Minh-Thang, Hieu Pham, and Christopher D. Manning. Effective approaches to attention- based neural machine translation. arXiv preprint arXiv:1508.04025 (2015).\n\n[2] Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473 (2014).\n\n[3] Vaswani, Ashish, et al. Attention is all you need. Advances in Neural Information Processing Systems. 2017.\n\n[4] Gehring, Jonas, et al. Convolutional sequence to sequence learning. arXiv preprint arXiv:1705.03122 (2017).\n"
},
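The README describes beam search only informally. A minimal sketch of the idea it is describing (the `step_fn` callback, the BOS/EOS handling, and the length normalization are illustrative assumptions, not the project's actual decoder):

```python
def beam_search(step_fn, bos, eos, beam=5, max_len=50):
    # step_fn(prefix) -> iterable of (next_token, log_prob) candidates
    beams, finished = [([bos], 0.0)], []
    for _ in range(max_len):
        candidates = [(seq + [tok], score + lp)
                      for seq, score in beams
                      for tok, lp in step_fn(seq)]
        candidates.sort(key=lambda c: c[1], reverse=True)
        beams = []
        for seq, score in candidates[:beam]:
            # completed hypotheses leave the beam; the rest keep expanding
            (finished if seq[-1] == eos else beams).append((seq, score))
        if not beams:
            break
    finished.extend(beams)
    # return the hypothesis with the best length-normalized log probability
    return max(finished, key=lambda c: c[1] / len(c[0]))
```

With `beam=1` this degenerates to the greedy search the README mentions trying first.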
{
"alpha_fraction": 0.5932292342185974,
"alphanum_fraction": 0.5999212861061096,
"avg_line_length": 36.35293960571289,
"blob_id": "958b8dbe8478a6eee12cb755c5d98dcfd26ca6af",
"content_id": "fe77a24a7433d7f786c0a8e10627dbd692c638bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7621,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 204,
"path": "/train.py",
"repo_name": "shubhampachori12110095/Attentional-NMT",
"src_encoding": "UTF-8",
"text": "\nimport argparse\nimport logging\nimport math\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import cuda\n\nimport options\nimport data\nimport utils\nfrom meters import AverageMeter\nfrom model import LSTMModel\n\n\nlogging.basicConfig(\n format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG)\n\nparser = argparse.ArgumentParser(description=\"Driver program for JHU Attentional-NMT.\")\n\n# Load args\noptions.add_general_args(parser)\noptions.add_dataset_args(parser)\noptions.add_distributed_training_args(parser)\noptions.add_optimization_args(parser)\noptions.add_checkpoint_args(parser)\noptions.add_model_args(parser)\n\ndef main(args):\n use_cuda = (len(args.gpuid) >= 1)\n if args.gpuid:\n cuda.set_device(args.gpuid[0])\n\n # Load dataset\n splits = ['train', 'valid']\n if data.has_binary_files(args.data, splits):\n dataset = data.load_dataset(\n args.data, splits, args.src_lang, args.trg_lang)\n else:\n dataset = data.load_raw_text_dataset(\n args.data, splits, args.src_lang, args.trg_lang)\n if args.src_lang is None or args.trg_lang is None:\n # record inferred languages in args, so that it's saved in checkpoints\n args.src_lang, args.trg_lang = dataset.src, dataset.dst\n print('| [{}] dictionary: {} types'.format(dataset.src, len(dataset.src_dict)))\n print('| [{}] dictionary: {} types'.format(dataset.dst, len(dataset.dst_dict)))\n for split in splits:\n print('| {} {} {} examples'.format(args.data, split, len(dataset.splits[split])))\n\n # Set model parameters\n args.encoder_embed_dim = 1000\n args.encoder_layers = 4\n args.encoder_dropout_out = 0\n args.decoder_embed_dim = 1000\n args.decoder_layers = 4\n args.decoder_out_embed_dim = 1000\n args.decoder_dropout_out = 0\n args.bidirectional = False\n\n logging_meters = OrderedDict()\n logging_meters['train_loss'] = AverageMeter()\n logging_meters['valid_loss'] = AverageMeter()\n logging_meters['bsz'] = AverageMeter() # sentences per batch\n\n # Build model\n generator = LSTMModel(args, dataset.src_dict, dataset.dst_dict, use_cuda=use_cuda)\n\n if use_cuda:\n generator.cuda()\n else:\n generator.cpu()\n\n optimizer = eval(\"torch.optim.\" + args.optimizer)(generator.parameters(), args.learning_rate)\n\n lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=0, factor=args.lr_shrink)\n\n # Train until the learning rate gets too small\n max_epoch = args.max_epoch or math.inf\n\n epoch_i = 1\n best_dev_loss = math.inf\n lr = optimizer.param_groups[0]['lr']\n # main training loop\n\n # added for write training loss\n f1 = open(\"train_loss\", \"a\")\n\n while lr > args.min_lr and epoch_i <= max_epoch:\n logging.info(\"At {0}-th epoch.\".format(epoch_i))\n\n seed = args.seed + epoch_i\n torch.manual_seed(seed)\n\n max_positions_train = (\n min(args.max_source_positions, generator.encoder.max_positions()),\n min(args.max_target_positions, generator.decoder.max_positions())\n )\n\n # Initialize dataloader, starting at batch_offset\n itr = dataset.train_dataloader(\n 'train',\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences,\n max_positions=max_positions_train,\n seed=seed,\n epoch=epoch_i,\n sample_without_replacement=args.sample_without_replacement,\n sort_by_source_size=(epoch_i <= args.curriculum),\n shard_id=args.distributed_rank,\n num_shards=args.distributed_world_size,\n )\n # set training mode\n generator.train()\n\n # reset meters\n for key, val in logging_meters.items():\n if val is not None:\n val.reset()\n\n for i, 
sample in enumerate(itr):\n\n if use_cuda:\n # wrap input tensors in cuda tensors\n sample = utils.make_variable(sample, cuda=cuda)\n\n loss = generator(sample)\n sample_size = sample['target'].size(0) if args.sentence_avg else sample['ntokens']\n nsentences = sample['target'].size(0)\n logging_loss = loss.data / sample_size / math.log(2)\n logging_meters['bsz'].update(nsentences)\n logging_meters['train_loss'].update(logging_loss, sample_size)\n f1.write(\"{0}\\n\".format(logging_meters['train_loss'].avg))\n logging.debug(\"loss at batch {0}: {1:.3f}, batch size: {2}, lr={3}\".format(i, logging_meters['train_loss'].avg,\n round(logging_meters['bsz'].avg),\n optimizer.param_groups[0]['lr']))\n optimizer.zero_grad()\n loss.backward()\n\n # all-reduce grads and rescale by grad_denom\n for p in generator.parameters():\n if p.requires_grad:\n p.grad.data.div_(sample_size)\n\n torch.nn.utils.clip_grad_norm(generator.parameters(), args.clip_norm)\n optimizer.step()\n\n # validation -- this is a crude estimation because there might be some padding at the end\n max_positions_valid = (\n generator.encoder.max_positions(),\n generator.decoder.max_positions(),\n )\n\n # Initialize dataloader\n itr = dataset.eval_dataloader(\n 'valid',\n max_tokens=args.max_tokens,\n max_sentences=args.max_sentences,\n max_positions=max_positions_valid,\n skip_invalid_size_inputs_valid_test=args.skip_invalid_size_inputs_valid_test,\n descending=True, # largest batch first to warm the caching allocator\n shard_id=args.distributed_rank,\n num_shards=args.distributed_world_size,\n )\n # set validation mode\n generator.eval()\n\n # reset meters\n for key, val in logging_meters.items():\n if val is not None:\n val.reset()\n\n for i, sample in enumerate(itr):\n with torch.no_grad():\n if use_cuda:\n # wrap input tensors in cuda tensors\n sample = utils.make_variable(sample, cuda=cuda)\n loss = generator(sample)\n sample_size = sample['target'].size(0) if args.sentence_avg else sample['ntokens']\n loss = loss / sample_size / math.log(2)\n logging_meters['valid_loss'].update(loss, sample_size)\n logging.debug(\"dev loss at batch {0}: {1:.3f}\".format(i, logging_meters['valid_loss'].avg))\n\n # update learning rate\n lr_scheduler.step(logging_meters['valid_loss'].avg)\n lr = optimizer.param_groups[0]['lr']\n\n logging.info(\"Average loss value per instance is {0} at the end of epoch {1}\".format(logging_meters['valid_loss'].avg, epoch_i))\n torch.save(generator.state_dict(), open(args.model_file + \"data.nll_{0:.3f}.epoch_{1}.pt\".format(logging_meters['valid_loss'].avg, epoch_i), 'wb'))\n\n if logging_meters['valid_loss'].avg < best_dev_loss:\n best_dev_loss = logging_meters['valid_loss'].avg\n torch.save(generator.state_dict(), open(args.model_file + \"best_gmodel.pt\", 'wb'))\n\n epoch_i += 1\n\n f1.close()\n\nif __name__ == \"__main__\":\n ret = parser.parse_known_args()\n args = ret[0]\n if ret[1]:\n logging.warning(\"unknown arguments: {0}\".format(parser.parse_known_args()[1]))\n main(args)\n"
},
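In `train.py` above, the logged loss is the summed negative log-likelihood divided by the token count and by `math.log(2)`, i.e. bits per token. Converting that logged number to the more familiar per-token perplexity is a one-liner (values here are illustrative):

```python
def bits_to_perplexity(bits_per_token):
    # ppl = 2 ** bits, which equals e ** nats for the same loss
    return 2 ** bits_per_token

print(bits_to_perplexity(4.0))  # 16.0
```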
{
"alpha_fraction": 0.5411764979362488,
"alphanum_fraction": 0.5411764979362488,
"avg_line_length": 27.33333396911621,
"blob_id": "c017388e4acb7df3db7791cb43171724c66111fa",
"content_id": "7a2223bf44f1b8124b4065844138a50b18ae0d29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 28,
"num_lines": 3,
"path": "/postprocess.sh",
"repo_name": "shubhampachori12110095/Attentional-NMT",
"src_encoding": "UTF-8",
"text": " sed -r 's/\\@\\@ //g' # linux\n# sed 's/\\@\\@ //g' # mac os\nperl scripts/detruecase.perl"
}
] | 3 |
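`postprocess.sh` above undoes BPE subword segmentation with `sed`. The same step in Python, convenient when scoring inside a script (the sample string is made up):

```python
import re

def remove_bpe(line):
    # drop the "@@ " joiners that BPE inserts between subword units
    return re.sub(r'@@ ', '', line)

assert remove_bpe('ham@@ bur@@ ger .') == 'hamburger .'
```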
Tasha-Carty-220/asvs | https://github.com/Tasha-Carty-220/asvs | dd5496b08fb0709940d76ad0426d841220fa8662 | 634cc0e96daedc91d1acc06827ce82e9c13f520d | e7f062991916862d3ba3540f485407a095e522e3 | refs/heads/master | 2022-12-07T05:00:57.727155 | 2020-09-03T16:25:54 | 2020-09-03T16:25:54 | 300,162,880 | 1 | 0 | MIT | 2020-10-01T05:55:28 | 2020-10-01T05:55:23 | 2020-09-03T16:25:55 | null | [
{
"alpha_fraction": 0.7607154846191406,
"alphanum_fraction": 0.7684779167175293,
"avg_line_length": 50.94736862182617,
"blob_id": "6ff9df0641adb4445210795a5c1cfee0edda17c0",
"content_id": "8f43af3da9861e4d75a03adfaa83b95aa463816f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2963,
"license_type": "permissive",
"max_line_length": 394,
"num_lines": 57,
"path": "/README.md",
"repo_name": "Tasha-Carty-220/asvs",
"src_encoding": "UTF-8",
"text": "# Welcome To The OWASP Application Security Verification Standard (ASVS) Web App - Release Name: Marbles\n\nThe OWASP Application Security Verification Standard (ASVS) is a community-effort to\nestablish a framework of security requirements and controls that focus on normalising the functional and non-functional security controls required when designing, developing and testing modern web applications.\n\nYou can access the ASVS via Github, but we wanted to make this more accessible to all as a web application (based on django). The key aim of the web application is to allow individuals or companies a quick and easy way to understand the ASVS controls, and where to find information on meeting those controls.\n\nWhen we built Marbles, our aim was the following:\n\n - [x] Easy to update - the app is powered by two JSON files (asvs.json & category.json)\n - [X] Lightweight - The included Dockerfile builds a 89MB Docker image (size isn't everything)\n - [X] No frills - You don't want a web app that takes ages to load and has \"extra\" stuff in it\n - [X] Community-focused - The ASVS is built by you for you, and so should this application. We welcome requests and ideas from you, the community.\n\n## Installing\n\nWe decided to use Docker for virtualisation so that it's easier to run in the cloud and keep update. This assumes you have Docker installed and running on your host (be it on-prem or cloud)\n\n### Docker\nOnce you have cloned the repo, you can build and run the docker image with the following commands:\n\n`docker build -t asvs .` \n`docker run -d -p 8000:8000 asvs` \n \nThis will then map your local port 8000 to the running docker container.\n\n**NOTE:** In this example we built the docker image using the tag (-t) *asvs*, however you can change this if you want.\n\n### Other\nIf you want to run the web app on a \"production\" grade web server (such as Apache2) you can, django provides a WSGI file which you can find in the *asvs* folder. Individual installation guides for this are outside of the scope of this project, however because sharing is caring here is a [link](https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/) to the official django documentation.\n\n\n## Roadmap\n\nThis is the second release and we already have some new \"features\" in the planning stages. These are currently (and subject to change).\n\n1. Sharing projects between individuals\n2. Teams (maybe)\n3. Integration into JIRA (via an API)\n\n\n\n## Screenshots\n\n\n\n\n\n\n\n\n\n\n\n## Who Are We?\n\nAdam Maxwell (@catalyst256) & Daniel Cuthbert (@dcuthbert) are part of the Santander Group Cyber Security Research Team. Daniel is one of the co-authors of the ASVS, and we use it within the group and felt this app would be better suited as a community release, rather than just another internal tool. \n\n"
},
{
"alpha_fraction": 0.6219007968902588,
"alphanum_fraction": 0.629601776599884,
"avg_line_length": 34.97297286987305,
"blob_id": "11ded18433e8d3efbdafd22bc0f2587923c46bc9",
"content_id": "fbc431f95c3e171a0c8583b9b41f17e2922e5f6d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5324,
"license_type": "permissive",
"max_line_length": 133,
"num_lines": 148,
"path": "/projects/views.py",
"repo_name": "Tasha-Carty-220/asvs",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom .models import Projects\nimport json\nimport hashlib\nfrom django.conf import settings\nfrom django.core.files import File\nimport os\n\n\ndef load_json_file(level):\n results = []\n with open('common/asvs.json') as f:\n data = json.load(f)\n for r in data['requirements']:\n bob = 'L{0}'.format(level)\n if r.get(bob):\n results.append(r)\n return results\n\n\ndef create_template(requirements, project):\n # delete all the levels out of the requirements as not needed in the template\n # for r in requirements:\n # del r['levels']\n # r['enabled'] = 0\n # build the template with project information and requirements\n data = {}\n data['project_owner'] = project['project_owner']\n data['project_name'] = project['project_name']\n data['project_id'] = project['id']\n data['project_description'] = project['project_description']\n data['project_created'] = project['project_created'].isoformat()\n data['project_level'] = project['project_level']\n data['requirements'] = requirements\n phash = (hashlib.md5('{0}{1}'.format(\n project['project_owner'], project['id']).encode('utf-8')).hexdigest())\n with open('storage/{0}.json'.format(phash), 'w') as output:\n project_file = File(output)\n json.dump(data, project_file, indent=2)\n project_file.close()\n return\n\n\ndef load_template(phash):\n with open('storage/{0}.json'.format(phash), 'r') as template:\n data = json.load(template)\n template.close()\n return data\n\n\ndef update_template(phash, data):\n with open('storage/{0}.json'.format(phash), 'w') as template:\n json.dump(data, template, indent=2)\n template.close()\n return\n\n\ndef calculate_completion(requirements):\n total = len(requirements)\n enabled = 0\n for r in requirements:\n if r.get('enabled') and r['enabled'] > 0:\n enabled += 1\n else:\n pass\n percentage = enabled / total * 100\n return {'total': total, 'enabled': enabled, 'percentage': '{0:.1f}'.format(percentage)}\n\n\n@login_required\ndef project_all(request):\n projects = Projects.objects.filter(\n project_owner=request.user.username).values()\n return render(request, 'projects/manage.html', {'projects': projects})\n\n\n@login_required\ndef project_add(request):\n if request.method == 'POST':\n # Create the database record\n project_name = request.POST.get('project_name')\n project_owner = request.user.username\n project_description = request.POST.get('project_description')\n project_level = request.POST.get('project_level')\n p = Projects(project_name=project_name, project_owner=project_owner,\n project_description=project_description, project_level=project_level)\n p.save()\n # Build the template\n controls = load_json_file(project_level)\n project = Projects.objects.filter(\n project_owner=request.user.username, project_name=project_name).values()[0]\n create_template(controls, project)\n return redirect('projectsmanage')\n\n\n@login_required\ndef project_delete(request, projectid):\n Projects.objects.filter(\n project_owner=request.user.username, pk=projectid).delete()\n phash = (hashlib.md5('{0}{1}'.format(\n request.user.username, projectid).encode('utf-8')).hexdigest())\n os.remove('storage/{0}.json'.format(phash))\n return redirect('projectsmanage')\n\n\n@login_required\ndef project_view(request, projectid):\n phash = (hashlib.md5('{0}{1}'.format(\n request.user.username, projectid).encode('utf-8')).hexdigest())\n project = load_template(phash)\n if 
project['project_owner'] == request.user.username:\n percentage = calculate_completion(project['requirements'])\n return render(request, \"projects/view.html\", {'data': project['requirements'], 'project': project, 'percentage': percentage})\n\n\n@login_required\ndef project_update(request):\n if request.method == 'POST':\n phash = (hashlib.md5('{0}{1}'.format(request.user.username, request.POST.get(\n 'projectid')).encode('utf-8')).hexdigest())\n project = load_template(phash)\n for k, v in request.POST.items():\n if 'csrfmiddlewaretoken' in k or 'projectid' in k:\n pass\n else:\n for r in project['requirements']:\n if r['Item'] in k:\n if int(v) == 0:\n r['enabled'] = 0\n else:\n r['enabled'] = 1\n update_template(phash, project)\n return redirect('projectsview', projectid=request.POST.get('projectid'))\n\n\n@login_required\ndef project_download(request, projectid):\n phash = (hashlib.md5('{0}{1}'.format(\n request.user.username, projectid).encode('utf-8')).hexdigest())\n filename = 'storage/{0}.json'.format(phash)\n with open(filename, 'rb') as fh:\n response = HttpResponse(\n fh.read(), content_type=\"application/json\")\n response['Content-Disposition'] = 'inline; filename=' + \\\n os.path.basename(filename)\n return response\n"
},
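To make the completion math in `calculate_completion()` above concrete, here is the same calculation run on a tiny hand-made requirements list (hypothetical data, inlined so it runs on its own):

```python
requirements = [{'enabled': 1}, {'enabled': 0}, {}]  # one of three controls done
enabled = sum(1 for r in requirements if r.get('enabled'))
print('{0:.1f}'.format(enabled / len(requirements) * 100))  # 33.3
```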
{
"alpha_fraction": 0.7004830837249756,
"alphanum_fraction": 0.7004830837249756,
"avg_line_length": 33.5,
"blob_id": "fe8c49fa81311987f6307cd4435fa9a36f684d40",
"content_id": "f7ccaa334babb3c9ccf664da8e9d126a0099ee2c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 414,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 12,
"path": "/accountauth/urls.py",
"repo_name": "Tasha-Carty-220/asvs",
"src_encoding": "UTF-8",
"text": "# Basic setup for allowing users to sign up using a username and a password\n\nfrom django.conf.urls import url\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom .views import signup\n\nurlpatterns = [\n url(r'^signup/$', signup, name='signup'),\n url(r'login$', LoginView.as_view(\n template_name=\"auth/login.html\"), name=\"login\"),\n url(r'logout$', LogoutView.as_view(), name=\"logout\"),\n]\n"
},
{
"alpha_fraction": 0.6089385747909546,
"alphanum_fraction": 0.6350092887878418,
"avg_line_length": 37.35714340209961,
"blob_id": "fe0587b24a2e304212b6bb9903ce47632fd61d4c",
"content_id": "ca8928756237313ea5a9ea259ca1e912d2b3dee9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 537,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 14,
"path": "/Dockerfile",
"repo_name": "Tasha-Carty-220/asvs",
"src_encoding": "UTF-8",
"text": "FROM alpine:edge\nRUN apk add --no-cache python3 && \\\n python3 -m ensurepip && \\\n rm -r /usr/lib/python*/ensurepip && \\\n pip3 install --upgrade pip setuptools && \\\n if [ ! -e /usr/bin/pip ]; then ln -s pip3 /usr/bin/pip ; fi && \\\n if [[ ! -e /usr/bin/python ]]; then ln -sf /usr/bin/python3 /usr/bin/python; fi && \\\n rm -r /root/.cache\nCOPY . /app\nWORKDIR /app\nRUN pip3 install -r requirements.txt\nRUN python manage.py makemigrations\nRUN python manage.py migrate\nCMD [\"python\", \"manage.py\", \"runserver\", \"0.0.0.0:8000\"]\n"
},
{
"alpha_fraction": 0.6589147448539734,
"alphanum_fraction": 0.6589147448539734,
"avg_line_length": 17.428571701049805,
"blob_id": "871343ca01a3d950489a895922493af193ee74b6",
"content_id": "ed66a3b1fd2c6b1cd5a1956dea22eb0d8d7bdcdd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 129,
"license_type": "permissive",
"max_line_length": 49,
"num_lines": 7,
"path": "/levels/urls.py",
"repo_name": "Tasha-Carty-220/asvs",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom .views import levels\n\n\nurlpatterns = [\n url(r'(?P<level>\\d+)', levels, name=\"level\"),\n]\n"
},
{
"alpha_fraction": 0.6817391514778137,
"alphanum_fraction": 0.6817391514778137,
"avg_line_length": 43.230770111083984,
"blob_id": "f8ac01189cff90030ce1d4696affb9c0e920a715",
"content_id": "ba94d536d14cb677758b7a3ab6581d8091320f26",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 575,
"license_type": "permissive",
"max_line_length": 107,
"num_lines": 13,
"path": "/projects/urls.py",
"repo_name": "Tasha-Carty-220/asvs",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom .views import project_all, project_add, project_delete, project_view, project_update, project_download\n\n\nurlpatterns = [\n url(r'manage/', project_all, name='projectsmanage'),\n url(r'_add/', project_add, name='projectsadd'),\n url(r'_delete/(?P<projectid>\\d+)', project_delete, name='projectsdelete'),\n url(r'view/(?P<projectid>\\d+)', project_view, name='projectsview'),\n url(r'_update/', project_update, name='projectsupdate'),\n url(r'_download/(?P<projectid>\\d+)',\n project_download, name='projectsdownload')\n]\n"
},
{
"alpha_fraction": 0.7932489514350891,
"alphanum_fraction": 0.7932489514350891,
"avg_line_length": 235,
"blob_id": "d8e98113848296ded7a915032f146b5ba44d45bf",
"content_id": "505a36f89d8dfae67ae69bac6bf3067c9dc67ed5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 237,
"license_type": "permissive",
"max_line_length": 235,
"num_lines": 1,
"path": "/CONTRIBUTING.md",
"repo_name": "Tasha-Carty-220/asvs",
"src_encoding": "UTF-8",
"text": "The OWASP Application Security Verification Standard (ASVS) is driven by the community, and as such, so should this. We welcome anyone contributing, from code to ideas to advice. This release is for all to use and enjoy and help shape. \n"
},
{
"alpha_fraction": 0.6742424368858337,
"alphanum_fraction": 0.6742424368858337,
"avg_line_length": 21,
"blob_id": "bad357c0458fb2433d2f0573e6624ef1a094a1be",
"content_id": "ad44a73f28d3f06cdc1412536c75a0b4ab54d35e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 6,
"path": "/help/urls.py",
"repo_name": "Tasha-Carty-220/asvs",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom .views import helping\n\nurlpatterns = [\n url(r'(?P<category>\\d+)', helping, name=\"help\"),\n]\n"
},
{
"alpha_fraction": 0.6639344096183777,
"alphanum_fraction": 0.6639344096183777,
"avg_line_length": 16.428571701049805,
"blob_id": "0f73cc655dd155dd0ed12077e59348fc193acd2a",
"content_id": "5df06f22e3cf1cf9dfce6f77a9784e2d7e01acc4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 122,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 7,
"path": "/home/urls.py",
"repo_name": "Tasha-Carty-220/asvs",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import url\nfrom .views import home_page\n\n\nurlpatterns = [\n url(r'^$', home_page, name=\"home\"),\n]\n"
}
] | 9 |
HwayGuo/idlm_Pytorch | https://github.com/HwayGuo/idlm_Pytorch | afb45dbbeea2b12edbf14470113ff6dba90f71d9 | 741cc99fd266f5e59929d7fd303199136ce9e2ce | e40ca868622be4200170f0e8127bdae944959234 | refs/heads/master | 2022-04-27T01:29:55.726024 | 2020-04-30T18:47:26 | 2020-04-30T18:47:26 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5615682005882263,
"alphanum_fraction": 0.660408616065979,
"avg_line_length": 25.632352828979492,
"blob_id": "836a466b38c612496f741cbce1e18ab80a733b2a",
"content_id": "3bc051193b60d9b27547f96ab4190a799e754594",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1811,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 68,
"path": "/Tandem/parameters.py",
"repo_name": "HwayGuo/idlm_Pytorch",
"src_encoding": "UTF-8",
"text": "\"\"\"\nHyper-parameters of the Tandem model\n\"\"\"\n# Define which data set you are using\nDATA_SET = 'meta_material'\n# DATA_SET = 'gaussian_mixture'\n# DATA_SET = 'sine_wave'\n# DATA_SET = 'naval_propulsion'\n# DATA_SET = 'robotic_arm'\n# DATA_SET = 'ballistics'\nTEST_RATIO = 0.05\n\n# Model Architecture parameters\n#LOAD_FORWARD_CKPT_DIR = 'pre_trained_forward/'\nLOAD_FORWARD_CKPT_DIR = None\n\n#LINEAR_F = [4, 500, 500, 500, 1]\nLINEAR_F = [8, 1000, 1000, 1000, 1000, 150]\nCONV_OUT_CHANNEL_F = [4, 4, 4]\nCONV_KERNEL_SIZE_F = [8, 5, 5]\nCONV_STRIDE_F = [2, 1, 1]\n\nLINEAR_B = [150, 1000, 1000, 1000, 1000, 1000, 8]\nCONV_OUT_CHANNEL_B = [4, 4, 4]\nCONV_KERNEL_SIZE_B = [5, 5, 8]\nCONV_STRIDE_B = [1, 1, 2]\n\n# Model Architectural Params for gaussian mixture dataset\n#LINEAR_F = [3, 150, 150, 150, 150, 150, 2]\n#LINEAR_F = [4, 300, 300, 2]\n#CONV_OUT_CHANNEL_F = []\n#CONV_KERNEL_SIZE_F = []\n#CONV_STRIDE_F = []\n\n#LINEAR_B = [2, 150, 150, 150, 150, 150, 3]\n#LINEAR_B = [2, 300, 300, 300, 300, 4]\n#CONV_OUT_CHANNEL_B = []\n#CONV_KERNEL_SIZE_B = []\n#CONV_STRIDE_B = []\n\n# Optimizer parameters\nOPTIM = \"Adam\"\nREG_SCALE = 1e-3\nBATCH_SIZE = 256\nEVAL_BATCH_SIZE = 1024\nEVAL_STEP = 20\nTRAIN_STEP = 500\nVERB_STEP = 20\nLEARN_RATE = 1e-2\n# DECAY_STEP = 25000 # This is for step decay, however we are using dynamic decaying\nLR_DECAY_RATE = 0.9\nSTOP_THRESHOLD = -1 #-1 means dont stop\n\n# Running specific parameter\nUSE_CPU_ONLY = False\nDETAIL_TRAIN_LOSS_FORWARD = True\n#EVAL_MODEL = 'robotic_arm'\nEVAL_MODEL = 'ballistics'\n#EVAL_MODEL = 'sine_wave'\n# Data-specific parameters\nX_RANGE = [i for i in range(2, 10 )]\nY_RANGE = [i for i in range(10 , 2011 )]\nMODEL_NAME = None\n#DATA_DIR = '../'\nDATA_DIR = '/work/sr365/'\n# DATA_DIR = '/home/omar/PycharmProjects/github/idlm_Pytorch-master/forward/'\nGEOBOUNDARY = [30, 52, 42, 52]\nNORMALIZE_INPUT = True\n"
}
] | 1 |
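`parameters.py` above only declares layer specs such as `LINEAR_F`; the model code that consumes them is not part of this file. A hedged sketch of how a spec like that is typically turned into a network (assumes PyTorch; `mlp_from_spec` is an invented helper name, not the project's API):

```python
import torch.nn as nn

def mlp_from_spec(spec):
    # one Linear per consecutive pair of widths, ReLU between layers
    layers = []
    for n_in, n_out in zip(spec[:-1], spec[1:]):
        layers += [nn.Linear(n_in, n_out), nn.ReLU()]
    return nn.Sequential(*layers[:-1])  # no activation after the final layer

forward_net = mlp_from_spec([8, 1000, 1000, 1000, 1000, 150])  # LINEAR_F
```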
tbold5/201910_1510_week_02 | https://github.com/tbold5/201910_1510_week_02 | f44d5f0e2a513a11d187c17169d51b56a55cb720 | f5b58bb23af52f1fa29e73eb143782655c6fe157 | a23153fd5f45af25eb813d51cea5bbce30a39b4c | refs/heads/master | 2020-04-16T21:52:45.018566 | 2019-01-16T00:13:58 | 2019-01-16T00:13:58 | 165,943,736 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7532467246055603,
"alphanum_fraction": 0.7922077775001526,
"avg_line_length": 37.75,
"blob_id": "5c5cd0189f84acc655253e0c6d3332b3a2dcf2c3",
"content_id": "78693f92e657014ca0b7f7836ed9a92d0979a059",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 154,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 4,
"path": "/Temperature.py",
"repo_name": "tbold5/201910_1510_week_02",
"src_encoding": "UTF-8",
"text": "temperature_in_celcius = 10\ntemperature_in_fahrenheit = (temperature_in_celcius * 9/5) + 32\nprint(temperature_in_celcius)\nprint(temperature_in_fahrenheit)"
}
] | 1 |
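The same Celsius-to-Fahrenheit formula as a reusable function (an illustrative refactor, not part of the repository):

```python
def celsius_to_fahrenheit(c):
    # F = C * 9/5 + 32
    return c * 9 / 5 + 32

assert celsius_to_fahrenheit(10) == 50.0
```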
sumiki/pygraphql | https://github.com/sumiki/pygraphql | 62daa17ad3b93677b6918d490f00acfe38cfdcc5 | e7cf6de10c23b0c29c132091ff927f3bb3ad1129 | a7eaa49b13268218989a40fe398177450626601d | refs/heads/master | 2022-12-15T17:37:19.269238 | 2018-07-03T18:58:16 | 2018-07-03T18:58:16 | 139,543,084 | 0 | 0 | null | 2018-07-03T07:12:03 | 2018-07-03T19:04:19 | 2022-12-03T18:49:13 | Python | [
{
"alpha_fraction": 0.29456382989883423,
"alphanum_fraction": 0.29519596695899963,
"avg_line_length": 33.39130401611328,
"blob_id": "e30f9bbebe0d18e4b2d57ec86757fac0bfcf37b0",
"content_id": "5f9c1457b8e93437a2ac5d57eb4a6f1e9bae654a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3164,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 92,
"path": "/javascripts/components/users.js",
"repo_name": "sumiki/pygraphql",
"src_encoding": "UTF-8",
"text": "// @flow\n\nimport React from 'react';\nimport UpdateUserMutation from '../mutations/UpdateUserMutation'\nimport {\n createFragmentContainer,\n graphql,\n} from 'react-relay';\n\nclass Users extends React.Component {\n state = {\n users: this.props.users,\n edit_user_id: 0\n }\n \n componentDidMount(){\n }\n \n handleEdit = (e, user) => {\n this.setState({\n edit_user_id: user.id\n })\n }\n \n handleUpdate = (e, user) => {\n var new_users = []\n this.state.users.forEach((eachUser, i) => {\n if( user.id === eachUser.id ){\n let new_user = Object.assign( {}, user )\n new_user.name = e.target.value\n new_users.push( new_user )\n } else {\n new_users.push( eachUser )\n }\n })\n this.setState({\n users: new_users\n })\n }\n \n handleSubmit = (e, user) => {\n UpdateUserMutation.commit(\n this.props.relay.environment,\n user,\n () => { this.setState({edit_user_id: 0}) }\n )\n }\n \n render() {\n return <div>\n <table>\n <tbody>\n { ( () => {\n return this.state.users.map((user, i) => {\n let tmpUser = user\n return <tr key={i}>\n <td>{ user.email }</td>\n <td>\n { ( () => {\n if( this.state.edit_user_id !== user.id ){\n return user.name\n } else {\n return <input\n type=\"text\"\n value={ user.name }\n onChange={ (e) => { this.handleUpdate(e, tmpUser) } }\n />\n }\n } )() }\n </td>\n <td>\n { ( () => {\n if( this.state.edit_user_id !== user.id ){\n return <a href=\"#\" onClick={ (e) => { this.handleEdit( e, tmpUser ) } } >Edit</a>\n } else {\n return <a href=\"#\" onClick={ (e) => { this.handleSubmit( e, tmpUser ) } } >Update</a>\n }\n } )() }\n \n </td>\n </tr>\n })\n } )() }\n \n \n </tbody>\n </table>\n\n </div>\n }\n}\nexport default createFragmentContainer(Users, {})\n"
},
{
"alpha_fraction": 0.523809552192688,
"alphanum_fraction": 0.5767195820808411,
"avg_line_length": 9.44444465637207,
"blob_id": "64591a9735e5129c59347a0f6c3c4ee2d2d0686d",
"content_id": "6a586f805f9ffe57cd20f928e20c87f1a794b3c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 189,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 18,
"path": "/readme.md",
"repo_name": "sumiki/pygraphql",
"src_encoding": "UTF-8",
"text": "\n## Graphql + GAE webapp2\n\npip install graphene-gae -t ./libs\n\npip install jinja2 -t ./libs\n\n\n## Query\n\nhttp://localhost:8080/graphiql\n\n```\n{\n human(id: \"1000\") {\n name\n }\n}\n```\n"
},
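The README's sample query targets a `human(id:)` field. A minimal graphene schema that answers that exact query (the resolver data is made up; this assumes graphene 2.x, the generation pulled in by `graphene-gae`):

```python
import graphene

class Human(graphene.ObjectType):
    id = graphene.ID()
    name = graphene.String()

class Query(graphene.ObjectType):
    human = graphene.Field(Human, id=graphene.String(required=True))

    def resolve_human(self, info, id):
        # stub lookup: only id "1000" resolves to a record
        return Human(id=id, name='Luke') if id == '1000' else None

schema = graphene.Schema(query=Query)
result = schema.execute('{ human(id: "1000") { name } }')
print(result.data)  # {'human': {'name': 'Luke'}}
```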
{
"alpha_fraction": 0.6877771019935608,
"alphanum_fraction": 0.6979100704193115,
"avg_line_length": 29.941177368164062,
"blob_id": "6bd134dd78b3d851ba67c35024ae66f9869940be",
"content_id": "3d65dc2afd1127d255aa2ad597d16d094d85c1bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1579,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 51,
"path": "/main.py",
"repo_name": "sumiki/pygraphql",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport webapp2\nimport os\nimport sys\nimport json\nimport jinja2\njinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n self.response.headers['Content-Type'] = 'text/plain'\n print sys.path\n self.response.write(u'Hello, World!')\n\nclass GraphiqlHandler(webapp2.RequestHandler):\n def get(self):\n query = self.request.get('query')\n variables = self.request.get('variables')\n response = ''\n graphql_url = '/graphql'\n\n values = dict( graphql_url=graphql_url, response=response,\n variables=variables,query=query.encode('unicode_escape'))\n\n\n template = jinja_environment.get_template('templates/graphiql.html')\n\n self.response.out.write(template.render(values))\n\n\n\napp = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/graphiql', GraphiqlHandler)\n], debug=True)\n\n"
},
{
"alpha_fraction": 0.4760536253452301,
"alphanum_fraction": 0.4760536253452301,
"avg_line_length": 18.679244995117188,
"blob_id": "0f9e01a3053fc9e034772f8997ca997917800471",
"content_id": "cd538bdb060d425ed32c6183101a8e48c95fe02a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 1044,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 53,
"path": "/javascripts/mutations/UpdateUserMutation.js",
"repo_name": "sumiki/pygraphql",
"src_encoding": "UTF-8",
"text": "\nimport {\n commitMutation,\n graphql,\n} from 'react-relay';\n\nconst mutation = graphql`\n mutation UpdateUserMutation($input: UpdateUserMutationInput!) {\n updateUserMutation(input: $input) {\n user {\n id\n name\n email\n }\n }\n }\n`;\n\nfunction getOptimisticResponse(user) {\n console.log('getOptimisticResponse')\n return {\n updateUserMutation: {\n user: user\n }\n };\n}\n\nfunction commit(\n environment,\n user,\n callback\n) {\n return commitMutation(\n environment,\n {\n mutation,\n variables: {\n input: {\n id: user.id,\n name: user.name,\n email: user.email\n },\n },\n onCompleted: (response, errors) => {\n if( typeof(callback) === 'function' ){\n callback()\n }\n },\n optimisticResponse: getOptimisticResponse(user),\n }\n );\n}\n\nexport default {commit};\n"
}
] | 4 |
Tahamosaad/keyglove | https://github.com/Tahamosaad/keyglove | bced92e852e010a82862b5604489654e443ceb5d | 6c4a5bbe88f72b139028f9047f9f157c52661540 | 970ef4f1696332d18eb964cdd5f235a8039638de | refs/heads/master | 2021-01-15T21:14:44.995704 | 2013-11-24T02:03:40 | 2013-11-24T02:03:40 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5452051162719727,
"alphanum_fraction": 0.5607381463050842,
"avg_line_length": 52.8023796081543,
"blob_id": "596fffc16a1503fddc3ffe8cc90c3b5e0a55c12e",
"content_id": "3434688b02a4bd32cb6eee3a4741d7b9d8378c3a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22597,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 420,
"path": "/hostapps/python/kglib.py",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\"\"\" Keyglove protocol parser/generator library\n\nChangelog:\n 2013-11-20 - Bundled clean serial + HID transport into KGLib code\n - Added send_and_return() method for easier response handling\n 2013-11-16 - Initial release\n\n============================================\nKeyglove Protocol Python interface library\n2013-11-20 by Jeff Rowberg <[email protected]>\nUpdates should (hopefully) always be available at https://github.com/jrowberg/keyglove\n\n============================================\nKeyglove Protocol Python interface library code is placed under the MIT license\nCopyright (c) 2013 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n\n\"\"\"\n\n__author__ = \"Jeff Rowberg\"\n__license__ = \"MIT\"\n__version__ = \"2013-11-20\"\n__email__ = \"[email protected]\"\n\nimport struct, time\n\n\n\n# thanks to Masaaki Shibata for Python event handler code\n# http://www.emptypage.jp/notes/pyevent.en.html\n\nclass KeygloveEvent(object):\n\n def __init__(self, doc=None):\n self.__doc__ = doc\n\n def __get__(self, obj, objtype=None):\n if obj is None:\n return self\n return KeygloveEventHandler(self, obj)\n\n def __set__(self, obj, value):\n pass\n\n\n\nclass KeygloveEventHandler(object):\n\n def __init__(self, event, obj):\n\n self.event = event\n self.obj = obj\n\n def _getfunctionlist(self):\n\n \"\"\"(internal use) \"\"\"\n\n try:\n eventhandler = self.obj.__eventhandler__\n except AttributeError:\n eventhandler = self.obj.__eventhandler__ = {}\n return eventhandler.setdefault(self.event, [])\n\n def add(self, func):\n\n \"\"\"Add new event handler function.\n\n Event handler function must be defined like func(sender, earg).\n You can add handler also by using '+=' operator.\n \"\"\"\n\n self._getfunctionlist().append(func)\n return self\n\n def remove(self, func):\n\n \"\"\"Remove existing event handler function.\n\n You can remove handler also by using '-=' operator.\n \"\"\"\n\n self._getfunctionlist().remove(func)\n return self\n\n def fire(self, earg=None):\n\n \"\"\"Fire event and call all handler functions\n\n You can call EventHandler object itself like e(earg) instead of\n e.fire(earg).\n \"\"\"\n\n for func in self._getfunctionlist():\n func(self.obj, earg)\n\n __iadd__ = add\n __isub__ = remove\n __call__ = fire\n\n\n\nclass KGLib(object):\n\n def kg_cmd_system_ping(self):\n return struct.pack('<4B', 0xC0, 0x00, 0x01, 0x01)\n def kg_cmd_system_reset(self):\n return struct.pack('<4B', 0xC0, 0x00, 0x01, 
0x02)\n \n def kg_cmd_touch_get_mode(self):\n return struct.pack('<4B', 0xC0, 0x00, 0x02, 0x01)\n def kg_cmd_touch_set_mode(self, mode):\n return struct.pack('<4BB', 0xC0, 0x01, 0x02, 0x02, mode)\n \n def kg_cmd_feedback_get_blink_mode(self):\n return struct.pack('<4B', 0xC0, 0x00, 0x03, 0x01)\n def kg_cmd_feedback_set_blink_mode(self, mode):\n return struct.pack('<4BB', 0xC0, 0x01, 0x03, 0x02, mode)\n def kg_cmd_feedback_get_piezo_mode(self, index):\n return struct.pack('<4BB', 0xC0, 0x01, 0x03, 0x03, index)\n def kg_cmd_feedback_set_piezo_mode(self, index, mode, duration, frequency):\n return struct.pack('<4BBBBH', 0xC0, 0x05, 0x03, 0x04, index, mode, duration, frequency)\n def kg_cmd_feedback_get_vibe_mode(self, index):\n return struct.pack('<4BB', 0xC0, 0x01, 0x03, 0x05, index)\n def kg_cmd_feedback_set_vibe_mode(self, index, mode, duration):\n return struct.pack('<4BBBB', 0xC0, 0x03, 0x03, 0x06, index, mode, duration)\n def kg_cmd_feedback_get_rgb_mode(self, index):\n return struct.pack('<4BB', 0xC0, 0x01, 0x03, 0x07, index)\n def kg_cmd_feedback_set_rgb_mode(self, index, mode_red, mode_green, mode_blue):\n return struct.pack('<4BBBBB', 0xC0, 0x04, 0x03, 0x08, index, mode_red, mode_green, mode_blue)\n \n def kg_cmd_motion_get_mode(self, index):\n return struct.pack('<4BB', 0xC0, 0x01, 0x04, 0x01, index)\n def kg_cmd_motion_set_mode(self, index, mode):\n return struct.pack('<4BBB', 0xC0, 0x02, 0x04, 0x02, index, mode)\n \n kg_rsp_system_ping = KeygloveEvent()\n kg_rsp_system_reset = KeygloveEvent()\n \n kg_rsp_touch_get_mode = KeygloveEvent()\n kg_rsp_touch_set_mode = KeygloveEvent()\n \n kg_rsp_feedback_get_blink_mode = KeygloveEvent()\n kg_rsp_feedback_set_blink_mode = KeygloveEvent()\n kg_rsp_feedback_get_piezo_mode = KeygloveEvent()\n kg_rsp_feedback_set_piezo_mode = KeygloveEvent()\n kg_rsp_feedback_get_vibe_mode = KeygloveEvent()\n kg_rsp_feedback_set_vibe_mode = KeygloveEvent()\n kg_rsp_feedback_get_rgb_mode = KeygloveEvent()\n kg_rsp_feedback_set_rgb_mode = KeygloveEvent()\n \n kg_rsp_motion_get_mode = KeygloveEvent()\n kg_rsp_motion_set_mode = KeygloveEvent()\n \n kg_evt_protocol_error = KeygloveEvent()\n \n kg_evt_system_boot = KeygloveEvent()\n kg_evt_system_ready = KeygloveEvent()\n kg_evt_system_error = KeygloveEvent()\n \n kg_evt_touch_mode = KeygloveEvent()\n kg_evt_touch_status = KeygloveEvent()\n \n kg_evt_feedback_blink_mode = KeygloveEvent()\n kg_evt_feedback_piezo_mode = KeygloveEvent()\n kg_evt_feedback_vibe_mode = KeygloveEvent()\n kg_evt_feedback_rgb_mode = KeygloveEvent()\n \n kg_evt_motion_mode = KeygloveEvent()\n kg_evt_motion_data = KeygloveEvent()\n \n on_busy = KeygloveEvent()\n on_idle = KeygloveEvent()\n on_timeout = KeygloveEvent()\n on_before_tx_command = KeygloveEvent()\n on_tx_command_complete = KeygloveEvent()\n\n kgapi_rx_buffer = []\n kgapi_rx_expected_length = 0\n busy = False\n debug = False\n\n last_response = None\n last_event = None\n\n def send_command(self, out_obj, packet):\n if self.debug: print('=>[ ' + ' '.join(['%02X' % ord(b) for b in packet ]) + ' ]')\n self.on_before_tx_command()\n self.busy = True\n self.on_busy()\n if out_obj.__class__.__name__ == \"Serial\":\n # send packet using PySerial interface\n out_obj.write(packet)\n elif out_obj.__class__.__name__ == \"HidReport\":\n # send packet via raw HID report (multiple reports if necessary)\n report_size = len(out_obj.values()[0])\n report_i = 1\n out_obj.values()[0][0] = min(len(packet), report_size - 1)\n for i in xrange(len(packet)):\n if report_i >= report_size:\n # send report 
and start over with new packet\n out_obj.send()\n report_i = 1\n out_obj.values()[0][0] = min(len(packet) - i, report_size - 1)\n out_obj.values()[0][report_i] = ord(packet[i])\n report_i = report_i + 1\n\n # finish sending the last (or only) output report\n out_obj.send()\n else:\n print('UNSUPPORTED OUTPUT OBJECT: %s' % out_obj.__class__.__name__)\n self.on_tx_command_complete()\n\n def send_and_return(self, rxtx_obj, packet, timeout=0):\n self.send_command(rxtx_obj, packet)\n self.check_activity(rxtx_obj, timeout)\n return self.get_last_response()\n\n def get_last_response(self):\n return self.last_response\n\n def get_last_event(self):\n return self.last_event\n\n def check_activity(self, in_obj, timeout=0):\n if in_obj.__class__.__name__ == \"Serial\":\n # read input data using PySerial interface\n if timeout > 0:\n in_obj.timeout = timeout\n while 1:\n x = in_obj.read()\n if len(x) > 0:\n self.parse(ord(x))\n else: # timeout\n self.busy = False\n self.on_idle()\n self.on_timeout()\n if not self.busy: # finished\n break\n else:\n while in_obj.inWaiting(): self.parse(ord(in_obj.read()))\n elif in_obj.__class__.__name__ == \"HidReport\":\n # read input using HID interface (already handled via threading so we only need to check for timeouts)\n if timeout > 0:\n t0 = time.time()\n while time.time() - t0 < timeout:\n if not self.busy: # finished\n break;\n if self.busy: # timeout\n self.busy = False\n self.on_idle()\n self.on_timeout()\n else:\n print('UNSUPPORTED INPUT OBJECT: %s' % in_obj.__class__.__name__)\n return self.busy\n\n def parse(self, b):\n if len(self.kgapi_rx_buffer) == 0 and (b == 0xC0 or b == 0x80):\n self.kgapi_rx_buffer.append(b)\n elif len(self.kgapi_rx_buffer) == 1:\n self.kgapi_rx_buffer.append(b)\n self.kgapi_rx_expected_length = 4 + (self.kgapi_rx_buffer[0] & 0x07) + self.kgapi_rx_buffer[1]\n elif len(self.kgapi_rx_buffer) > 1:\n self.kgapi_rx_buffer.append(b)\n\n \"\"\"\n Keyglove packet structure (as of 2013-11-16):\n Byte 0: 8 bits, Packet Type 0xC0 = command/response, 0x80 = event\n Byte 1: 8 bits, Length Payload length\n Byte 2: 8 bits, Class ID (CID) Packet class\n Byte 3: 8 bits, Command ID (CMD) Packet ID\n Bytes 4-n: 0 - 250 Bytes, Payload (PL) Up to 250 bytes of payload\n \"\"\"\n\n #print('%02X: %d, %d' % (b, len(self.kgapi_rx_buffer), self.kgapi_rx_expected_length))\n if self.kgapi_rx_expected_length > 0 and len(self.kgapi_rx_buffer) == self.kgapi_rx_expected_length:\n if self.debug: print('<=[ ' + ' '.join(['%02X' % b for b in self.kgapi_rx_buffer ]) + ' ]')\n packet_type, payload_length, packet_class, packet_command = self.kgapi_rx_buffer[:4]\n self.kgapi_rx_payload = b''.join(chr(i) for i in self.kgapi_rx_buffer[4:])\n self.kgapi_rx_buffer = []\n if packet_type & 0xC0 == 0xC0:\n # 0xC0 = response packet after a command that has just been sent\n if packet_class == 1: # SYSTEM\n if packet_command == 1: # kg_rsp_system_ping\n runtime, = struct.unpack('<L', self.kgapi_rx_payload[:4])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'runtime': runtime } }\n self.kg_rsp_system_ping(self.last_response['payload'])\n elif packet_command == 2: # kg_rsp_system_reset\n result, = struct.unpack('<H', self.kgapi_rx_payload[:2])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'result': result } }\n self.kg_rsp_system_reset(self.last_response['payload'])\n elif packet_class == 2: # TOUCH\n if packet_command == 1: # 
kg_rsp_touch_get_mode\n mode, = struct.unpack('<B', self.kgapi_rx_payload[:1])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'mode': mode } }\n self.kg_rsp_touch_get_mode(self.last_response['payload'])\n elif packet_command == 2: # kg_rsp_touch_set_mode\n result, = struct.unpack('<H', self.kgapi_rx_payload[:2])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'result': result } }\n self.kg_rsp_touch_set_mode(self.last_response['payload'])\n elif packet_class == 3: # FEEDBACK\n if packet_command == 1: # kg_rsp_feedback_get_blink_mode\n mode, = struct.unpack('<B', self.kgapi_rx_payload[:1])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'mode': mode } }\n self.kg_rsp_feedback_get_blink_mode(self.last_response['payload'])\n elif packet_command == 2: # kg_rsp_feedback_set_blink_mode\n result, = struct.unpack('<H', self.kgapi_rx_payload[:2])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'result': result } }\n self.kg_rsp_feedback_set_blink_mode(self.last_response['payload'])\n elif packet_command == 3: # kg_rsp_feedback_get_piezo_mode\n mode, duration, frequency, = struct.unpack('<BBH', self.kgapi_rx_payload[:4])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'mode': mode, 'duration': duration, 'frequency': frequency } }\n self.kg_rsp_feedback_get_piezo_mode(self.last_response['payload'])\n elif packet_command == 4: # kg_rsp_feedback_set_piezo_mode\n result, = struct.unpack('<H', self.kgapi_rx_payload[:2])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'result': result } }\n self.kg_rsp_feedback_set_piezo_mode(self.last_response['payload'])\n elif packet_command == 5: # kg_rsp_feedback_get_vibe_mode\n mode, duration, = struct.unpack('<BB', self.kgapi_rx_payload[:2])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'mode': mode, 'duration': duration } }\n self.kg_rsp_feedback_get_vibe_mode(self.last_response['payload'])\n elif packet_command == 6: # kg_rsp_feedback_set_vibe_mode\n result, = struct.unpack('<H', self.kgapi_rx_payload[:2])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'result': result } }\n self.kg_rsp_feedback_set_vibe_mode(self.last_response['payload'])\n elif packet_command == 7: # kg_rsp_feedback_get_rgb_mode\n mode_red, mode_green, mode_blue, = struct.unpack('<BBB', self.kgapi_rx_payload[:3])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'mode_red': mode_red, 'mode_green': mode_green, 'mode_blue': mode_blue } }\n self.kg_rsp_feedback_get_rgb_mode(self.last_response['payload'])\n elif packet_command == 8: # kg_rsp_feedback_set_rgb_mode\n result, = struct.unpack('<H', self.kgapi_rx_payload[:2])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'result': result } }\n self.kg_rsp_feedback_set_rgb_mode(self.last_response['payload'])\n elif packet_class == 4: # MOTION\n if packet_command == 1: # kg_rsp_motion_get_mode\n mode, = struct.unpack('<B', 
self.kgapi_rx_payload[:1])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'mode': mode } }\n self.kg_rsp_motion_get_mode(self.last_response['payload'])\n elif packet_command == 2: # kg_rsp_motion_set_mode\n result, = struct.unpack('<H', self.kgapi_rx_payload[:2])\n self.last_response = { 'length': payload_length, 'class_id': packet_class, 'command_id': packet_command, 'payload': { 'result': result } }\n self.kg_rsp_motion_set_mode(self.last_response['payload'])\n self.busy = False\n self.on_idle()\n elif packet_type & 0xC0 == 0x80:\n # 0x80 = event packet\n if packet_class == 0: # PROTOCOL\n if packet_command == 1: # kg_evt_protocol_error\n code, = struct.unpack('<H', self.kgapi_rx_payload[:2])\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'code': code } }\n self.kg_evt_protocol_error(self.last_event['payload'])\n elif packet_class == 1: # SYSTEM\n if packet_command == 1: # kg_evt_system_boot\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { } }\n self.kg_evt_system_boot(self.last_event['payload'])\n elif packet_command == 2: # kg_evt_system_ready\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { } }\n self.kg_evt_system_ready(self.last_event['payload'])\n elif packet_command == 3: # kg_evt_system_error\n code, = struct.unpack('<H', self.kgapi_rx_payload[:2])\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'code': code } }\n self.kg_evt_system_error(self.last_event['payload'])\n elif packet_class == 2: # TOUCH\n if packet_command == 1: # kg_evt_touch_mode\n mode, = struct.unpack('<B', self.kgapi_rx_payload[:1])\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'mode': mode } }\n self.kg_evt_touch_mode(self.last_event['payload'])\n elif packet_command == 2: # kg_evt_touch_status\n status_len, = struct.unpack('<B', self.kgapi_rx_payload[:1])\n status_data = [ord(b) for b in self.kgapi_rx_payload[1:]]\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'status': status_data } }\n self.kg_evt_touch_status(self.last_event['payload'])\n elif packet_class == 3: # FEEDBACK\n if packet_command == 1: # kg_evt_feedback_blink_mode\n mode, = struct.unpack('<B', self.kgapi_rx_payload[:1])\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'mode': mode } }\n self.kg_evt_feedback_blink_mode(self.last_event['payload'])\n elif packet_command == 2: # kg_evt_feedback_piezo_mode\n index, mode, duration, frequency, = struct.unpack('<BBBH', self.kgapi_rx_payload[:5])\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'index': index, 'mode': mode, 'duration': duration, 'frequency': frequency } }\n self.kg_evt_feedback_piezo_mode(self.last_event['payload'])\n elif packet_command == 3: # kg_evt_feedback_vibe_mode\n index, mode, duration, = struct.unpack('<BBB', self.kgapi_rx_payload[:3])\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'index': index, 'mode': mode, 'duration': duration } }\n self.kg_evt_feedback_vibe_mode(self.last_event['payload'])\n elif 
packet_command == 4: # kg_evt_feedback_rgb_mode\n index, mode_red, mode_green, mode_blue, = struct.unpack('<BBBB', self.kgapi_rx_payload[:4])\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'index': index, 'mode_red': mode_red, 'mode_green': mode_green, 'mode_blue': mode_blue } }\n self.kg_evt_feedback_rgb_mode(self.last_event['payload'])\n elif packet_class == 4: # MOTION\n if packet_command == 1: # kg_evt_motion_mode\n index, mode, = struct.unpack('<BB', self.kgapi_rx_payload[:2])\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'index': index, 'mode': mode } }\n self.kg_evt_motion_mode(self.last_event['payload'])\n elif packet_command == 2: # kg_evt_motion_data\n index, flags, data_len, = struct.unpack('<BBB', self.kgapi_rx_payload[:3])\n data_data = [ord(b) for b in self.kgapi_rx_payload[3:]]\n self.last_event = { 'length': payload_length, 'class_id': packet_class, 'event_id': packet_command, 'payload': { 'index': index, 'flags': flags, 'data': data_data } }\n self.kg_evt_motion_data(self.last_event['payload'])\n\n# ============================ EOF ===============================\n"
},
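The kglib module above is self-contained on the host side; a minimal usage sketch with pyserial (the port name, baud rate, and timeout are assumptions, and the code is Python 2 to match kglib's use of ord()/xrange):

import serial
import kglib

kg = kglib.KGLib()
ser = serial.Serial('/dev/ttyACM0', 115200, timeout=1)  # hypothetical port

# kg_cmd_system_ping() frames the 4-byte packet C0 00 01 01; the parsed
# response payload carries the firmware's runtime counter
rsp = kg.send_and_return(ser, kg.kg_cmd_system_ping(), timeout=1)
if rsp is not None:
    print('runtime: %d' % rsp['payload']['runtime'])
ser.close()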
{
"alpha_fraction": 0.5570393204689026,
"alphanum_fraction": 0.6272414922714233,
"avg_line_length": 43.050418853759766,
"blob_id": "a1fcbb8c6eac7ad8436fe5c252d4697cc47625be",
"content_id": "ddd8bbc8f8d7c1456ada7982901b3f7ce9b3beaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5242,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 119,
"path": "/keyglove/support_feedback_blink.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Special hardware setup file\n// 7/17/2011 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2011 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n\n\n#ifndef _SUPPORT_FEEDBACK_BLINK_H_\n#define _SUPPORT_FEEDBACK_BLINK_H_\n\n#define KG_BLINK_OFF 0\n#define KG_BLINK_SOLID 1\n#define KG_BLINK_200_100 2\n#define KG_BLINK_200_50 3\n#define KG_BLINK_1000_500 4\n#define KG_BLINK_1000_100 5\n#define KG_BLINK_1000_100_2X 6\n#define KG_BLINK_1000_100_3X 7\n#define KG_BLINK_3000_1000 8\n#define KG_BLINK_3000_100 9\n#define KG_BLINK_3000_100_2X 10\n#define KG_BLINK_3000_100_3X 11\n\nuint8_t blinkMode; // blink LED mode\nuint8_t blinkTick;\nuint8_t blinkLoop;\nuint8_t blinkMod;\n\nvoid set_blink_logic(uint8_t logic) {\n // simple wrapper for consistency\n digitalWrite(KG_PIN_BLINK, logic);\n}\n\nvoid set_blink_mode(uint8_t mode) {\n blinkMode = mode;\n blinkTick = 0;\n blinkLoop = 0;\n // logic: mode=0 -> off, mode=5 -> on, else no immediate change\n if (blinkMode == KG_BLINK_OFF) set_blink_logic(0);\n else if (blinkMode == KG_BLINK_SOLID) set_blink_logic(1);\n else if (blinkMode == KG_BLINK_200_100) blinkLoop = 4;\n else if (blinkMode == KG_BLINK_200_50) blinkLoop = 4;\n else if (blinkMode == KG_BLINK_1000_500) blinkLoop = 20;\n else if (blinkMode == KG_BLINK_1000_100) blinkLoop = 20;\n else if (blinkMode == KG_BLINK_1000_100_2X) blinkLoop = 20;\n else if (blinkMode == KG_BLINK_1000_100_3X) blinkLoop = 20;\n else if (blinkMode == KG_BLINK_3000_1000) blinkLoop = 60;\n else if (blinkMode == KG_BLINK_3000_100) blinkLoop = 60;\n else if (blinkMode == KG_BLINK_3000_100_2X) blinkLoop = 60;\n else if (blinkMode == KG_BLINK_3000_100_3X) blinkLoop = 60;\n\n // send kg_evt_feedback_blink_mode packet (if we aren't just intentionally setting it already)\n if (!inBinPacket) {\n uint8_t payload[1] = { blinkMode };\n send_keyglove_packet(KG_PACKET_TYPE_EVENT, 1, KG_PACKET_CLASS_FEEDBACK, KG_PACKET_ID_EVT_FEEDBACK_BLINK_MODE, payload);\n }\n}\n\nvoid setup_feedback_blink() {\n pinMode(KG_PIN_BLINK, OUTPUT);\n digitalWrite(KG_PIN_BLINK, LOW);\n set_blink_mode(KG_BLINK_3000_100);\n blinkTick = 0;\n}\n\nvoid update_feedback_blink() {\n // each \"keygloveTick\" is 10ms, loops at 100 (1 second)\n // each \"blinkTick\" is 50ms, loops at cycle period (max 12.5 seconds = 250, near 255)\n if (blinkLoop && (keygloveTick % 5) == 0) {\n 
blinkMod = blinkTick % blinkLoop;\n if (blinkMode == KG_BLINK_200_100 && blinkTick % 2 == 0) {\n set_blink_logic(blinkMod >= 2 ? 0 : 1);\n } else if (blinkMode == KG_BLINK_200_50) {\n set_blink_logic(blinkMod >= 1 ? 0 : 1);\n } else if (blinkMode == KG_BLINK_1000_500 && blinkTick % 10 == 0) {\n set_blink_logic(blinkMod >= 10 ? 0 : 1);\n } else if (blinkMode == KG_BLINK_1000_100 && blinkTick % 2 == 0) {\n set_blink_logic(blinkMod >= 2 ? 0 : 1);\n } else if (blinkMode == KG_BLINK_1000_100_2X && blinkTick % 2 == 0) {\n set_blink_logic(((blinkMod >= 2 && blinkMod < 4) || (blinkMod >= 6)) ? 0 : 1);\n } else if (blinkMode == KG_BLINK_1000_100_3X && blinkTick % 2 == 0) {\n set_blink_logic(((blinkMod >= 2 && blinkMod < 4) || (blinkMod >= 6 && blinkMod < 8) || (blinkMod >= 10)) ? 0 : 1);\n } else if (blinkMode == KG_BLINK_3000_1000 && blinkTick % 20 == 0) {\n set_blink_logic(blinkMod >= 20 ? 0 : 1);\n } else if (blinkMode == KG_BLINK_3000_100 && blinkTick % 2 == 0) {\n set_blink_logic(blinkMod >= 2 ? 0 : 1);\n } else if (blinkMode == KG_BLINK_3000_100_2X && blinkTick % 2 == 0) {\n set_blink_logic(((blinkMod >= 2 && blinkMod < 4) || (blinkMod >= 6)) ? 0 : 1);\n } else if (blinkMode == KG_BLINK_3000_100_3X && blinkTick % 2 == 0) {\n set_blink_logic(((blinkMod >= 2 && blinkMod < 4) || (blinkMod >= 6 && blinkMod < 8) || (blinkMod >= 10)) ? 0 : 1);\n }\n blinkTick++;\n if (blinkTick == blinkLoop) blinkTick = 0;\n }\n}\n\n#endif // _SUPPORT_FEEDBACK_BLINK_H_\n"
},
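The blink modes defined in the header above map directly onto the mode byte of the feedback_set_blink_mode command; a short sketch of the exact bytes the kglib module earlier in this dump generates for KG_BLINK_3000_1000 (mode 8):

import kglib

kg = kglib.KGLib()
packet = kg.kg_cmd_feedback_set_blink_mode(8)  # KG_BLINK_3000_1000
print(' '.join('%02X' % ord(b) for b in packet))  # prints: C0 01 03 02 08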
{
"alpha_fraction": 0.5699969530105591,
"alphanum_fraction": 0.5882174372673035,
"avg_line_length": 40.17499923706055,
"blob_id": "3afedc0c27ff7e207a8623844715d801d0ee7576",
"content_id": "906c381b018eda198edd0b7e2b563958d9853786",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3293,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 80,
"path": "/keyglove/support_protocol_feedback.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Special hardware setup file\n// 9/9/2013 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2013 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n\n\n#ifndef _SUPPORT_PROTOCOL_FEEDBACK_H_\n#define _SUPPORT_PROTOCOL_FEEDBACK_H_\n\nuint8_t process_protocol_command_feedback(uint8_t *rxPacket) {\n // check for valid command IDs\n uint8_t protocol_error = 0;\n switch (rxPacket[3]) {\n #if KG_FEEDBACK & KG_FEEDBACK_BLINK\n case KG_PACKET_ID_CMD_FEEDBACK_GET_BLINK_MODE:\n // parameters = 0 bytes\n if (rxPacket[1] != 0) {\n // incorrect parameter length\n protocol_error = KG_DEF_PROTOCOL_ERROR_PARAMETER_LENGTH;\n } else {\n // build response (uint16_t result, uint8_t mode)\n uint8_t payload[3] = { 0x00, 0x00, blinkMode };\n \n // send response\n send_keyglove_packet(KG_PACKET_TYPE_COMMAND, sizeof(payload), rxPacket[2], rxPacket[3], payload);\n }\n break;\n\n case KG_PACKET_ID_CMD_FEEDBACK_SET_BLINK_MODE:\n // parameters = 1 byte (uint8_t mode)\n if (rxPacket[1] != 1) {\n // incorrect parameter length\n protocol_error = KG_DEF_PROTOCOL_ERROR_PARAMETER_LENGTH;\n } else {\n // build response (uint16_t result)\n uint8_t payload[2] = { 0x00, 0x00 };\n\n // validate mode\n if (rxPacket[4] > 11) {\n payload[0] = 0x01; // 0x0001 result code = parameter(s) out of range\n } else {\n set_blink_mode(rxPacket[4]);\n }\n\n // send response\n send_keyglove_packet(KG_PACKET_TYPE_COMMAND, sizeof(payload), rxPacket[2], rxPacket[3], payload);\n }\n break;\n #endif\n\n default:\n protocol_error = KG_DEF_PROTOCOL_ERROR_INVALID_COMMAND;\n }\n return protocol_error;\n}\n\n#endif // _SUPPORT_PROTOCOL_FEEDBACK_H_"
},
{
"alpha_fraction": 0.5964794754981995,
"alphanum_fraction": 0.6165329813957214,
"avg_line_length": 40.953269958496094,
"blob_id": "1cceea56be03ab76782a2991b3e26683d9547fdb",
"content_id": "6f88ff60f535b2b0db5e5652f489faab227144ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4488,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 107,
"path": "/keyglove/custom_protocol.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Special hardware setup file\n// 9/9/2013 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2013 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n\n\n#ifndef _CUSTOM_PROTOCOL_H_\n#define _CUSTOM_PROTOCOL_H_\n\nuint8_t filter_incoming_keyglove_packet(uint8_t *rxPacket) {\n // GUIDELINES:\n // - We know that the packet is well-formed (correct length/payload match), but that's all.\n // - Packet byte array argument is passed by reference, so you can modify it. BE CAREFUL IF YOU DO THIS.\n // - Return 0 to let the packet go through (normal processing).\n // - Return non-zero to prevent the packet from being processed.\n\n // EXAMPLE: block \"system_ping\" command\n //if (rxPacket[0] == 0xC0 && rxPacket[2] == 0x01 && rxPacket[3] == 0x01) return 1;\n\n // allow packet to go through if we reach this point\n return 0;\n}\n\nuint8_t filter_outgoing_keyglove_packet(uint8_t *packetType, uint8_t *payloadLength, uint8_t *packetClass, uint8_t *packetId, uint8_t *payload) {\n // GUIDELINES:\n // - We know that the packet is well-formed (correct length/payload match), but that's all.\n // - All arguments are passed by reference, so you can modify them. 
BE CAREFUL IF YOU DO THIS.\n // - Return 0 to let the packet go through.\n // - Return non-zero to prevent the packet from being sent.\n \n // EXAMPLE: block \"system_ready\" event\n //if (rxPacket[0] == 0x80 && rxPacket[2] == 0x01 && rxPacket[3] == 0x02) return 1;\n\n // allow packet to go through if we reach this point\n return 0;\n}\n\nuint8_t process_protocol_command_custom(uint8_t *rxPacket) {\n uint8_t protocol_error = 0;\n\n // check for valid custom command class/ID values\n switch (rxPacket[2]) {\n /*// EXAMPLE CLASS/COMMAND ENTRY FOR CUSTOM PROTOCOL\n case 0x80:\n // custom 0x80 command class\n switch (rxPacket[3]) {\n case 0x01:\n // custom 0x80:0x01 class/ID\n // TODO: implement custom functionality - validate data length, validate parameter(s), send_keyglove_packet()\n\n // parameters = 0 bytes\n if (rxPacket[1] != 0) {\n // incorrect parameter length\n protocol_error = KG_DEF_PROTOCOL_ERROR_PARAMETER_LENGTH;\n } else {\n // build response (uint32_t runtime)\n uint8_t payload[4];\n payload[0] = keygloveTock & 0xFF;\n payload[1] = (keygloveTock >> 8) & 0xFF;\n payload[2] = (keygloveTock >> 16) & 0xFF;\n payload[3] = (keygloveTock >> 24) & 0xFF;\n\n // send response\n send_keyglove_packet(KG_PACKET_TYPE_COMMAND, sizeof(payload), rxPacket[2], rxPacket[3], payload);\n }\n break;\n\n default:\n // valid class, but invalid ID (not defined in custom protocol)\n protocol_error = KG_DEF_PROTOCOL_ERROR_INVALID_COMMAND;\n\n }\n break;\n //*/\n\n default:\n // invalid class (not defined in custom protocol)\n protocol_error = KG_DEF_PROTOCOL_ERROR_INVALID_COMMAND;\n }\n\n return protocol_error;\n}\n\n#endif // _CUSTOM_PROTOCOL_H_"
},
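The commented-out custom class in custom_protocol.h above (class 0x80, command 0x01, no parameters) would be framed from the host exactly like the built-in commands; a hedged sketch following kglib's packing convention (the function name is hypothetical):

import struct

def kg_cmd_custom_ping():
    # hypothetical custom command: type 0xC0, payload length 0, class 0x80, ID 0x01
    return struct.pack('<4B', 0xC0, 0x00, 0x80, 0x01)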
{
"alpha_fraction": 0.6123448014259338,
"alphanum_fraction": 0.6352509260177612,
"avg_line_length": 47.449153900146484,
"blob_id": "a2cdc3dd66ff5e44533fb2ee3723f0302a92523e",
"content_id": "47237ad71b2169c5c782305990fa1328b97bd93c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5719,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 118,
"path": "/keyglove/hardware.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Overall architecture/hardware option definitions\n// 9/7/2013 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2013 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n#ifndef _HARDWARE_H_\n#define _HARDWARE_H_\n\n/** Main controller board options. Only one board may be selected at compile time. (defined in KG_BOARD) */\n\n#define KG_BOARD_USER 0 // You better know what you're doing\n#define KG_BOARD_TEENSYPP2 100 // AT90USB1286, 46 I/O\n#define KG_BOARD_ARDUINO_DUE 200 // SAM3X8E, bzillion I/O and Arduino design (prototype)\n#define KG_BOARD_KEYGLOVE100 300 // NOT SUPPORTED YET: SAM3X8E, bzillion I/O and custom design\n\n\n\n/** Flex sensor options. Only one choice may be selected at the same time. (defined in KG_FLEX) */\n\n#define KG_FLEX_NONE 0 // No flex sensors used\n#define KG_FLEX_FINGERS 1 // NOT SUPPORTED YET: Full 5-finger complement\n\n\n\n/** Pressure sensor options. Only one choice may be selected at the same time. (defined in KG_PRESSURE) */\n\n#define KG_PRESSURE_NONE 0 // No pressure sensors used\n#define KG_PRESSURE_TIPS 1 // NOT SUPPORTED YET: Full 5-fingertip complement\n\n\n\n/** Hand selection options. Only one choice may be selected at the same time. (defined in KG_HAND) */\n\n#define KG_HAND_RIGHT 1 /* Right-hand sensor connection orientation (default) */\n#define KG_HAND_LEFT 2 /* Left-hand sensor connection orientation (relevant for kit, others automatic) */\n\n\n\n/** Dual glove communication options. Only one choice may be selected at the same time. (defined in KG_DUALGLOVE) */\n\n#define KG_DUALGLOVE_NONE 0 /* No dual-glove support */\n#define KG_DUALGLOVE_RFM22B 1 /* NOT SUPPORTED YET: Wireless connection with RFM22B */\n\n\n/** Host interface options. Multiple interfaces may be enabled at the same time. 
(defined in KG_HOSTIF) */\n\n#define KG_HOSTIF_NONE 0 /* Don't communicate (weird, maybe you have a reason) */\n#define KG_HOSTIF_USB_SERIAL 1 /* Hardware USB serial (requires ARM, AT90USB* or ATMega32U* MCU) */\n#define KG_HOSTIF_USB_RAWHID 2 /* Hardware USB raw HID (requires ARM, AT90USB* or ATMega32U* MCU) */\n#define KG_HOSTIF_USB_HID 4 /* Hardware USB HID (requires ARM, AT90USB* or ATMega32U* MCU) */\n#define KG_HOSTIF_BT2_SERIAL 8 /* Bluetooth v2 serial (requires Bluegiga WT12 w/iWRAP) */\n#define KG_HOSTIF_BT2_RAWHID 16 /* Bluetooth v2 raw HID (requires Bluegiga WT12 w/iWRAP v5) */\n#define KG_HOSTIF_BT2_HID 32 /* Bluetooth v2 HID (requires Bluegiga WT12 w/iWRAP) */\n\n\n\n/** Motion sensor options. Multiple sensors may be enabled, provided they do not compete. (defined in KG_MOTION) */\n\n#define KG_MOTION_MPU6050_HAND 1 /* 6-axis I2C digital accel/gyro on back of hand */\n#define KG_MOTION_MPU6050_INDEXTIP 2 /* 6-axis I2C digital accel/gyro on index fingertip */\n\n\n\n/** Sensory feedback. Multiple options may be enabled. (defined in KG_FEEDBACK) */\n\n#define KG_FEEDBACK_BLINK 1 /* Single LED (e.g. on Arduino/Teensy boards) */\n#define KG_FEEDBACK_PIEZO 2 /* Piezo buzzer for sound */\n#define KG_FEEDBACK_VIBRATE 4 /* Vibration motor for haptic feedback */\n#define KG_FEEDBACK_RGB 8 /* Combined RGB LED for visual feedback */\n\n\n\n/** Sensory feedback connection. Only one choice may be selected at a time. (defined in KG_FEEDBACKCONN) */\n\n#define KG_FEEDBACKCONN_NONE 0 /* Don't do anything with feedback */\n#define KG_FEEDBACKCONN_DIRECT 1 /* Direct connection to I/O pins */\n#define KG_FEEDBACKCONN_I2C 2 /* Custom I2C-based RGB/vibe/piezo module */\n\n\n\n/** Debug settings. Multiple options may be enabled. (defined in KG_DEBUG) */\n\n#define KG_DEBUG_NONE 0\n#define KG_DEBUG_BENCHMARK 1\n#define KG_DEBUG_FEEDBACK 2\n#define KG_DEBUG_TOUCH 4\n#define KG_DEBUG_TOUCHSET 8\n#define KG_DEBUG_MOTION 16\n#define KG_DEBUG_HOSTIF_USB 1024\n#define KG_DEBUG_HOSTIF_BT2 2048\n#define KG_DEBUG_HID_KEYBOARD 4096\n#define KG_DEBUG_HID_MOUSE 8192\n\n\n\n#endif // _HARDWARE_H_\n\n\n"
},
{
"alpha_fraction": 0.5835972428321838,
"alphanum_fraction": 0.600380003452301,
"avg_line_length": 38.48749923706055,
"blob_id": "f3a4e146f9f005c666c3678c36fce2bc42571f35",
"content_id": "70e7641f1167969051a49372cc9de250ac1b80d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3158,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 80,
"path": "/keyglove/support_protocol_system.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Special hardware setup file\n// 9/9/2013 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2013 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n\n\n#ifndef _SUPPORT_PROTOCOL_SYSTEM_H_\n#define _SUPPORT_PROTOCOL_SYSTEM_H_\n\nuint8_t process_protocol_command_system(uint8_t *rxPacket) {\n // check for valid command IDs\n uint8_t protocol_error = 0;\n switch (rxPacket[3]) {\n case KG_PACKET_ID_CMD_SYSTEM_PING:\n // parameters = 0 bytes\n if (rxPacket[1] != 0) {\n // incorrect parameter length\n protocol_error = KG_DEF_PROTOCOL_ERROR_PARAMETER_LENGTH;\n } else {\n // build response (uint32_t runtime)\n uint8_t payload[4];\n payload[0] = keygloveTock & 0xFF;\n payload[1] = (keygloveTock >> 8) & 0xFF;\n payload[2] = (keygloveTock >> 16) & 0xFF;\n payload[3] = (keygloveTock >> 24) & 0xFF;\n \n // send response\n send_keyglove_packet(KG_PACKET_TYPE_COMMAND, sizeof(payload), rxPacket[2], rxPacket[3], payload);\n }\n break;\n\n case KG_PACKET_ID_CMD_SYSTEM_RESET:\n // parameters = 0 bytes\n if (rxPacket[1] != 0) {\n // incorrect parameter length\n protocol_error = KG_DEF_PROTOCOL_ERROR_PARAMETER_LENGTH;\n } else {\n // build response (uint16_t result)\n uint8_t payload[2] = { 0x00, 0x00 };\n \n // send response\n send_keyglove_packet(KG_PACKET_TYPE_COMMAND, sizeof(payload), rxPacket[2], rxPacket[3], payload);\n \n // reboot Keyglove\n inBinPacket = false;\n rxPacketLength = 0;\n setup();\n }\n break;\n\n default:\n protocol_error = KG_DEF_PROTOCOL_ERROR_INVALID_COMMAND;\n }\n return protocol_error;\n}\n\n#endif // _SUPPORT_PROTOCOL_SYSTEM_H_"
},
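The system_ping handler above returns keygloveTock as four little-endian payload bytes; decoding on the host side is a single struct.unpack (the payload value here is illustrative only):

import struct

payload = b'\x10\x27\x00\x00'            # example response payload: 0x00002710
runtime, = struct.unpack('<L', payload)  # little-endian uint32_t
print(runtime)                           # 10000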
{
"alpha_fraction": 0.5722339153289795,
"alphanum_fraction": 0.5933458805084229,
"avg_line_length": 44.23749923706055,
"blob_id": "0868649c795c5df559af82dab2f756ca6cfc48e7",
"content_id": "35b0ac475e1a698ec397cabf1c2777ccedaf483c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 18094,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 400,
"path": "/keyglove/support_protocol.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Special hardware setup file\n// 9/9/2013 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2013 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n\n\n#ifndef _SUPPORT_PROTOCOL_H_\n#define _SUPPORT_PROTOCOL_H_\n\n#define KG_PROTOCOL_RX_TIMEOUT 500\n\n#define KG_PACKET_TYPE_EVENT 0x80\n#define KG_PACKET_TYPE_COMMAND 0xC0\n\n#define KG_PACKET_CLASS_PROTOCOL 0x00\n#define KG_PACKET_CLASS_SYSTEM 0x01\n#define KG_PACKET_CLASS_TOUCH 0x02\n#define KG_PACKET_CLASS_FEEDBACK 0x03\n#define KG_PACKET_CLASS_MOTION 0x04\n#define KG_PACKET_CLASS_FLEX 0x05\n#define KG_PACKET_CLASS_PRESSURE 0x06\n#define KG_PACKET_CLASS_TOUCHSET 0x07\n\n#define KG_LOG_LEVEL_PANIC 0\n#define KG_LOG_LEVEL_CRITICAL 1\n#define KG_LOG_LEVEL_WARNING 3\n#define KG_LOG_LEVEL_NORMAL 5\n#define KG_LOG_LEVEL_VERBOSE 9\n\n// ------------------------------------------------------------------\n// -------- API packets below are built into the core system --------\n// ------------------------------------------------------------------\n\n// PROTOCOL PACKETS\n#define KG_PACKET_ID_EVT_PROTOCOL_ERROR 0x01\n #define KG_DEF_PROTOCOL_ERROR_INVALID_COMMAND 0x01\n #define KG_DEF_PROTOCOL_ERROR_PACKET_TIMEOUT 0x02\n #define KG_DEF_PROTOCOL_ERROR_BAD_LENGTH 0x03\n #define KG_DEF_PROTOCOL_ERROR_PARAMETER_LENGTH 0x04\n #define KG_DEF_PROTOCOL_ERROR_NOT_IMPLEMENTED 0x05\n\n// ------------------------------------------------------------------\n\n// SYSTEM PACKETS\n#define KG_PACKET_ID_CMD_SYSTEM_PING 0x01\n#define KG_PACKET_ID_CMD_SYSTEM_RESET 0x02\n// -- command/event split --\n#define KG_PACKET_ID_EVT_SYSTEM_BOOT 0x01\n#define KG_PACKET_ID_EVT_SYSTEM_READY 0x02\n\n// ------------------------------------------------------------------\n\n// TOUCH PACKETS\n#define KG_PACKET_ID_CMD_TOUCH_GET_MODE 0x01\n#define KG_PACKET_ID_CMD_TOUCH_SET_MODE 0x02\n// -- command/event split --\n#define KG_PACKET_ID_EVT_TOUCH_MODE 0x01\n#define KG_PACKET_ID_EVT_TOUCH_STATUS 0x02\n\n// ------------------------------------------------------------------\n// ------- API packets below are optional according to config -------\n// ------------------------------------------------------------------\n\n// FEEDBACK PACKETS\n#define KG_PACKET_ID_CMD_FEEDBACK_GET_BLINK_MODE 0x01\n#define KG_PACKET_ID_CMD_FEEDBACK_SET_BLINK_MODE 0x02\n#define KG_PACKET_ID_CMD_FEEDBACK_GET_PIEZO_MODE 0x03\n#define 
KG_PACKET_ID_CMD_FEEDBACK_SET_PIEZO_MODE 0x04\n#define KG_PACKET_ID_CMD_FEEDBACK_GET_VIBE_MODE 0x05\n#define KG_PACKET_ID_CMD_FEEDBACK_SET_VIBE_MODE 0x06\n#define KG_PACKET_ID_CMD_FEEDBACK_GET_RGB_MODE 0x07\n#define KG_PACKET_ID_CMD_FEEDBACK_SET_RGB_MODE 0x08\n// -- command/event split --\n#define KG_PACKET_ID_EVT_FEEDBACK_BLINK_MODE 0x01\n#define KG_PACKET_ID_EVT_FEEDBACK_PIEZO_MODE 0x02\n#define KG_PACKET_ID_EVT_FEEDBACK_VIBE_MODE 0x03\n#define KG_PACKET_ID_EVT_FEEDBACK_RGB_MODE 0x04\n\n// MOTION PACKETS\n#define KG_PACKET_ID_CMD_MOTION_GET_MODE 0x01\n#define KG_PACKET_ID_CMD_MOTION_SET_MODE 0x02\n// -- command/event split --\n#define KG_PACKET_ID_EVT_MOTION_MODE 0x01\n#define KG_PACKET_ID_EVT_MOTION_DATA 0x02\n\n// ------------------------------------------------------------------\n// ------------------------------------------------------------------\n// ------------------------------------------------------------------\n\n#if KG_HOSTIF & KG_HOSTIF_USB_RAWHID\n #define RAWHID_TX_SIZE 64\n #define RAWHID_RX_SIZE 64\n uint8_t rxRawHIDPacket[RAWHID_RX_SIZE];\n uint8_t txRawHIDPacket[RAWHID_TX_SIZE];\n#endif\n\n// custom protocol function prototypes\nuint8_t filter_incoming_keyglove_packet(uint8_t *rxPacket);\nuint8_t filter_outgoing_keyglove_packet(uint8_t *packetType, uint8_t *payloadLength, uint8_t *packetClass, uint8_t *packetId, uint8_t *payload);\nuint8_t process_protocol_command_custom(uint8_t *rxPacket);\n\nuint8_t send_keyglove_packet(uint8_t packetType, uint8_t payloadLength, uint8_t packetClass, uint8_t packetId, uint8_t *payload);\n\nbool interfaceBT2SerialReady = false;\nuint8_t interfaceBT2SerialMode = 0;\nbool interfaceBT2RawHIDReady = false;\nuint8_t interfaceBT2RawHIDMode = 0;\nbool interfaceBT2HIDReady = false;\nuint8_t interfaceBT2HIDMode = 0;\n\nuint16_t rxByte; // actually WORD but the high byte is for .read() error checking\n\nuint8_t *rxPacket; // buffer for incoming data\nuint16_t rxPacketSize; // container size in bytes of received packet buffer\nuint16_t rxPacketLength; // full length in bytes of received packet (may be less than rxPacketSize)\nuint32_t packetStartTime; // command packet timeout detection\n\nbool response = false; // indicates whether we intend to generate a response\nbool inBinPacket = false; // indicates whether we have started a binary packet or not\nuint8_t binDataLength; // expected size of incoming binary data (should be rxPacketLength - 4)\n\n// special string comparison for protocol, match must either be end of string OR followed by space\nint strncasecmp2(const char *s1, const char *s2, int len) {\n int r = strncasecmp(s1, s2, len);\n if (r == 0 && s2[len] != 0 && s2[len - 1] != ' ' && s2[len] != ' ') r = 1; // terminate or followed by space\n return r;\n}\n\n// pre-declare packet handlers, which must be defined elsewhere if enabled\n// (we have to call these functions from this file, but this is compiled before others)\n\nuint8_t process_protocol_command_system(uint8_t *rxPacket);\nuint8_t process_protocol_command_touch(uint8_t *rxPacket);\nuint8_t process_protocol_command_feedback(uint8_t *rxPacket);\nuint8_t process_protocol_command_motion(uint8_t *rxPacket);\nuint8_t process_protocol_command_flex(uint8_t *rxPacket);\nuint8_t process_protocol_command_pressure(uint8_t *rxPacket);\nuint8_t process_protocol_command_touchset(uint8_t *rxPacket);\n\nvoid setup_protocol() {\n rxPacketSize = 32;\n rxPacket = (uint8_t *)malloc(rxPacketSize);\n rxPacketLength = 0;\n}\n\nvoid protocol_parse(uint8_t inputByte) {\n if (rxPacketLength + 1 == 
rxPacketSize) {\n rxPacketSize += 32;\n rxPacket = (uint8_t *)realloc(rxPacket, rxPacketSize);\n }\n if (!inBinPacket && inputByte == KG_PACKET_TYPE_COMMAND) { // \"bait\" byte, 0xC0\n packetStartTime = millis();\n inBinPacket = true;\n rxPacket[0] = inputByte;\n rxPacketLength = 1; // initialize buffer length to include only 1st header byte (so far)\n binDataLength = 0;\n } else if (inBinPacket && rxPacketLength == 1) { // data length byte\n binDataLength = inputByte;\n if (binDataLength > 250) {\n // error (data payload too long)\n uint8_t payload[2] = { KG_DEF_PROTOCOL_ERROR_BAD_LENGTH, 0x00 };\n send_keyglove_packet(KG_PACKET_TYPE_EVENT, 2, KG_PACKET_CLASS_PROTOCOL, KG_PACKET_ID_EVT_PROTOCOL_ERROR, payload);\n inBinPacket = false;\n rxPacketLength = 0;\n } else {\n rxPacket[rxPacketLength++] = inputByte;\n }\n } else {\n rxPacket[rxPacketLength++] = inputByte;\n if (inBinPacket && rxPacketLength - 4 == binDataLength) {\n // process packet that just came in, passing to appropriate main handler\n uint8_t protocol_error = 0;\n\n // filter incoming packets for custom behavior\n if (filter_incoming_keyglove_packet(rxPacket) == 0) {\n switch (rxPacket[2]) {\n //case KG_PACKET_CLASS_PROTOCOL:\n \n case KG_PACKET_CLASS_SYSTEM:\n protocol_error = process_protocol_command_system(rxPacket);\n break;\n \n case KG_PACKET_CLASS_TOUCH:\n protocol_error = process_protocol_command_touch(rxPacket);\n break;\n \n #if KG_MOTION > 0\n case KG_PACKET_CLASS_MOTION:\n protocol_error = process_protocol_command_motion(rxPacket);\n break;\n #endif\n \n #if KG_FEEDBACK > 0\n case KG_PACKET_CLASS_FEEDBACK:\n protocol_error = process_protocol_command_feedback(rxPacket);\n break;\n #endif\n \n #if KG_FLEX > 0\n case KG_PACKET_CLASS_FLEX:\n protocol_error = process_protocol_command_flex(rxPacket);\n break;\n #endif\n \n #if KG_PRESSURE > 0\n case KG_PACKET_CLASS_PRESSURE:\n protocol_error = process_protocol_command_pressure(rxPacket);\n break;\n #endif\n \n #if KG_TOUCHSET > 0\n case KG_PACKET_CLASS_TOUCHSET:\n protocol_error = process_protocol_command_touchset(rxPacket);\n break;\n #endif\n \n default:\n // check for custom protocol, will return KG_DEF_PROTOCOL_ERROR_INVALID_COMMAND if no matches\n protocol_error = KG_DEF_PROTOCOL_ERROR_INVALID_COMMAND;\n break;\n }\n\n // check for errors (e.g. 
unhandled, bad arguments, etc.)\n if (protocol_error) {\n // check for custom protocol, will return KG_DEF_PROTOCOL_ERROR_INVALID_COMMAND if no matches\n protocol_error = process_protocol_command_custom(rxPacket);\n\n // if we still have an error, then there is no custom functionality implemented for this class/ID combination\n if (protocol_error) {\n uint8_t payload[2] = { protocol_error, 0x00 };\n send_keyglove_packet(KG_PACKET_TYPE_EVENT, 2, KG_PACKET_CLASS_PROTOCOL, KG_PACKET_ID_EVT_PROTOCOL_ERROR, payload);\n }\n }\n }\n\n // reset packet status/length\n inBinPacket = false;\n rxPacketLength = 0;\n }\n }\n}\n\nuint8_t check_incoming_protocol_data() {\n #if KG_HOSTIF & KG_HOSTIF_USB_SERIAL\n // read available data from USB virtual serial\n if (interfaceUSBSerialReady && (interfaceUSBSerialMode & KG_INTERFACE_MODE_INCOMING_PACKET) != 0) {\n while ((rxByte = USBSerial.read()) < 256) protocol_parse((uint8_t)rxByte);\n }\n #endif\n\n #if KG_HOSTIF & KG_HOSTIF_USB_RAWHID\n // read raw HID data over USB\n if (interfaceUSBRawHIDReady && (interfaceUSBRawHIDMode & KG_INTERFACE_MODE_INCOMING_PACKET) != 0) {\n int8_t bytes = RawHID.recv(rxRawHIDPacket, 0);\n if (bytes > 0) {\n for (int8_t i = 0; i < rxRawHIDPacket[0]; i++) protocol_parse(rxRawHIDPacket[i + 1]);\n }\n }\n #endif\n\n #if KG_HOSTIF & KG_HOSTIF_BT2_SERIAL\n // smart read data from Bluetooth v2.1 SPP\n if (interfaceBT2SerialReady && (interfaceBT2SerialMode & KG_INTERFACE_MODE_INCOMING_PACKET) != 0 && bluetoothConnectionMap[bluetoothSPPDeviceIndex] -> linkSPP != 0xFF) {\n }\n #endif\n\n #if KG_HOSTIF & KG_HOSTIF_BT2_IAP\n // smart read data from Bluetooth v2.1 IAP\n if (interfaceBT2IAPReady && (interfaceBT2IAPMode & KG_INTERFACE_MODE_INCOMING_PACKET) != 0 && bluetoothConnectionMap[bluetoothIAPDeviceIndex] -> linkIAP != 0xFF) {\n }\n #endif\n\n #if KG_HOSTIF & KG_HOSTIF_BT2_RAWHID\n // smart read data from Bluetooth v2.1 custom HID\n if (interfaceBT2RawHIDReady && (interfaceBT2RawHIDMode & KG_INTERFACE_MODE_INCOMING_PACKET) != 0 && bluetoothConnectionMap[bluetoothRawHIDDeviceIndex] -> linkHIDInterrupt != 0xFF) {\n }\n #endif\n\n // check for protocol timeout condition\n if (inBinPacket && (millis() - packetStartTime) > KG_PROTOCOL_RX_TIMEOUT) {\n // error (data payload too long)\n uint8_t payload[2] = { KG_DEF_PROTOCOL_ERROR_PACKET_TIMEOUT, 0x00 };\n send_keyglove_packet(KG_PACKET_TYPE_EVENT, 2, KG_PACKET_CLASS_PROTOCOL, KG_PACKET_ID_EVT_PROTOCOL_ERROR, payload);\n inBinPacket = false;\n rxPacketLength = 0;\n }\n\n return 0;\n}\n\nuint8_t send_keyglove_packet(uint8_t packetType, uint8_t payloadLength, uint8_t packetClass, uint8_t packetId, uint8_t *payload) {\n // validate payload length\n if ((payload == NULL && payloadLength > 0) || payloadLength > 250) {\n // payload specified but not provided, or too long\n return 1;\n }\n \n // filter outgoing packets for custom behavior\n if (filter_outgoing_keyglove_packet(&packetType, &payloadLength, &packetClass, &packetId, payload)) return 255;\n\n // allocate and check full packet buffer\n uint8_t *buffer = (uint8_t *)malloc(4 + payloadLength);\n if (buffer == 0) {\n // couldn't allocate packet buffer...uh oh\n return 2;\n }\n \n buffer[0] = packetType;\n buffer[1] = payloadLength;\n buffer[2] = packetClass;\n buffer[3] = packetId;\n if (payloadLength) memcpy(buffer + 4, payload, payloadLength);\n uint8_t length = 4 + payloadLength;\n\n #if KG_HOSTIF & KG_HOSTIF_USB_SERIAL\n // send packet out over wired serial (USB virtual serial)\n if (interfaceUSBSerialReady && (interfaceUSBSerialMode & 
KG_INTERFACE_MODE_OUTGOING_PACKET) != 0) {\n USBSerial.write((const uint8_t *)buffer, length); // packet data\n }\n #endif\n\n #if KG_HOSTIF & KG_HOSTIF_USB_RAWHID\n // send packet out over wired custom HID interface (USB raw HID)\n // 64-byte packets, formatted where byte 0 is [0-64] and bytes 1-63 are data\n if (interfaceUSBRawHIDReady && (interfaceUSBRawHIDMode & KG_INTERFACE_MODE_OUTGOING_PACKET) != 0) {\n int8_t bytes;\n for (uint8_t i = 0; i < length; i += (RAWHID_TX_SIZE - 1)) {\n memset(txRawHIDPacket, 0, RAWHID_TX_SIZE);\n txRawHIDPacket[0] = min(RAWHID_TX_SIZE - 1, length - i);\n for (uint8_t j = 0; j < (RAWHID_TX_SIZE - 1); j++) {\n if (i + j >= length) break;\n txRawHIDPacket[j + 1] = buffer[i + j];\n }\n bytes = RawHID.send(txRawHIDPacket, 2);\n }\n }\n #endif\n\n #if KG_HOSTIF & KG_HOSTIF_BT2_SERIAL\n // send packet out over wireless serial (Bluetooth v2.1 SPP)\n if (interfaceBT2SerialReady && (interfaceBT2SerialMode & KG_INTERFACE_MODE_OUTGOING_PACKET) != 0 && bluetoothConnectionMap[bluetoothSPPDeviceIndex] -> linkSPP != 0xFF) {\n bluetooth.smartSendData((const uint8_t *)bluetoothTXRawHIDPacket, length, bluetoothConnectionMap[bluetoothSPPDeviceIndex] -> linkSPP);\n }\n #endif\n\n #if KG_HOSTIF & KG_HOSTIF_BT2_IAP\n // send packet out over wireless iAP link (Bluetooth v2.1 IAP)\n if (interfaceBT2IAPReady && (interfaceBT2IAPMode & KG_INTERFACE_MODE_OUTGOING_PACKET) != 0 && bluetoothConnectionMap[bluetoothIAPDeviceIndex] -> linkIAP != 0xFF) {\n bluetooth.smartSendData((const uint8_t *)bluetoothTXRawHIDPacket, length, bluetoothConnectionMap[bluetoothIAPDeviceIndex] -> linkIAP);\n }\n #endif\n\n #if KG_HOSTIF & KG_HOSTIF_BT2_RAWHID\n // send packet out over wireless custom HID interface (Bluetooth v2.1 raw HID)\n if (interfaceBT2RawHIDReady && (interfaceBT2RawHIDMode & KG_INTERFACE_MODE_OUTGOING_PACKET) != 0 && bluetoothConnectionMap[bluetoothRawHIDDeviceIndex] -> linkHIDInterrupt != 0xFF) {\n int8_t bytes;\n for (uint8_t i = 0; i < length; i += (BLUETOOTH_RAWHID_TX_SIZE - 1)) {\n memset(bluetoothTXRawHIDPacket + 4, 0, BLUETOOTH_RAWHID_TX_SIZE);\n bluetoothTXRawHIDPacket[4] = min(BLUETOOTH_RAWHID_TX_SIZE - 1, length - i);\n for (uint8_t j = 0; j < (BLUETOOTH_RAWHID_TX_SIZE - 1); j++) {\n if (i + j >= length) break;\n bluetoothTXRawHIDPacket[j + 5] = buffer[i + j];\n }\n bluetooth.smartSendData((const uint8_t *)bluetoothTXRawHIDPacket, BLUETOOTH_RAWHID_TX_SIZE + 4, bluetoothConnectionMap[bluetoothRawHIDDeviceIndex] -> linkHIDInterrupt);\n }\n }\n #endif\n\n // KG_HID_KEYBOARD and KG_HID_MOUSE are handled elsewhere and deal with other kinds of data\n \n // tidy up!\n free(buffer);\n\n return 0;\n}\n\n#endif // _SUPPORT_PROTOCOL_H_"
},
{
"alpha_fraction": 0.6199803948402405,
"alphanum_fraction": 0.6282076239585876,
"avg_line_length": 27.841808319091797,
"blob_id": "0547bd921330e9fa59194807b92c4770b79e0e91",
"content_id": "61681b24e73c2d53504d26ad5692b770cc8584c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5105,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 177,
"path": "/keyglove/config.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Overall architecture/hardware option definitions\n// 9/10/2013 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2013 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n#ifndef _CONFIG_H_\n#define _CONFIG_H_\n\n/* ===============================================\n * HARDWARE OPTIONS\n=============================================== */\n\n// attempt auto-detection of base hardware and interfaces\n\n// Teensy++ 2.0\n#if defined(CORE_TEENSY)\n #if defined(__AVR_AT90USB1286__) || defined(__AVR_AT90USB1287__)\n #define KG_BOARD KG_BOARD_TEENSYPP2\n #ifdef CORE_TEENSY_SERIAL\n #define AUTO_KG_HOSTIF_USB_SERIAL KG_HOSTIF_USB_SERIAL\n #endif\n #ifdef CORE_TEENSY_RAWHID\n #define AUTO_KG_HOSTIF_USB_RAWHID KG_HOSTIF_USB_RAWHID\n #endif\n #ifdef CORE_TEENSY_HID\n #define AUTO_KG_HOSTIF_USB_HID KG_HOSTIF_USB_HID\n #endif\n #else\n #error Only the Teensy++ 2.0 variant of the Teensy line is supported.\n #endif\n\n// Arduino Due\n#elif defined(_VARIANT_ARDUINO_DUE_X_)\n #define KG_BOARD KG_BOARD_ARDUINO_DUE\n\n// Keyglove\n#elif defined(CORE_KEYGLOVE)\n #define KG_BOARD KG_BOARD_KEYGLOVE\n\n#else\n // ...if you're adding support for something else here, make sure you define it by this point!\n #warning No compatible board defined. 
This could be a problem.\n#endif\n\n\n\n// make sure these definitions exist even if they are \"0\"\n#ifndef AUTO_KG_HOSTIF_USB_SERIAL\n #define AUTO_KG_HOSTIF_USB_SERIAL 0\n#endif\n#ifndef AUTO_KG_HOSTIF_USB_RAWHID\n #define AUTO_KG_HOSTIF_USB_RAWHID 0\n#endif\n#ifndef AUTO_KG_HOSTIF_USB_HID\n #define AUTO_KG_HOSTIF_USB_HID 0\n#endif\n\n\n\n#define KG_HOSTIF (AUTO_KG_HOSTIF_USB_SERIAL | AUTO_KG_HOSTIF_USB_RAWHID | AUTO_KG_HOSTIF_USB_HID /*| KG_HOSTIF_BT2_HID | KG_HOSTIF_BT2_RAWHID */)\n\n//#define KG_MOTION KG_MOTION_NONE\n#define KG_MOTION KG_MOTION_MPU6050_HAND\n\n#define KG_FEEDBACK KG_FEEDBACK_BLINK\n//NOT RE-IMPLEMENTED YET: #define KG_FEEDBACK (KG_FEEDBACK_BLINK | KG_FEEDBACK_PIEZO | KG_FEEDBACK_VIBRATE | KG_FEEDBACK_RGB)\n\n// DUALGLOVE / FLEX / PRESSURE (NOT IMPLEMENTED YET)\n#define KG_DUALGLOVE KG_DUALGLOVE_NONE\n#define KG_FLEX KG_FLEX_NONE\n#define KG_PRESSURE KG_PRESSURE_NONE\n\n\n\n/* ===============================================\n * DEBUG SETTINGS\n=============================================== */\n\n#define KG_DEBUG KG_DEBUG_NONE\n\n/*\nDebug options:\n KG_DEBUG_NONE\n KG_DEBUG_BENCHMARK\n KG_DEBUG_FEEDBACK\n KG_DEBUG_TOUCH\n KG_DEBUG_TOUCHSET\n KG_DEBUG_MOTION\n KG_DEBUG_HOSTIF_USB\n KG_DEBUG_HOSTIF_BT2\n KG_DEBUG_HID_KEYBOARD\n KG_DEBUG_HID_MOUSE\n*/\n\n\n\n/* ===============================================\n * DEBUG COMPILER MACROS\n=============================================== */\n\n#if (KG_DEBUG & KG_DEBUG_BENCHMARK)\n #define DEBUG_BENCHMARK(x) x\n#else\n #define DEBUG_BENCHMARK(x)\n#endif\n\n#if (KG_DEBUG & KG_DEBUG_TOUCH)\n #define DEBUG_TOUCH(x) x\n#else\n #define DEBUG_TOUCH(x)\n#endif\n\n#if (KG_DEBUG & KG_DEBUG_TOUCHSET)\n #define DEBUG_TOUCHSET(x) x\n#else\n #define DEBUG_TOUCHSET(x)\n#endif\n\n#if (KG_DEBUG & KG_DEBUG_MOTION)\n #define DEBUG_MOTION(x) x\n#else\n #define DEBUG_MOTION(x)\n#endif\n\n#if (KG_DEBUG & KG_DEBUG_FEEDBACK)\n #define DEBUG_FEEDBACK(x) x\n#else\n #define DEBUG_FEEDBACK(x)\n#endif\n\n#if (KG_DEBUG & KG_DEBUG_HOSTIF_USB)\n #define DEBUG_HOSTIF_USB(x) x\n#else\n #define DEBUG_HOSTIF_USB(x)\n#endif\n\n#if (KG_DEBUG & KG_DEBUG_HOSTIF_BT2)\n #define DEBUG_HOSTIF_BT2(x) x\n#else\n #define DEBUG_HOSTIF_BT2(x)\n#endif\n\n#if (KG_DEBUG & KG_DEBUG_HID_KEYBOARD)\n #define DEBUG_HID_KEYBOARD(x) x\n#else\n #define DEBUG_HID_KEYBOARD(x)\n#endif\n\n#if (KG_DEBUG & KG_DEBUG_HID_MOUSE)\n #define DEBUG_HID_MOUSE(x) x\n#else\n #define DEBUG_HID_MOUSE(x)\n#endif\n\n#endif // _CONFIG_H_\n"
},
{
"alpha_fraction": 0.5970336198806763,
"alphanum_fraction": 0.6425736546516418,
"avg_line_length": 37,
"blob_id": "86e1a2646b9bda24bfc160b5d03ccc0fe9aa2e23",
"content_id": "54b7a456c7bb0d30b6d05b47658241f20efd7239",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4787,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 126,
"path": "/keyglove/support_motion_mpu6050_hand.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Special hardware setup file\n// 11/3/2013 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2011 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n\n\n#ifndef _SUPPORT_MOTION_MPU6050_HAND_H_\n#define _SUPPORT_MOTION_MPU6050_HAND_H_\n\nMPU6050 mpuHand = MPU6050(0x68);\nbool mpuHandInterrupt;\n\nVectorInt16 aaRaw; // raw linear acceleration\nVectorInt16 aa; // filtered linear acceleration\nVectorInt16 aa0; // last-iteration filtered linear acceleration\nVectorInt16 gvRaw; // raw rotational velocity\nVectorInt16 gv; // filtered rotational velocity\nVectorInt16 gv0; // last-iteration filtered rotational velocity\n\nvoid mpu6050_hand_interrupt() {\n mpuHandInterrupt = true;\n}\n\nvoid set_motion_mpu6050_hand_mode(uint8_t mode) {\n if (mode == 1) {\n aa.x = aa.y = aa.z = 0;\n gv.x = gv.y = gv.z = 0;\n mpuHandInterrupt = true;\n attachInterrupt(4, mpu6050_hand_interrupt, FALLING);\n mpuHand.setSleepEnabled(false);\n } else {\n mpuHand.setSleepEnabled(true);\n detachInterrupt(4);\n }\n\n // send kg_evt_motion_mode packet (if we aren't just intentionally setting it already)\n if (!inBinPacket) {\n uint8_t payload[2] = { 0x00, mode };\n send_keyglove_packet(KG_PACKET_TYPE_EVENT, sizeof(payload), KG_PACKET_CLASS_MOTION, KG_PACKET_ID_EVT_MOTION_MODE, payload);\n }\n}\n\nvoid setup_motion_mpu6050_hand() {\n // set INT4 pin (Arduino Pin 36) to INPUT/HIGH so MPU can drive interrupt pin as active-low\n pinMode(36, INPUT);\n digitalWrite(36, HIGH);\n\n // setup MPU-6050\n mpuHandInterrupt = false;\n mpuHand.initialize();\n delay(30);\n mpuHand.setFullScaleGyroRange(MPU6050_GYRO_FS_2000);\n mpuHand.setDLPFMode(MPU6050_DLPF_BW_42); // 42 Hz DLPF, 1kHz internal sampling\n mpuHand.setRate(9); // 1kHz/(9+1) = 100Hz\n mpuHand.setInterruptMode(1); // active low\n mpuHand.setInterruptDrive(1); // open drain\n mpuHand.setInterruptLatch(0); // latch until read\n mpuHand.setInterruptLatchClear(1); // clear on any read\n mpuHand.setIntDataReadyEnabled(1); // trigger interrupt on data ready\n set_motion_mpu6050_hand_mode(1); // enable motion detection\n}\n\nvoid update_motion_mpu6050_hand() {\n // read raw motion data\n mpuHand.getMotion6(&aaRaw.x, &aaRaw.y, &aaRaw.z, &gvRaw.x, &gvRaw.y, &gvRaw.z);\n\n // store previous accel/gyro values\n aa0.x = aa.x;\n aa0.y = aa.y;\n aa0.z = aa.z;\n gv0.x = gv.x;\n gv0.y = gv.y;\n gv0.z = 
gv.z;\n\n // simple smoothing filter\n aa.x = aa0.x + (0.25 * (aaRaw.x - aa0.x));\n aa.y = aa0.y + (0.25 * (aaRaw.y - aa0.y));\n aa.z = aa0.z + (0.25 * (aaRaw.z - aa0.z));\n gv.x = gv0.x + (0.25 * (gvRaw.x - gv0.x));\n gv.y = gv0.y + (0.25 * (gvRaw.y - gv0.y));\n gv.z = gv0.z + (0.25 * (gvRaw.z - gv0.z));\n\n // build and send kg_evt_motion_data packet\n uint8_t payload[15];\n payload[0] = 0x00; // sensor 0\n payload[1] = 0x03; // 1=accel, 2=gyro, 1|2 = 0x03\n payload[2] = 0x0C; // 12 bytes of motion data (6 axes, 2 bytes each)\n payload[3] = aa.x & 0xFF;\n payload[4] = aa.x >> 8;\n payload[5] = aa.y & 0xFF;\n payload[6] = aa.y >> 8;\n payload[7] = aa.z & 0xFF;\n payload[8] = aa.z >> 8;\n payload[9] = gv.x & 0xFF;\n payload[10] = gv.x >> 8;\n payload[11] = gv.y & 0xFF;\n payload[12] = gv.y >> 8;\n payload[13] = gv.z & 0xFF;\n payload[14] = gv.z >> 8;\n send_keyglove_packet(KG_PACKET_TYPE_EVENT, sizeof(payload), KG_PACKET_CLASS_MOTION, KG_PACKET_ID_EVT_MOTION_DATA, payload);\n}\n\n#endif // _SUPPORT_MOTION_MPU6050_HAND_H_"
},
{
"alpha_fraction": 0.4650857448577881,
"alphanum_fraction": 0.5350525379180908,
"avg_line_length": 41.66961669921875,
"blob_id": "9c4c57adb393771e1a2f18cfa5391b12c8393ccf",
"content_id": "dd5af3fd917a88eb007ebe186ea5dad774577bd6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 14464,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 339,
"path": "/keyglove/support_board_arduino_due.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Special hardware setup file\n// 8/27/2013 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2013 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n\n\n#ifndef _SUPPORT_BOARD_ARDUINO_DUE_H_\n#define _SUPPORT_BOARD_ARDUINO_DUE_H_\n\n#define BTSerial Serial2 // UART1: TXD1/RXD1/CTS1/RTS1\n \n#define US1_CR * (volatile uint32_t *) (0x4009C000) /* USART Control Register address for USART1 */\n#define US1_MR * (volatile uint32_t *) (0x4009C004) /* USART Mode Register address for USART1 */\n\n#define KG_HOSTIF_SERIAL_BAUD 115200\n\n/* ===============================================\n * 100 HZ INTERRUPT VECTOR\n=============================================== */\n\nvolatile uint8_t timer1Overflow = 0;\nISR(TIMER1_OVF_vect) {\n timer1Overflow++;\n}\n\n#define KG_INTERRUPT_PIN_ACCEL 36 // PE4 (internal/tiny, right)\n#define KG_INTERRUPT_NUM_ACCEL 4 // Teensy++ interrupt #4\n\n#define KG_INTERRUPT_PIN_GYRO 37 // PE5 (internal/tiny, left)\n#define KG_INTERRUPT_NUM_GYRO 5 // Teensy++ interrupt #5\n\n// FUSION and ACCEL are never both used at the same time\n#define KG_INTERRUPT_PIN_FUSION 36 // PE4 (internal/tiny, right)\n#define KG_INTERRUPT_NUM_FUSION 4 // Teensy++ interrupt #4\n\n#define KG_PIN_BLINK 6 // PD6\n\n#define KG_PIN_BT_RTS 19 // PE7\n#define KG_INTERRUPT_NUM_BT_RTS 7 // Teensy++ interrupt #7\n#define KG_PIN_BT_CTS 18 // PE6\n\n// ITEMS BELOW THIS CANNOT BE USED SIMULTANEOUSLY WITH ALL ABOVE FEATURES\n\n// direct I/O feedback pins (kills 3 thumb sensors and Bluetooth flow control)\n#define KG_PIN_PIEZO 19 // PE7\n#define KG_PIN_VIBRATE 18 // PE6\n#define KG_PIN_RGB_RED 21 // PB1\n#define KG_PIN_RGB_GREEN 22 // PB2\n#define KG_PIN_RGB_BLUE 24 // PB4\n\n// PS/2 clock/data pins for keyboard (kills direct I/O piezo/vibe or Bluetooth flow control)\n#define KG_PIN_KB_CLOCK 19 // PE7\n#define KG_PIN_KB_DATA 18 // PE6\n\n// PS/2 clock/data pins for mouse (kills Bluetooth UART connection)\n#define KG_PIN_MOUSE_CLOCK 3 // PD3\n#define KG_PIN_MOUSE_DATA 2 // PD2\n\n// ======================== END PIN DEFINITIONS ========================\n\n// sensor count and base combination count\n#define KG_TOTAL_SENSORS 37\n#define KG_BASE_COMBINATIONS 60\n#define KG_BASE_COMBINATION_BYTES 8\n\n// NOTE: KG_BASE_COMBINATIONS seems like it would be very high, but there are\n// physical and practical limitations that make this 
number much smaller\n\n#define KS_DM ((touches[0] & 0x01) != 0)\n#define KS_AY ((touches[0] & 0x02) != 0)\n#define KS_BY ((touches[0] & 0x04) != 0)\n#define KS_CY ((touches[0] & 0x08) != 0)\n#define KS_DY ((touches[0] & 0x10) != 0)\n#define KS_EY ((touches[0] & 0x20) != 0)\n#define KS_FY ((touches[0] & 0x40) != 0)\n#define KS_GY ((touches[0] & 0x80) != 0)\n#define KS_HY ((touches[1] & 0x01) != 0)\n#define KS_IY ((touches[1] & 0x02) != 0)\n#define KS_JY ((touches[1] & 0x04) != 0)\n#define KS_KY ((touches[1] & 0x08) != 0)\n#define KS_LY ((touches[1] & 0x10) != 0)\n#define KS_MY ((touches[1] & 0x20) != 0)\n#define KS_NY ((touches[1] & 0x40) != 0)\n#define KS_OY ((touches[1] & 0x80) != 0)\n#define KS_PY ((touches[2] & 0x01) != 0)\n#define KS_QY ((touches[2] & 0x02) != 0)\n#define KS_RY ((touches[2] & 0x04) != 0)\n#define KS_SY ((touches[2] & 0x08) != 0)\n#define KS_TY ((touches[2] & 0x10) != 0)\n#define KS_UY ((touches[2] & 0x20) != 0)\n#define KS_VY ((touches[2] & 0x40) != 0)\n#define KS_WY ((touches[2] & 0x80) != 0)\n#define KS_XY ((touches[3] & 0x01) != 0)\n#define KS_MZ ((touches[3] & 0x02) != 0)\n#define KS_NZ ((touches[3] & 0x04) != 0)\n#define KS_OZ ((touches[3] & 0x08) != 0)\n#define KS_PZ ((touches[3] & 0x10) != 0)\n#define KS_QZ ((touches[3] & 0x20) != 0)\n#define KS_RZ ((touches[3] & 0x40) != 0)\n#define KS_A1 ((touches[3] & 0x80) != 0)\n#define KS_D1 ((touches[4] & 0x01) != 0)\n#define KS_G1 ((touches[4] & 0x02) != 0)\n#define KS_J1 ((touches[4] & 0x04) != 0)\n#define KS_Y1 ((touches[4] & 0x08) != 0)\n#define KS_A2 ((touches[4] & 0x10) != 0)\n#define KS_D2 ((touches[4] & 0x20) != 0)\n#define KS_G2 ((touches[4] & 0x40) != 0)\n#define KS_J2 ((touches[4] & 0x80) != 0)\n#define KS_A3 ((touches[5] & 0x01) != 0)\n#define KS_D3 ((touches[5] & 0x02) != 0)\n#define KS_G3 ((touches[5] & 0x04) != 0)\n#define KS_J3 ((touches[5] & 0x08) != 0)\n#define KS_D4 ((touches[5] & 0x10) != 0)\n#define KS_Y4 ((touches[5] & 0x20) != 0)\n#define KS_Z4 ((touches[5] & 0x40) != 0)\n#define KS_Y5 ((touches[5] & 0x80) != 0)\n#define KS_Z5 ((touches[6] & 0x01) != 0)\n#define KS_D6 ((touches[6] & 0x02) != 0)\n#define KS_Y6 ((touches[6] & 0x04) != 0)\n#define KS_Z6 ((touches[6] & 0x08) != 0)\n#define KS_D7 ((touches[6] & 0x10) != 0)\n#define KS_G7 ((touches[6] & 0x20) != 0)\n#define KS_Y7 ((touches[6] & 0x40) != 0)\n#define KS_Z7 ((touches[6] & 0x80) != 0)\n#define KS_A8 ((touches[7] & 0x01) != 0)\n#define KS_D8 ((touches[7] & 0x02) != 0)\n#define KS_G8 ((touches[7] & 0x04) != 0)\n#define KS_J8 ((touches[7] & 0x08) != 0)\n\n#define KS_ADY (KS_AY && KS_DY)\n#define KS_AJY (KS_AY && KS_JY)\n#define KS_DGY (KS_DY && KS_GY)\n#define KS_GJY (KS_GY && KS_JY)\n\n#define CLR(x, y) (x &= (~(1 << y)))\n#define SET(x, y) (x |= (1 << y))\n#define _BV(bit) (1 << (bit))\nuint8_t _pina, _pinb, _pinc, _pind, _pine, _pinf;\n\nvoid setup_board() {\n // enable RTS/CTS handshaking (flow control) on UART1 (Serial2 in Arduino)\n // uses digital pins 23 and 24\n //US1_MR |= 0x02;\n}\n\nvoid update_board() {\n}\n\nvoid setup_board_touch() {\n // make sure we enable internal pullup resistors\n /*DDRA &= 0x00;\n PORTA |= 0xFF; // 0,1,2,3,4,5,6,7\n DDRB &= 0x00;\n PORTB |= 0xFF; // 0,1,2,3,4,5,6,7\n DDRC &= 0x00;\n PORTC |= 0xFF; // 0,1,2,3,4,5,6,7\n DDRD &= 0x4F;\n PORTD |= 0xB0; // 4,5,7\n DDRE &= 0xFC;\n PORTE |= 0x03; // 0,1\n DDRF &= 0x00;\n PORTF |= 0xFF; // 0,1,2,3,4,5,6,7*/\n}\n\nvoid update_board_touch(uint8_t *touches) {\n/* // check on M combinations (PF2)\n SET(DDRF, 2); // set to OUTPUT\n CLR(PORTF, 2); // set to LOW\n 
delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pinf = PINF;\n CLR(DDRF, 2); // set to INPUT\n SET(PORTF, 2); // pull HIGH\n if (!(_pinf & (1 << 6))) touches[0] |= 0x01; // D (PF6)\n\n // check on Y combinations (PB6)\n SET(DDRB, 6); // set to OUTPUT\n CLR(PORTB, 6); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinc = PINC; _pind = PIND; _pine = PINE; _pinf = PINF;\n CLR(DDRB, 6); // set to INPUT\n SET(PORTB, 6); // pull HIGH\n if (!(_pinb & (1 << 0))) touches[0] |= 0x02; // A (PB0)\n if (!(_pinf & (1 << 0))) touches[0] |= 0x04; // B (PF0)\n if (!(_pinf & (1 << 1))) touches[0] |= 0x08; // C (PF1)\n if (!(_pinf & (1 << 6))) touches[0] |= 0x10; // D (PF6)\n if (!(_pinf & (1 << 7))) touches[0] |= 0x20; // E (PF7)\n if (!(_pina & (1 << 3))) touches[0] |= 0x40; // F (PA3)\n if (!(_pina & (1 << 5))) touches[0] |= 0x80; // G (PA5)\n if (!(_pina & (1 << 6))) touches[1] |= 0x01; // H (PA6)\n if (!(_pina & (1 << 7))) touches[1] |= 0x02; // I (PA7)\n if (!(_pinc & (1 << 3))) touches[1] |= 0x04; // J (PC3)\n if (!(_pinc & (1 << 2))) touches[1] |= 0x08; // K (PC2)\n if (!(_pinc & (1 << 1))) touches[1] |= 0x10; // L (PC1)\n if (!(_pinf & (1 << 2))) touches[1] |= 0x20; // M (PF2)\n if (!(_pinf & (1 << 3))) touches[1] |= 0x40; // N (PF3)\n if (!(_pinf & (1 << 4))) touches[1] |= 0x80; // O (PF4)\n if (!(_pina & (1 << 2))) touches[2] |= 0x01; // P (PA2)\n if (!(_pina & (1 << 1))) touches[2] |= 0x02; // Q (PA1)\n if (!(_pina & (1 << 0))) touches[2] |= 0x04; // R (PA0)\n if (!(_pinc & (1 << 7))) touches[2] |= 0x08; // S (PC7)\n if (!(_pinc & (1 << 6))) touches[2] |= 0x10; // T (PC6)\n if (!(_pinc & (1 << 5))) touches[2] |= 0x20; // U (PC5)\n if (!(_pinc & (1 << 0))) touches[2] |= 0x40; // V (PC0)\n if (!(_pine & (1 << 1))) touches[2] |= 0x80; // W (PE1)\n if (!(_pine & (1 << 0))) touches[3] |= 0x01; // X (PE0)\n\n // check on Z combinations (PB5)\n SET(DDRB, 5); // set to OUTPUT\n CLR(PORTB, 5); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinf = PINF;\n CLR(DDRB, 5); // set to INPUT\n SET(PORTB, 5); // pull HIGH\n if (!(_pinf & (1 << 2))) touches[3] |= 0x02; // M (PF2)\n if (!(_pinf & (1 << 3))) touches[3] |= 0x04; // N (PF3)\n if (!(_pinf & (1 << 4))) touches[3] |= 0x08; // O (PF4)\n if (!(_pina & (1 << 2))) touches[3] |= 0x10; // P (PA2)\n if (!(_pina & (1 << 1))) touches[3] |= 0x20; // Q (PA1)\n if (!(_pina & (1 << 0))) touches[3] |= 0x40; // R (PA0)\n\n // check on 1 combinations (PD5)\n SET(DDRD, 5); // set to OUTPUT\n CLR(PORTD, 5); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinc = PINC; _pinf = PINF;\n CLR(DDRD, 5); // set to INPUT\n SET(PORTD, 5); // pull HIGH\n if (!(_pinb & (1 << 0))) touches[3] |= 0x80; // A (PB0)\n if (!(_pinf & (1 << 6))) touches[4] |= 0x01; // D (PF6)\n if (!(_pina & (1 << 5))) touches[4] |= 0x02; // G (PA5)\n if (!(_pinc & (1 << 3))) touches[4] |= 0x04; // J (PC3)\n if (!(_pinb & (1 << 6))) touches[4] |= 0x08; // Y (PB6)\n\n // check on 2 combinations (PD4)\n SET(DDRD, 4); // set to OUTPUT\n CLR(PORTD, 4); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinc = PINC; _pinf = PINF;\n CLR(DDRD, 4); // set to INPUT\n SET(PORTD, 4); // pull HIGH\n if (!(_pinb & (1 << 0))) touches[4] |= 0x10; // A (PB0)\n if (!(_pinf & (1 << 6))) touches[4] |= 
0x20; // D (PF6)\n if (!(_pina & (1 << 5))) touches[4] |= 0x40; // G (PA5)\n if (!(_pinc & (1 << 3))) touches[4] |= 0x80; // J (PC3)\n\n // check on 3 combinations (PB7)\n SET(DDRB, 7); // set to OUTPUT\n CLR(PORTB, 7); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinc = PINC; _pinf = PINF;\n CLR(DDRB, 7); // set to INPUT\n SET(PORTB, 7); // pull HIGH\n if (!(_pinb & (1 << 0))) touches[5] |= 0x01; // A (PB0)\n if (!(_pinf & (1 << 6))) touches[5] |= 0x02; // D (PF6)\n if (!(_pina & (1 << 5))) touches[5] |= 0x04; // G (PA5)\n if (!(_pinc & (1 << 3))) touches[5] |= 0x08; // J (PC3)\n\n // check on 4 combinations (PF5)\n SET(DDRF, 5); // set to OUTPUT\n CLR(PORTF, 5); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pinb = PINB; _pinf = PINF;\n CLR(DDRF, 5); // set to INPUT\n SET(PORTF, 5); // pull HIGH\n if (!(_pinf & (1 << 6))) touches[5] |= 0x10; // D (PF6)\n if (!(_pinb & (1 << 6))) touches[5] |= 0x20; // Y (PB6)\n if (!(_pinb & (1 << 5))) touches[5] |= 0x40; // Z (PB5)\n\n // check on 5 combinations (PA4)\n SET(DDRA, 4); // set to OUTPUT\n CLR(PORTA, 4); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pinb = PINB;\n CLR(DDRA, 4); // set to INPUT\n SET(PORTA, 4); // pull HIGH\n if (!(_pinb & (1 << 6))) touches[5] |= 0x80; // Y (PB6)\n if (!(_pinb & (1 << 5))) touches[6] |= 0x01; // Z (PB5)\n\n // check on 6 combinations (PC4)\n SET(DDRC, 4); // set to OUTPUT\n CLR(PORTC, 4); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pinb = PINB; _pinf = PINF;\n CLR(DDRC, 4); // set to INPUT\n SET(PORTC, 4); // pull HIGH\n if (!(_pinf & (1 << 6))) touches[6] |= 0x02; // D (PF6)\n if (!(_pinb & (1 << 6))) touches[6] |= 0x04; // Y (PB6)\n if (!(_pinb & (1 << 5))) touches[6] |= 0x08; // Z (PB5)\n\n // check on 7 combinations (PD7)\n SET(DDRD, 7); // set to OUTPUT\n CLR(PORTD, 7); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinf = PINF;\n CLR(DDRD, 7); // set to INPUT\n SET(PORTD, 7); // pull HIGH\n if (!(_pinf & (1 << 6))) touches[6] |= 0x10; // D (PF6)\n if (!(_pina & (1 << 5))) touches[6] |= 0x20; // G (PA5)\n if (!(_pinb & (1 << 6))) touches[6] |= 0x40; // Y (PB6)\n if (!(_pinb & (1 << 5))) touches[6] |= 0x80; // Z (PB5)\n\n // check on 8 combinations (PB3)\n SET(DDRB, 3); // set to OUTPUT\n CLR(PORTB, 3); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinc = PINC; _pinf = PINF;\n CLR(DDRB, 3); // set to INPUT\n SET(PORTB, 3); // pull HIGH\n if (!(_pinb & (1 << 0))) touches[7] |= 0x01; // A (PB0)\n if (!(_pinf & (1 << 6))) touches[7] |= 0x02; // D (PF6)\n if (!(_pina & (1 << 5))) touches[7] |= 0x04; // G (PA5)\n if (!(_pinc & (1 << 3))) touches[7] |= 0x08; // J (PC3)\n */\n}\n\n#endif // _SUPPORT_BOARD_ARDUINO_DUE_H_"
},
{
"alpha_fraction": 0.43967404961586,
"alphanum_fraction": 0.5189331769943237,
"avg_line_length": 36.44743728637695,
"blob_id": "8a47fb287d04fd56048162fa0a5212e26ccbe46a",
"content_id": "dd690636487a8c05c2f903251e7c3d9814c546d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 29208,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 780,
"path": "/keyglove/support_board_teensypp2.h",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove controller source code - Special hardware setup file\n// 7/17/2011 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2011 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n\n\n#ifndef _SUPPORT_BOARD_TEENSYPP2_H_\n#define _SUPPORT_BOARD_TEENSYPP2_H_\n\n/*\nTeensy++ 2.0 Pin arrangement:\n\n __|||||__\n GND | USB | VCC\n 3 27 | | 26 Y\n SCL 0 | | 25 Z\n SDA 1 | | 24 a*\n RXD 2 | | 23 8\n TXD 3 | | 22 9\n 2 4 | 37.36 | 21 0\n 1 5 | | 20 A\n LED 6 | | 19 INT7\n 7 7 | | 18 INT6\n X 8 | | GND\n W 9 | | AREF\n V 10 | | 38 B\n L 5 11 | 32 . 28 | 39 R C\n K G 12 | 33 . 29 | 40 Q M\n J H 13 | 34 . 30 | 41 P N\n 6 I 14 | 35 . 31 | 42 F O\n U 15 | | 43 4\n T 16 | | 44 D\n S 17 |_________| 45 E\n \n 36=INT4, 37=INT5\n\nThis is a grand total of 46 usable I/O pins.\nWe use SCL (0) and SDA (1) for I2C communication, leaving 44 usable pins.\nWe use RXD (2) and TXD (2) for Bluetooth UART communication, leaving 42 usable pins.\nWe use INT4 (36) and INT5 (37) for accel and gyro interrupts, leaving 40 usable pins.\nWe use INT7 (19) for Bluetooth Link interrupts, leaving 39 usable pins.\nWe use INT6 (18) for DTR control, leaving 38 usable pins.\nWe use LED for BLINK feedback, leaving 37 usable pins.\n...and we have a total of 37 sensors. Yay!\n\nPin Change interrupts:\n- Y, 26, PB6\n- Z, 25, PB5\n- a*, 24, PB4\n- 8, 23, PB3\n- 9, 22, PB2\n- 0, 21, PB1\n- 1, 5, PD5\n- 2, 4, PD4\n- 3, 27, PB7\n\nFor the sake of the Keyglove Kit board, the sensor pin connections should be arranged\nin a clockwise manner as Thumb, Index, Middle, Ring, Little, with each individual\nfinger's sensors in increasing alphabetical order: (Y,Z,a*,8,9,0), (A,B,C,M,N,O,4),\nand so on. Start with pin 26 (B6)\n*/\n\n\n\n// ======================== BEGIN PIN DEFINITIONS ========================\n\n#define KEYGLOVE_KIT_BUG_PORTA_REVERSED\n\n/*\n#if (KG_HAND == KG_HAND_RIGHT)\n #define KSP_Y 26 // PB6\n #define KSP_Z 25 // PB5\n #define KSP_AA 24 // PB4\n #define KSP_8 23 // PB3\n #define KSP_9 22 // PB2\n #define KSP_0 21 // PB1\n\n #define KSP_A 20 // PB0\n #define KSP_B 38 // PF0\n #define KSP_C 39 // PF1\n #define KSP_M 40 // PF2\n #define KSP_N 41 // PF3\n #define KSP_O 42 // PF4\n #define KSP_4 43 // PF5\n\n #define KSP_D 44 // PF6\n #define KSP_E 45 // PF7\n\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! 
Teensy++ part in Eagle PA0-7 pins were backwards\n // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n #define KSP_F 35 // PA3\n #define KSP_P 34 // PA2\n #define KSP_Q 33 // PA1\n #define KSP_R 32 // PA0\n #define KSP_5 28 // PA4\n \n #define KSP_G 29 // PA5\n #define KSP_H 30 // PA6\n #define KSP_I 31 // PA7\n #else\n #define KSP_F 31 // PA3\n #define KSP_P 30 // PA2\n #define KSP_Q 29 // PA1\n #define KSP_R 28 // PA0\n #define KSP_5 32 // PA4\n\n #define KSP_G 33 // PA5\n #define KSP_H 34 // PA6\n #define KSP_I 35 // PA7\n #endif\n\n #define KSP_S 17 // PC7\n #define KSP_T 16 // PC6\n #define KSP_U 15 // PC5\n #define KSP_6 14 // PC4\n \n #define KSP_J 13 // PC3\n #define KSP_K 12 // PC2\n #define KSP_L 11 // PC1\n #define KSP_V 10 // PC0\n #define KSP_W 9 // PE1\n #define KSP_X 8 // PE0\n #define KSP_7 7 // PD7\n\n #define KSP_1 5 // PD5\n #define KSP_2 4 // PD4\n #define KSP_3 27 // PB7\n#else /* !KG_HAND_RIGHT (KG_HAND_LEFT) */\n/* #define KSP_X 26 // PB6\n #define KSP_W 25 // PB5\n #define KSP_V 24 // PB4\n #define KSP_L 23 // PB3\n #define KSP_K 22 // PB2\n #define KSP_J 21 // PB1\n\n #define KSP_6 20 // PB0\n #define KSP_U 38 // PF0\n #define KSP_T 39 // PF1\n #define KSP_S 40 // PF2\n #define KSP_I 41 // PF3\n #define KSP_H 42 // PF4\n #define KSP_G 43 // PF5\n\n #define KSP_5 44 // PF6\n #define KSP_R 45 // PF7\n\n #define KEYGLOVE_KIT_BUG_PORTA_REVERSED\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! Teensy++ part in Eagle PA0-7 pins were backwards\n // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n #define KSP_Q 35 // PA3\n #define KSP_P 34 // PA2\n #define KSP_F 33 // PA1\n #define KSP_E 32 // PA0\n #define KSP_D 28 // PA4\n\n #define KSP_4 29 // PA5\n #define KSP_O 30 // PA6\n #define KSP_N 31 // PA7\n #else\n #define KSP_Q 31 // PA3\n #define KSP_P 30 // PA2\n #define KSP_F 29 // PA1\n #define KSP_E 28 // PA0\n #define KSP_D 32 // PA4\n\n #define KSP_4 33 // PA5\n #define KSP_O 34 // PA6\n #define KSP_N 35 // PA7\n #endif\n\n #define KSP_M 17 // PC7\n #define KSP_C 16 // PC6\n #define KSP_B 15 // PC5\n #define KSP_A 14 // PC4\n\n #define KSP_0 13 // PC3\n #define KSP_9 12 // PC2\n #define KSP_8 11 // PC1\n #define KSP_AA 10 // PC0\n #define KSP_Z 9 // PE1\n #define KSP_Y 8 // PE0\n #define KSP_7 7 // PD7\n\n #define KSP_1 5 // PD5\n #define KSP_2 4 // PD4\n #define KSP_3 27 // PB7\n#endif /* KG_HAND_LEFT */\n/*\n#define KBC_Z7 0\n#define KBC_Y7 1\n#define KBC_J8 2\n#define KBC_JY 3\n#define KBC_KY 4\n#define KBC_LY 5\n#define KBC_HY 6\n#define KBC_VY 7\n#define KBC_WY 8\n#define KBC_XY 9\n#define KBC_Z6 10\n#define KBC_Y6 11\n#define KBC_G8 12\n#define KBC_GY 13\n#define KBC_IY 14\n#define KBC_SY 15\n#define KBC_TY 16\n#define KBC_UY 17\n#define KBC_G7 18\n#define KBC_Z5 19\n#define KBC_Y5 20\n#define KBC_D8 21\n#define KBC_DY 22\n#define KBC_EY 23\n#define KBC_FY 24\n#define KBC_PZ 25\n#define KBC_PY 26\n#define KBC_QZ 27\n#define KBC_QY 28\n#define KBC_RZ 29\n#define KBC_RY 30\n#define KBC_D4 31\n#define KBC_D6 32\n#define KBC_D7 33\n#define KBC_DM 34\n#define KBC_Z4 35\n#define KBC_Y4 36\n#define KBC_A8 37\n#define KBC_MY 38\n#define KBC_AY 39\n#define KBC_BY 40\n#define KBC_CY 41\n#define KBC_MZ 42\n#define KBC_NZ 43\n#define KBC_NY 44\n#define KBC_OZ 45\n#define KBC_OY 46\n#define KBC_A3 47\n#define KBC_A2 48\n#define KBC_A1 49\n#define KBC_D3 50\n#define KBC_D2 51\n#define KBC_D1 52\n#define KBC_G3 53\n#define KBC_G2 54\n#define KBC_G1 55\n#define KBC_J3 56\n#define KBC_J2 
57\n#define KBC_J1 58\n#define KBC_Y1 59\n#define KBC_AAA 60\n#define KBC_DAA 61\n#define KBC_GAA 62\n#define KBC_JAA 63\n#define KBC_AA4 64\n#define KBC_AA5 65\n#define KBC_AA6 66\n#define KBC_AA7 67\n#define KBC_A9 68\n#define KBC_D9 69\n#define KBC_G9 70\n#define KBC_J9 71\n#define KBC_A0 72\n#define KBC_D0 73\n#define KBC_G0 74\n#define KBC_J0 75\n*/\n\n\n\n#define USBSerial Serial\n#define BT2Serial Serial1\n\n#define KG_HOSTIF_USB_SERIAL_BAUD 115200\n#define KG_HOSTIF_BT2_SERIAL_BAUD 125000\n\n#define KG_INTERFACE_MODE_NONE 0x00\n#define KG_INTERFACE_MODE_OUTGOING_PACKET 0x01\n#define KG_INTERFACE_MODE_INCOMING_PACKET 0x02\n//#define KG_INTERFACE_MODE_OUTGOING_INFO 0x04\n\n\n\n#define KG_INTERRUPT_PIN_ACCEL 36 // PE4 (internal/tiny, right)\n#define KG_INTERRUPT_NUM_ACCEL 4 // Teensy++ interrupt #4\n\n#define KG_INTERRUPT_PIN_GYRO 37 // PE5 (internal/tiny, left)\n#define KG_INTERRUPT_NUM_GYRO 5 // Teensy++ interrupt #5\n\n// FUSION and ACCEL are never both used at the same time\n#define KG_INTERRUPT_PIN_FUSION 36 // PE4 (internal/tiny, right)\n#define KG_INTERRUPT_NUM_FUSION 4 // Teensy++ interrupt #4\n\n#define KG_PIN_BLINK 6 // PD6\n\n#define KG_PIN_BT_RTS 19 // PE7\n#define KG_INTERRUPT_NUM_BT_RTS 7 // Teensy++ interrupt #7\n#define KG_PIN_BT_CTS 18 // PE6\n\n// ITEMS BELOW THIS CANNOT BE USED SIMULTANEOUSLY WITH ALL ABOVE FEATURES\n\n// direct I/O feedback pins (kills 3 thumb sensors and Bluetooth flow control)\n#define KG_PIN_PIEZO 19 // PE7\n#define KG_PIN_VIBRATE 18 // PE6\n#define KG_PIN_RGB_RED 21 // PB1\n#define KG_PIN_RGB_GREEN 22 // PB2\n#define KG_PIN_RGB_BLUE 24 // PB4\n\n// PS/2 clock/data pins for keyboard (kills direct I/O piezo/vibe or Bluetooth flow control)\n#define KG_PIN_KB_CLOCK 19 // PE7\n#define KG_PIN_KB_DATA 18 // PE6\n\n// PS/2 clock/data pins for mouse (kills Bluetooth UART connection)\n#define KG_PIN_MOUSE_CLOCK 3 // PD3\n#define KG_PIN_MOUSE_DATA 2 // PD2\n\n// ======================== END PIN DEFINITIONS ========================\n\n// sensor count and base combination count\n#define KG_TOTAL_SENSORS 37\n#define KG_BASE_COMBINATIONS 60\n#define KG_BASE_COMBINATION_BYTES 8\n\n// NOTE: KG_BASE_COMBINATIONS seems like it would be very high, but there are\n// physical and practical limitations that make this number much smaller\n\n#define KS_DM ((touches[0] & 0x01) != 0)\n#define KS_AY ((touches[0] & 0x02) != 0)\n#define KS_BY ((touches[0] & 0x04) != 0)\n#define KS_CY ((touches[0] & 0x08) != 0)\n#define KS_DY ((touches[0] & 0x10) != 0)\n#define KS_EY ((touches[0] & 0x20) != 0)\n#define KS_FY ((touches[0] & 0x40) != 0)\n#define KS_GY ((touches[0] & 0x80) != 0)\n#define KS_HY ((touches[1] & 0x01) != 0)\n#define KS_IY ((touches[1] & 0x02) != 0)\n#define KS_JY ((touches[1] & 0x04) != 0)\n#define KS_KY ((touches[1] & 0x08) != 0)\n#define KS_LY ((touches[1] & 0x10) != 0)\n#define KS_MY ((touches[1] & 0x20) != 0)\n#define KS_NY ((touches[1] & 0x40) != 0)\n#define KS_OY ((touches[1] & 0x80) != 0)\n#define KS_PY ((touches[2] & 0x01) != 0)\n#define KS_QY ((touches[2] & 0x02) != 0)\n#define KS_RY ((touches[2] & 0x04) != 0)\n#define KS_SY ((touches[2] & 0x08) != 0)\n#define KS_TY ((touches[2] & 0x10) != 0)\n#define KS_UY ((touches[2] & 0x20) != 0)\n#define KS_VY ((touches[2] & 0x40) != 0)\n#define KS_WY ((touches[2] & 0x80) != 0)\n#define KS_XY ((touches[3] & 0x01) != 0)\n#define KS_MZ ((touches[3] & 0x02) != 0)\n#define KS_NZ ((touches[3] & 0x04) != 0)\n#define KS_OZ ((touches[3] & 0x08) != 0)\n#define KS_PZ ((touches[3] & 0x10) != 0)\n#define KS_QZ ((touches[3] & 
0x20) != 0)\n#define KS_RZ ((touches[3] & 0x40) != 0)\n#define KS_A1 ((touches[3] & 0x80) != 0)\n#define KS_D1 ((touches[4] & 0x01) != 0)\n#define KS_G1 ((touches[4] & 0x02) != 0)\n#define KS_J1 ((touches[4] & 0x04) != 0)\n#define KS_Y1 ((touches[4] & 0x08) != 0)\n#define KS_A2 ((touches[4] & 0x10) != 0)\n#define KS_D2 ((touches[4] & 0x20) != 0)\n#define KS_G2 ((touches[4] & 0x40) != 0)\n#define KS_J2 ((touches[4] & 0x80) != 0)\n#define KS_A3 ((touches[5] & 0x01) != 0)\n#define KS_D3 ((touches[5] & 0x02) != 0)\n#define KS_G3 ((touches[5] & 0x04) != 0)\n#define KS_J3 ((touches[5] & 0x08) != 0)\n#define KS_D4 ((touches[5] & 0x10) != 0)\n#define KS_Y4 ((touches[5] & 0x20) != 0)\n#define KS_Z4 ((touches[5] & 0x40) != 0)\n#define KS_Y5 ((touches[5] & 0x80) != 0)\n#define KS_Z5 ((touches[6] & 0x01) != 0)\n#define KS_D6 ((touches[6] & 0x02) != 0)\n#define KS_Y6 ((touches[6] & 0x04) != 0)\n#define KS_Z6 ((touches[6] & 0x08) != 0)\n#define KS_D7 ((touches[6] & 0x10) != 0)\n#define KS_G7 ((touches[6] & 0x20) != 0)\n#define KS_Y7 ((touches[6] & 0x40) != 0)\n#define KS_Z7 ((touches[6] & 0x80) != 0)\n#define KS_A8 ((touches[7] & 0x01) != 0)\n#define KS_D8 ((touches[7] & 0x02) != 0)\n#define KS_G8 ((touches[7] & 0x04) != 0)\n#define KS_J8 ((touches[7] & 0x08) != 0)\n\n#define KS_ADY (KS_AY && KS_DY)\n#define KS_AJY (KS_AY && KS_JY)\n#define KS_DGY (KS_DY && KS_GY)\n#define KS_GJY (KS_GY && KS_JY)\n\n#define CLR(x, y) (x &= (~(1 << y)))\n#define SET(x, y) (x |= (1 << y))\n#define _BV(bit) (1 << (bit))\nuint8_t _pina, _pinb, _pinc, _pind, _pine, _pinf;\n\nbool interfaceUSBSerialReady = false;\nuint8_t interfaceUSBSerialMode = 0;\nbool interfaceUSBRawHIDReady = false;\nuint8_t interfaceUSBRawHIDMode = 0;\nbool interfaceUSBHIDReady = false;\nuint8_t interfaceUSBHIDMode = 0;\n\nISR(TIMER1_COMPA_vect) {\n    keyglove100Hz = 1;\n}\n\nvoid setup_board_teensypp2() {\n    // setup internal 100Hz \"tick\" interrupt\n    // thanks to http://www.arduino.cc/cgi-bin/yabb2/YaBB.pl?num=1212098919 (and 'bens')\n    // also, lots of timer info here and here:\n    // http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=50106\n    // http://www.avrbeginners.net/architecture/timers/timers.html\n    // also, section 15.10 of the AT90USB128x datasheet:\n    // http://www.atmel.com/Images/doc7593.pdf\n    \n    // set up Timer1\n    // WGM13 = 0 \\\n    // WGM12 = 1 | --> CTC mode, TOP=OCR1A\n    // WGM11 = 0 |\n    // WGM10 = 0 /\n    // CS12 = 0 \\\n    // CS11 = 1 | --> clk/8 prescaler\n    // CS10 = 0 /\n\n    TCCR1A = 0x00;  // TCCR1A: COM1A1=0, COM1A0=0, COM1B1=0, COM1B0=0, COM1C1=0, COM1C0=0, WGM11=0, WGM10=0\n    TCCR1B = 0x0A;  // TCCR1B: ICNC1=0, ICES1=0, -, WGM13=0, WGM12=1, CS12=0, CS11=1, CS10=0\n    OCR1A = 0x2710; // 10000 counts @ (8MHz / 8) = 10ms interval\n    //OCR1A = 0x0500; // speed up ALL THE THINGS!!\n    TIMSK1 |= (1 << OCIE1A); // enable TIMER1 output compare match interrupt\n\n    // setup touch sensors and make sure we enable internal pullup resistors\n    DDRA &= 0x00;\n    PORTA |= 0xFF; // 0,1,2,3,4,5,6,7\n    DDRB &= 0x00;\n    PORTB |= 0xFF; // 0,1,2,3,4,5,6,7\n    DDRC &= 0x00;\n    PORTC |= 0xFF; // 0,1,2,3,4,5,6,7\n    DDRD &= 0x4F;\n    PORTD |= 0xB0; // 4,5,7\n    DDRE &= 0xFC;\n    PORTE |= 0x03; // 0,1\n    DDRF &= 0x00;\n    PORTF |= 0xFF; // 0,1,2,3,4,5,6,7\n\n    /*\n    Pin Change interrupts for thumb points:\n    - Y, 26, PB6\n    - Z, 25, PB5\n    - a*, 24, PB4\n    - 8, 23, PB3\n    - 9, 22, PB2\n    - 0, 21, PB1\n    */\n\n    //PCICR = 0x01; // PCIE0=1, pin change interrupt 0 enabled\n    //PCMSK0 = 0x7E; // enable pin change interrupts on PB1 - PB6\n\n    #if KG_HOSTIF & KG_HOSTIF_USB_SERIAL\n        // start USB serial interface\n        
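// (note: the baud rate below is nominal; Teensy USB virtual serial always runs at native USB speed, so the value passed to begin() does not limit actual throughput)\n        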
USBSerial.begin(KG_HOSTIF_USB_SERIAL_BAUD);\n interfaceUSBSerialReady = true;\n interfaceUSBSerialMode = KG_INTERFACE_MODE_OUTGOING_PACKET | KG_INTERFACE_MODE_INCOMING_PACKET;\n #endif\n\n #if KG_HOSTIF & KG_HOSTIF_USB_RAWHID\n interfaceUSBRawHIDReady = true;\n interfaceUSBRawHIDMode = KG_INTERFACE_MODE_OUTGOING_PACKET | KG_INTERFACE_MODE_INCOMING_PACKET;\n #endif\n\n #if KG_HOSTIF & (KG_HOSTIF_BT2_SERIAL | KG_HOSTIF_BT2_RAWHID | KG_HOSTIF_BT2_HID | KG_HOSTIF_BT2_IAP)\n // start BT2 serial interface if any BT2 interfaces are used\n BT2Serial.begin(KG_HOSTIF_BT2_SERIAL_BAUD);\n #endif\n}\n\nvoid update_board_touch(uint8_t *touches) {\n // check on M combinations (PF2)\n SET(DDRF, 2); // set to OUTPUT\n CLR(PORTF, 2); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pinf = PINF;\n CLR(DDRF, 2); // set to INPUT\n SET(PORTF, 2); // pull HIGH\n if (!(_pinf & (1 << 6))) touches[0] |= 0x01; // D (PF6)\n// { KSP_D, KSP_M /* 34 DM */ },\n\n // check on Y combinations (PB6)\n SET(DDRB, 6); // set to OUTPUT\n CLR(PORTB, 6); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinc = PINC; _pind = PIND; _pine = PINE; _pinf = PINF;\n CLR(DDRB, 6); // set to INPUT\n SET(PORTB, 6); // pull HIGH\n if (!(_pinb & (1 << 0))) touches[0] |= 0x02; // A (PB0)\n if (!(_pinf & (1 << 0))) touches[0] |= 0x04; // B (PF0)\n if (!(_pinf & (1 << 1))) touches[0] |= 0x08; // C (PF1)\n if (!(_pinf & (1 << 6))) touches[0] |= 0x10; // D (PF6)\n if (!(_pinf & (1 << 7))) touches[0] |= 0x20; // E (PF7)\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! Teensy++ part in Eagle PA0-7 pins were backwards\n // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n if (!(_pina & (1 << 7))) touches[0] |= 0x40; // F (PA7 !PA3)\n if (!(_pina & (1 << 1))) touches[0] |= 0x80; // G (PA1 !PA5)\n if (!(_pina & (1 << 2))) touches[1] |= 0x01; // H (PA2 !PA6)\n if (!(_pina & (1 << 3))) touches[1] |= 0x02; // I (PA3 !PA7)\n #else\n if (!(_pina & (1 << 3))) touches[0] |= 0x40; // F (PA3)\n if (!(_pina & (1 << 5))) touches[0] |= 0x80; // G (PA5)\n if (!(_pina & (1 << 6))) touches[1] |= 0x01; // H (PA6)\n if (!(_pina & (1 << 7))) touches[1] |= 0x02; // I (PA7)\n #endif\n if (!(_pinc & (1 << 3))) touches[1] |= 0x04; // J (PC3)\n if (!(_pinc & (1 << 2))) touches[1] |= 0x08; // K (PC2)\n if (!(_pinc & (1 << 1))) touches[1] |= 0x10; // L (PC1)\n if (!(_pinf & (1 << 2))) touches[1] |= 0x20; // M (PF2)\n if (!(_pinf & (1 << 3))) touches[1] |= 0x40; // N (PF3)\n if (!(_pinf & (1 << 4))) touches[1] |= 0x80; // O (PF4)\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! 
Teensy++ part in Eagle PA0-7 pins were backwards\n // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n if (!(_pina & (1 << 6))) touches[2] |= 0x01; // P (PA6 !PA2)\n if (!(_pina & (1 << 5))) touches[2] |= 0x02; // Q (PA5 !PA1)\n if (!(_pina & (1 << 4))) touches[2] |= 0x04; // R (PA4 !PA0)\n #else\n if (!(_pina & (1 << 2))) touches[2] |= 0x01; // P (PA2)\n if (!(_pina & (1 << 1))) touches[2] |= 0x02; // Q (PA1)\n if (!(_pina & (1 << 0))) touches[2] |= 0x04; // R (PA0)\n #endif\n if (!(_pinc & (1 << 7))) touches[2] |= 0x08; // S (PC7)\n if (!(_pinc & (1 << 6))) touches[2] |= 0x10; // T (PC6)\n if (!(_pinc & (1 << 5))) touches[2] |= 0x20; // U (PC5)\n if (!(_pinc & (1 << 0))) touches[2] |= 0x40; // V (PC0)\n if (!(_pine & (1 << 1))) touches[2] |= 0x80; // W (PE1)\n if (!(_pine & (1 << 0))) touches[3] |= 0x01; // X (PE0)\n// { KSP_A, KSP_Y /* 39 AY */ },\n// { KSP_B, KSP_Y /* 40 BY */ },\n// { KSP_C, KSP_Y /* 41 CY */ },\n// { KSP_D, KSP_Y /* 22 DY */ },\n// { KSP_E, KSP_Y /* 23 EY */ },\n// { KSP_F, KSP_Y /* 24 FY */ },\n// { KSP_G, KSP_Y /* 13 GY */ },\n// { KSP_H, KSP_Y /* 6 HY */ },\n// { KSP_I, KSP_Y /* 14 IY */ },\n// { KSP_J, KSP_Y /* 3 JY */ },\n// { KSP_K, KSP_Y /* 4 KY */ },\n// { KSP_L, KSP_Y /* 5 LY */ },\n// { KSP_M, KSP_Y /* 38 MY */ },\n// { KSP_N, KSP_Y /* 44 NY */ },\n// { KSP_O, KSP_Y /* 46 OY */ },\n// { KSP_P, KSP_Y /* 26 PY */ },\n// { KSP_Q, KSP_Y /* 28 QY */ },\n// { KSP_R, KSP_Y /* 30 RY */ },\n// { KSP_S, KSP_Y /* 15 SY */ },\n// { KSP_T, KSP_Y /* 16 TY */ },\n// { KSP_U, KSP_Y /* 17 UY */ },\n// { KSP_V, KSP_Y /* 7 VY */ },\n// { KSP_W, KSP_Y /* 8 WY */ },\n// { KSP_X, KSP_Y /* 9 XY */ },\n\n // check on Z combinations (PB5)\n SET(DDRB, 5); // set to OUTPUT\n CLR(PORTB, 5); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinf = PINF;\n CLR(DDRB, 5); // set to INPUT\n SET(PORTB, 5); // pull HIGH\n if (!(_pinf & (1 << 2))) touches[3] |= 0x02; // M (PF2)\n if (!(_pinf & (1 << 3))) touches[3] |= 0x04; // N (PF3)\n if (!(_pinf & (1 << 4))) touches[3] |= 0x08; // O (PF4)\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! Teensy++ part in Eagle PA0-7 pins were backwards\n // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n if (!(_pina & (1 << 6))) touches[3] |= 0x10; // P (PA6 !PA2)\n if (!(_pina & (1 << 5))) touches[3] |= 0x20; // Q (PA5 !PA1)\n if (!(_pina & (1 << 4))) touches[3] |= 0x40; // R (PA4 !PA0)\n #else\n if (!(_pina & (1 << 2))) touches[3] |= 0x10; // P (PA2)\n if (!(_pina & (1 << 1))) touches[3] |= 0x20; // Q (PA1)\n if (!(_pina & (1 << 0))) touches[3] |= 0x40; // R (PA0)\n #endif\n// { KSP_M, KSP_Z /* 42 MZ */ },\n// { KSP_N, KSP_Z /* 43 NZ */ },\n// { KSP_O, KSP_Z /* 45 OZ */ },\n// { KSP_P, KSP_Z /* 25 PZ */ },\n// { KSP_Q, KSP_Z /* 27 QZ */ },\n// { KSP_R, KSP_Z /* 29 RZ */ },\n\n // check on 1 combinations (PD5)\n SET(DDRD, 5); // set to OUTPUT\n CLR(PORTD, 5); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinc = PINC; _pinf = PINF;\n CLR(DDRD, 5); // set to INPUT\n SET(PORTD, 5); // pull HIGH\n if (!(_pinb & (1 << 0))) touches[3] |= 0x80; // A (PB0)\n if (!(_pinf & (1 << 6))) touches[4] |= 0x01; // D (PF6)\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! 
Teensy++ part in Eagle PA0-7 pins were backwards\n // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n if (!(_pina & (1 << 1))) touches[4] |= 0x02; // G (PA1 !PA5)\n #else\n if (!(_pina & (1 << 5))) touches[4] |= 0x02; // G (PA5)\n #endif\n if (!(_pinc & (1 << 3))) touches[4] |= 0x04; // J (PC3)\n if (!(_pinb & (1 << 6))) touches[4] |= 0x08; // Y (PB6)\n// { KSP_A, KSP_1 /* 49 A1 */ },\n// { KSP_D, KSP_1 /* 52 D1 */ },\n// { KSP_G, KSP_1 /* 55 G1 */ },\n// { KSP_J, KSP_1 /* 58 J1 */ },\n// { KSP_Y, KSP_1 /* 59 Y1 */ }\n\n // check on 2 combinations (PD4)\n SET(DDRD, 4); // set to OUTPUT\n CLR(PORTD, 4); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinc = PINC; _pinf = PINF;\n CLR(DDRD, 4); // set to INPUT\n SET(PORTD, 4); // pull HIGH\n if (!(_pinb & (1 << 0))) touches[4] |= 0x10; // A (PB0)\n if (!(_pinf & (1 << 6))) touches[4] |= 0x20; // D (PF6)\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! Teensy++ part in Eagle PA0-7 pins were backwards\n // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n if (!(_pina & (1 << 1))) touches[4] |= 0x40; // G (PA1 !PA5)\n #else\n if (!(_pina & (1 << 5))) touches[4] |= 0x40; // G (PA5)\n #endif\n if (!(_pinc & (1 << 3))) touches[4] |= 0x80; // J (PC3)\n// { KSP_A, KSP_2 /* 48 A2 */ },\n// { KSP_D, KSP_2 /* 51 D2 */ },\n// { KSP_G, KSP_2 /* 57 G2 */ },\n// { KSP_J, KSP_2 /* 54 J2 */ },\n\n // check on 3 combinations (PB7)\n SET(DDRB, 7); // set to OUTPUT\n CLR(PORTB, 7); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pina = PINA; _pinb = PINB; _pinc = PINC; _pinf = PINF;\n CLR(DDRB, 7); // set to INPUT\n SET(PORTB, 7); // pull HIGH\n if (!(_pinb & (1 << 0))) touches[5] |= 0x01; // A (PB0)\n if (!(_pinf & (1 << 6))) touches[5] |= 0x02; // D (PF6)\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! Teensy++ part in Eagle PA0-7 pins were backwards\n // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n if (!(_pina & (1 << 1))) touches[5] |= 0x04; // G (PA1 !PA5)\n #else\n if (!(_pina & (1 << 5))) touches[5] |= 0x04; // G (PA5)\n #endif\n if (!(_pinc & (1 << 3))) touches[5] |= 0x08; // J (PC3)\n// { KSP_A, KSP_3 /* 47 A3 */ },\n// { KSP_D, KSP_3 /* 50 D3 */ },\n// { KSP_G, KSP_3 /* 56 G3 */ },\n// { KSP_J, KSP_3 /* 53 J3 */ },\n\n // check on 4 combinations (PF5)\n SET(DDRF, 5); // set to OUTPUT\n CLR(PORTF, 5); // set to LOW\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pinb = PINB; _pinf = PINF;\n CLR(DDRF, 5); // set to INPUT\n SET(PORTF, 5); // pull HIGH\n if (!(_pinf & (1 << 6))) touches[5] |= 0x10; // D (PF6)\n if (!(_pinb & (1 << 6))) touches[5] |= 0x20; // Y (PB6)\n if (!(_pinb & (1 << 5))) touches[5] |= 0x40; // Z (PB5)\n// { KSP_D, KSP_4 /* 31 D4 */ },\n// { KSP_Y, KSP_4 /* 36 Y4 */ },\n// { KSP_Z, KSP_4 /* 35 Z4 */ },\n\n // check on 5 combinations (PA4)\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! Teensy++ part in Eagle PA0-7 pins were backwards\n // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n SET(DDRA, 0); // set to OUTPUT\n CLR(PORTA, 0); // set to LOW\n #else\n SET(DDRA, 4); // set to OUTPUT\n CLR(PORTA, 4); // set to LOW\n #endif\n delayMicroseconds(3); // give the poor receiving pins a chance to change state\n _pinb = PINB;\n #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n // d'oh! 
Teensy++ part in Eagle PA0-7 pins were backwards\n        // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n        CLR(DDRA, 0);           // set to INPUT\n        SET(PORTA, 0);          // pull HIGH\n    #else\n        CLR(DDRA, 4);           // set to INPUT\n        SET(PORTA, 4);          // pull HIGH\n    #endif\n    if (!(_pinb & (1 << 6))) touches[5] |= 0x80; // Y (PB6)\n    if (!(_pinb & (1 << 5))) touches[6] |= 0x01; // Z (PB5)\n//    { KSP_Y, KSP_5 /* 20 Y5 */ },\n//    { KSP_Z, KSP_5 /* 19 Z5 */ },\n\n    // check on 6 combinations (PC4)\n    SET(DDRC, 4);           // set to OUTPUT\n    CLR(PORTC, 4);          // set to LOW\n    delayMicroseconds(3);   // give the poor receiving pins a chance to change state\n    _pinb = PINB; _pinf = PINF;\n    CLR(DDRC, 4);           // set to INPUT\n    SET(PORTC, 4);          // pull HIGH\n    if (!(_pinf & (1 << 6))) touches[6] |= 0x02; // D (PF6)\n    if (!(_pinb & (1 << 6))) touches[6] |= 0x04; // Y (PB6)\n    if (!(_pinb & (1 << 5))) touches[6] |= 0x08; // Z (PB5)\n//    { KSP_D, KSP_6 /* 32 D6 */ },\n//    { KSP_Y, KSP_6 /* 11 Y6 */ },\n//    { KSP_Z, KSP_6 /* 10 Z6 */ },\n\n    // check on 7 combinations (PD7)\n    SET(DDRD, 7);           // set to OUTPUT\n    CLR(PORTD, 7);          // set to LOW\n    delayMicroseconds(3);   // give the poor receiving pins a chance to change state\n    _pina = PINA; _pinb = PINB; _pinf = PINF;\n    CLR(DDRD, 7);           // set to INPUT\n    SET(PORTD, 7);          // pull HIGH\n    if (!(_pinf & (1 << 6))) touches[6] |= 0x10; // D (PF6)\n    #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n        // d'oh! Teensy++ part in Eagle PA0-7 pins were backwards\n        // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n        if (!(_pina & (1 << 1))) touches[6] |= 0x20; // G (PA1 !PA5)\n    #else\n        if (!(_pina & (1 << 5))) touches[6] |= 0x20; // G (PA5)\n    #endif\n    if (!(_pinb & (1 << 6))) touches[6] |= 0x40; // Y (PB6)\n    if (!(_pinb & (1 << 5))) touches[6] |= 0x80; // Z (PB5)\n//    { KSP_D, KSP_7 /* 33 D7 */ },\n//    { KSP_G, KSP_7 /* 18 G7 */ },\n//    { KSP_Y, KSP_7 /* 1 Y7 */ },\n//    { KSP_Z, KSP_7 /* 0 Z7 */ },\n\n    // check on 8 combinations (PB3)\n    SET(DDRB, 3);           // set to OUTPUT\n    CLR(PORTB, 3);          // set to LOW\n    delayMicroseconds(3);   // give the poor receiving pins a chance to change state\n    _pina = PINA; _pinb = PINB; _pinc = PINC; _pinf = PINF;\n    CLR(DDRB, 3);           // set to INPUT\n    SET(PORTB, 3);          // pull HIGH\n    if (!(_pinb & (1 << 0))) touches[7] |= 0x01; // A (PB0)\n    if (!(_pinf & (1 << 6))) touches[7] |= 0x02; // D (PF6)\n    #ifdef KEYGLOVE_KIT_BUG_PORTA_REVERSED\n        // d'oh! Teensy++ part in Eagle PA0-7 pins were backwards\n        // PA0 = PA4, PA1 = PA5, PA2 = PA6, PA3 = PA7, PA4 = PA0, PA5 = PA1, PA6 = PA2, PA7 = PA3\n        if (!(_pina & (1 << 1))) touches[7] |= 0x04; // G (PA1 !PA5)\n    #else\n        if (!(_pina & (1 << 5))) touches[7] |= 0x04; // G (PA5)\n    #endif\n    if (!(_pinc & (1 << 3))) touches[7] |= 0x08; // J (PC3)\n//    { KSP_A, KSP_8 /* 37 A8 */ },\n//    { KSP_D, KSP_8 /* 21 D8 */ },\n//    { KSP_G, KSP_8 /* 12 G8 */ },\n//    { KSP_J, KSP_8 /* 2 J8 */ },\n}\n\n#endif // _SUPPORT_BOARD_TEENSYPP2_H_"
},
{
"alpha_fraction": 0.5954341888427734,
"alphanum_fraction": 0.6114723086357117,
"avg_line_length": 28.330509185791016,
"blob_id": "d22307e951212f37cb56ac3ad9c590d296f4e156",
"content_id": "62ae804f735fc5d7cc779dfddfc4f74696432694",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6921,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 236,
"path": "/keyglove/keyglove.ino",
"repo_name": "Tahamosaad/keyglove",
"src_encoding": "UTF-8",
"text": "// Keyglove Controller source code - Main setup/loop controller\n// 9/9/2013 by Jeff Rowberg <[email protected]>\n\n/* ============================================\nController code is placed under the MIT license\nCopyright (c) 2013 Jeff Rowberg\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n===============================================\n*/\n\n\n\n/* ===============================================\n * LIBRARY INCLUDES FOR PROPER BUILD PROCESS\n=============================================== */\n\n#include <Wire.h>\n#include <I2Cdev.h>\n#include <MPU6050.h>\n#include <iWRAP.h>\n\n\n\n/* ===============================================\n * UNIVERSAL CONTROLLER DECLARATIONS\n=============================================== */\n\nvolatile uint8_t keyglove100Hz = 0;\nuint8_t keygloveTick = 0; // increments every ~10ms (100hz), loops at 100\nuint32_t keygloveTock = 0; // increments every 100 ticks, loops at 2^32 (~4 billion)\nuint32_t keygloveTickTime = 0, keygloveTickTime0 = 0;\n\n\n\n/* ===============================================\n * SUPPORT INCLUDES\n =============================================== */\n\n#include \"version.h\"\n#include \"hardware.h\"\n#include \"config.h\"\n\n// BOARD\n#if KG_BOARD == KG_BOARD_TEENSYPP2\n #include \"support_board_teensypp2.h\"\n#elif #KG_BOARD == KG_BOARD_ARDUINO_DUE\n #include \"support_board_arduino_due.h\"\n#else\n #error Unsupported platform selected in KG_BOARD\n#endif\n\n// COMMUNICATION PROTOCOL\n#include \"support_protocol.h\"\n\n// CORE TOUCH SENSOR LOGIC\n#include \"support_touch.h\"\n\n// FEEDBACK\n#if (KG_FEEDBACK & KG_FEEDBACK_BLINK)\n #include \"support_feedback_blink.h\"\n#endif\n#if (KG_FEEDBACK & KG_FEEDBACK_PIEZO)\n #include \"support_feedback_piezo.h\"\n#endif\n#if (KG_FEEDBACK & KG_FEEDBACK_VIBRATE_HAND)\n #include \"support_feedback_vibrate_hand.h\"\n#endif\n#if (KG_FEEDBACK & KG_FEEDBACK_RGB)\n #include \"support_feedback_rgb.h\"\n#endif\n\n// MOTION\n#include \"support_helper_3dmath.h\"\n#if (KG_MOTION & KG_MOTION_MPU6050_HAND)\n #include \"support_motion_mpu6050_hand.h\"\n#endif\n\n// HOST INTERFACE\n#if (KG_HOSTIF & KG_HOSTIF_USB)\n #include \"support_hostif_usb.h\"\n#endif\n#if (KG_HOSTIF & KG_HOSTIF_BT2)\n #include \"support_hostif_bt2.h\"\n#endif\n\n// HUMAN INPUT DEVICE\n#if (KG_HID & KG_HID_KEYBOARD)\n #include \"support_hid_keyboard.h\"\n#endif\n#if (KG_HID & KG_HID_MOUSE)\n #include \"support_hid_mouse.h\"\n#endif\n\n// PROTOCOL DEFINITIONS (DECLARATIONS IN \"support_protocol.h\")\n#include \"support_protocol_system.h\"\n#include 
\"support_protocol_touch.h\"\n#include \"support_protocol_feedback.h\"\n#include \"support_protocol_motion.h\"\n//#include \"support_protocol_flex.h\"\n//#include \"support_protocol_pressure.h\"\n//#include \"support_protocol_touchset.h\"\n\n// USE THIS FILE TO MODIFY/CANCEL BUILT-IN PROTOCOL PACKETS AND ADD YOUR OWN FOR SPECIAL FUNCTIONALITY\n#include \"custom_protocol.h\"\n\n/* ===============================================\n * MAIN SETUP ROUTINE\n=============================================== */\n\nvoid setup() {\n // reset runtime counters\n keygloveTick = 0;\n keygloveTock = 0;\n\n // BOARD\n #if KG_BOARD == KG_BOARD_TEENSYPP2\n setup_board_teensypp2();\n #elif #KG_BOARD == KG_BOARD_ARDUINO_DUE\n setup_board_arduino_due();\n #endif\n\n // COMMUNICATION PROTOCOL\n setup_protocol();\n\n // send system_boot event\n send_keyglove_packet(KG_PACKET_TYPE_EVENT, 0, KG_PACKET_CLASS_SYSTEM, KG_PACKET_ID_EVT_SYSTEM_BOOT, 0);\n\n // CORE TOUCH SENSOR LOGIC\n //setup_touch();\n\n // FEEDBACK\n #if (KG_FEEDBACK & KG_FEEDBACK_BLINK)\n setup_feedback_blink();\n #endif\n #if (KG_FEEDBACK & KG_FEEDBACK_PIEZO)\n setup_feedback_piezo();\n #endif\n #if (KG_FEEDBACK & KG_FEEDBACK_VIBRATE_HAND)\n setup_feedback_vibrate_hand();\n #endif\n #if (KG_FEEDBACK & KG_FEEDBACK_RGB)\n setup_feedback_rgb();\n #endif\n\n // MOTION\n #if (KG_MOTION & KG_MOTION_MPU6050_HAND)\n setup_motion_mpu6050_hand();\n #endif\n\n // HOST INTERFACE\n #if (KG_HOSTIF & KG_HOSTIF_USB)\n setup_hostif_usb();\n #endif\n #if (KG_HOSTIF & KG_HOSTIF_BT2)\n setup_hostif_bt2();\n #endif\n\n // HUMAN INPUT DEVICE\n #if (KG_HID & KG_HID_KEYBOARD)\n setup_hid_keyboard();\n #endif\n #if (KG_HID & KG_HID_MOUSE)\n setup_hid_mouse();\n #endif\n\n // send system_boot event\n send_keyglove_packet(KG_PACKET_TYPE_EVENT, 0, KG_PACKET_CLASS_SYSTEM, KG_PACKET_ID_EVT_SYSTEM_READY, 0);\n}\n\n\n\n/* ===============================================\n * MAIN LOOP ROUTINE\n=============================================== */\n\nvoid loop() {\n // check for incoming protocol data\n check_incoming_protocol_data();\n\n // check for 100Hz tick (i.e. every 10ms)\n if (keyglove100Hz) {\n keyglove100Hz = 0;\n //keygloveTickTime += micros() - keygloveTickTime0;\n //keygloveTickTime0 = micros();\n \n // update touch status\n update_touch();\n\n // update feedback settings\n #if (KG_FEEDBACK & KG_FEEDBACK_BLINK)\n update_feedback_blink();\n #endif // KG_FEEDBACK_BLINK\n #if (KG_FEEDBACK & KG_FEEDBACK_RGB)\n update_feedback_rgb();\n #endif // KG_FEEDBACK_RGB\n #if (KG_FEEDBACK & KG_FEEDBACK_PIEZO)\n update_feedback_piezo();\n #endif // KG_FEEDBACK_PIEZO\n #if (KG_FEEDBACK & KG_FEEDBACK_VIBRATE)\n update_feedback_vibrate();\n #endif // KG_FEEDBACK_VIBRATE\n\n // check for 100 ticks and reset counter (should be every 1 second)\n keygloveTick++;\n if (keygloveTick == 100) {\n //keygloveTickTime = 0;\n keygloveTick = 0;\n keygloveTock++;\n }\n }\n \n #if (KG_MOTION & KG_MOTION_MPU6050_HAND)\n // check for available motion data from MPU-6050 on back of hand\n if (mpuHandInterrupt) {\n mpuHandInterrupt = false; // clear the flag so we don't read again until the next interrupt\n update_motion_mpu6050_hand();\n }\n #endif\n}"
}
] | 12 |
RedaMansy/ProjectV_3 | https://github.com/RedaMansy/ProjectV_3 | 93bace778c9afa1607fa3e27f20ea5698c414649 | d93e4e8d3ffa625d62a7dac85b2120e3cbe824b3 | cd2c3f68569b2ccdb8f86bcc51be0e757d2beb94 | refs/heads/master | 2020-04-02T06:14:05.995135 | 2018-10-23T12:14:01 | 2018-10-23T12:14:01 | 154,137,259 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6658291220664978,
"alphanum_fraction": 0.6859296560287476,
"avg_line_length": 17.090909957885742,
"blob_id": "b042494e892cc1907664b9fb0038a071da8e4bac",
"content_id": "74807a02561ec53ff17e5bf58721241a15559a23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 398,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 22,
"path": "/player_project.py",
"repo_name": "RedaMansy/ProjectV_3",
"src_encoding": "UTF-8",
"text": "from item_project import *\nfrom map_project import rooms\n\ninventory = []\n#For example: item_id\n\n# Start game at the reception\ncurrent_room = rooms[\"Reception\"]\n\n#====================================\n# Player status\nenergy_min = 0\n#Minimum energyof player\n\nenergy_max = 100\n#Maximum energy of player\n\nproject_process = 0\n#Original project process\n\nproject_process_max = 100\n#Maximum project process\n"
},
{
"alpha_fraction": 0.637699544429779,
"alphanum_fraction": 0.6384595632553101,
"avg_line_length": 27.19285774230957,
"blob_id": "4c2417940a43800164df0bc8c113ed68e0782e82",
"content_id": "b84df1827f985e6aecd726f694396789645b0c97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3948,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 140,
"path": "/map_project.py",
"repo_name": "RedaMansy/ProjectV_3",
"src_encoding": "UTF-8",
"text": "from item_project import *\n\nroom_reception = {\n \"name\": \"The Reception\",\n\n \"description\":\n \"\"\"You have just entered the Reception. The interior is very modern, with sleek furnishings and glass walls. \n To your North, you can see the cafeteria. \n To your west, you can see the cinema. \n To your east, you can see the shopping centre. In front of you, sits a short, moustached receptionist, reading a newspaper.\n \"\"\",\n\n \"exits\": {\"east\": \"The Shopping Centre\", \"west\": \"The Cinema\", \"north\": \"The Cafeteria\"},\n\n \"items\": [item_keycard]\n}\n\nroom_lab = {\n \"name\": \"The Lab\",\n\n \"description\":\n \"\"\"You enter the lab. It isn't too crowded. A few people are around, working and seeing that stresses you \n out becuase you feel that you should be working too. You look around, not knowing who you're looking for.\"\"\",\n\n \"exits\": {\"west\": \"The Library\", \"east\": \"The Cafeteria\", \"south\": \"The Cinema\"},\n\n \"items\": [item_github, item_thebae]\n}\n\nroom_library = {\n \"name\": \"The Library\",\n\n \"description\":\n \"\"\"You are standing in the library, Everyone is working or reading quietly.\n As everyone is focusing on their own work, nobody notice that there is a notepad on the floor.\n The exit is to the east\"\"\",\n\n \"exits\": {\"east\": \"The Lab\"},\n\n \"items\": [item_notes, item_voucher, item_profoak]\n}\n\nroom_cinema = {\n \"name\": \"The Cinema\",\n\n \"description\":\n \"\"\"You wander around the city aimlessly and you find a cinema that you've never noticed before.\n It is oddly empty and you are perplexed as you've been in this area many times in the past and have never noticed it.\n The only movie you find playing is David Fincher's The Social Network.\"\"\",\n\n \"exits\": {\"east\": \"The Reception\", \"north\": \"The Lab\"},\n\n \"items\": [item_zucc]\n}\n\nroom_cafeteria = {\n \"name\": \"The Cafeteria\",\n\n \"description\":\n \"\"\"You walk in the cafeteria and immediately notice the lingering waft of coffee in the air. \n You look at the black board by the cashier and notice that they're serving hotdogs and tiramisu. \"\"\",\n\n \"exits\": {\"west\": \"The Lab\", \"north\": \"Home\", \"east\": \"The Closet\", \"south\": \"The Reception\"},\n\n \"items\": [item_food, item_water, item_bluebear]\n}\n\nroom_shoppingcentre = {\n \"name\": \"The Shopping Centre\",\n\n \"description\":\n \"\"\"INSERT DESCRIPTION\"\"\",\n\n \"exits\": {\"west\": \"The Reception\", \"north\": \"The Closet\"},\n\n \"items\": [item_laptop, item_hideokojima]\n}\n\nroom_home = {\n \"name\": \"Home\",\n\n \"description\":\n \"\"\"INSERT DESCRIPTION\"\"\",\n\n \"exits\": {\"east\": \"The Closet\", \"south\": \"The Cafeteria\"},\n\n \"items\": [item_phone]\n}\n\nroom_bar = {\n \"name\": \"The Closet\",\n\n \"description\":\n \"\"\"You walk into The Closet to the sounds of loud music and people shouting. 
\n There is a constant flashing of disco lights and on the board you see that all drinks are £1.13\"\"\",\n\n \"exits\": {\"west\": \"The Cafeteria\", \"north\": \"Home\", \"south\": \"The Shopping Centre\"},\n\n \"items\": [item_NICoffee, item_turing]\n}\n\nroom_petshop = {\n \"name\": \"The Petshop\",\n\n \"description\":\n \"\"\"INSERT DESCRIPTION\"\"\",\n\n \"exits\": {\"INSERT EXITS\"},\n\n \"items\": [item_food, item_water, item_bluebear, item_pythonguy]\n}\n\nroom_cofffeeshop = {\n \"name\": \"The Coffee Shop\",\n\n \"description\":\n \"\"\"You walk into the coffee shop and the strong aroma of coffee beans lingers around the room. \n You can hear the constant grinding of beans and in the corner of your eye you notice a man with a grey beard wearing glasses.\n \"\"\",\n\n \"exits\": {\"east\": \"The Shopping Centre\", \"west\": \"The Cinema\", \"north\": \"The Cafeteria\"},\n\n \"items\": [item_keycard]\n}\n\n\n\nrooms = {\n \"The Reception\": room_reception,\n \"The Lab\": room_lab,\n \"The Library\": room_library,\n \"The Cinema\": room_cinema,\n \"The Cafeteria\": room_cafeteria,\n\n \"The Shopping Centre\": room_shoppingcentre,\n \"The Closet\": room_bar,\n \"Home\": room_home,\n \"The Petshop\": room_petshop,\n\n}\n"
}
] | 2 |
adolfoportilla/Full-Stack-Nanodegree | https://github.com/adolfoportilla/Full-Stack-Nanodegree | 264df71c1f514323c45edddd1fcc50638d33e1c8 | 91b91b6a6dc59a1fa8283c8f4d781c31ea0b995f | b6372ee61ee24c813e3a45e03d9522eba59f9dbe | refs/heads/master | 2020-12-02T22:37:34.909120 | 2017-07-07T02:01:09 | 2017-07-07T02:01:09 | 96,158,454 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6607635021209717,
"alphanum_fraction": 0.7056276798248291,
"avg_line_length": 59.52381134033203,
"blob_id": "fdb906a4b87892ca3df2a23dbd9fef12266c542c",
"content_id": "cd4cb4eabe2a97d2422845407f36d464743b0948",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2541,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 42,
"path": "/p1/entertainment_center.py",
"repo_name": "adolfoportilla/Full-Stack-Nanodegree",
"src_encoding": "UTF-8",
"text": "import media\nimport fresh_tomatoes\n\n\n# Helper varibles to keep lines short and make easier manipulation \ntoystoryImage = \"https://lumiere-a.akamaihd.net/v1/images/open-uri20150422-20810-m8zzyx_5670999f.jpeg?region=0,0,300,450\" # NOQA\ntoystoryYoutube = \"https://www.youtube.com/watch?v=KYz2wyBy3kc\"\navatarImage = \"https://images-na.ssl-images-amazon.com/images/M/MV5BMTYwOTEwNjAzMl5BMl5BanBnXkFtZTcwODc5MTUwMw@@._V1_UY1200_CR90,0,630,1200_AL_.jpg\" # NOQA\navatarYoutube = \"https://www.youtube.com/watch?v=5PSNL1qE6VY\"\nintestellarImage = \"https://images-na.ssl-images-amazon.com/images/M/MV5BMjIxNTU4MzY4MF5BMl5BanBnXkFtZTgwMzM4ODI3MjE@._V1_SY1000_CR0,0,640,1000_AL_.jpg\" # NOQA\ninterstellarYoutube = \"https://www.youtube.com/watch?v=sRLc9OlrZZw\"\nschoolImage = \"https://upload.wikimedia.org/wikipedia/en/thumb/1/11/School_of_Rock_Poster.jpg/220px-School_of_Rock_Poster.jpg\" # NOQA\nschoolYoutube = \"https://www.youtube.com/watch?v=3PsUJFEBC74\"\nmidnigthImage = \"https://upload.wikimedia.org/wikipedia/en/thumb/9/9f/Midnight_in_Paris_Poster.jpg/220px-Midnight_in_Paris_Poster.jpg\" # NOQA\nmidnigthYoutube = \"https://www.youtube.com/watch?v=FAfR8omt-CY\"\nhungerImage = \"https://images-na.ssl-images-amazon.com/images/M/MV5BMjA4NDg3NzYxMF5BMl5BanBnXkFtZTcwNTgyNzkyNw@@._V1_UY1200_CR90,0,630,1200_AL_.jpg\" # NOQA\nhungerYoutube = \"https://www.youtube.com/watch?v=mfmrPu43DF8\"\n\n#Movie variables\ntoy_story = media.Movie(\"Toy Story\",\n \"A story of a boy and his toys that comes to life\",\n toystoryImage, toystoryYoutube)\navatar = media.Movie(\"Avatar\", \"A marine on an alien planet\", avatarImage, \n\t\t\t\t\t avatarYoutube )\ninterstellar = media.Movie(\"Interestellar\",\n \"A group of scientist are trying to discover a \"\n \"planet to move the human race to it\",\n intestellarImage, interstellarYoutube)\nschool_of_rock = media.Movie(\"School of Rock\",\n \"A professor that plays songs in a band\",\n schoolImage, schoolYoutube)\nmidnight_in_paris = media.Movie(\"Midnight in Paris\",\n\t\t\t\t\t\t\t\t\"A movie about a screnwriter\",\n\t\t\t\t\t\t\t\tmidnigthImage, midnigthYoutube)\nthe_hunger_games = media.Movie(\"The hunger games\",\n\t\t\t\t\t\t\t \"A story about a girl that gets chosen \"\n\t\t\t\t\t\t\t \"to play a game that depends on her life\",\n\t\t\t\t\t\t\t hungerImage, hungerYoutube)\n\nmovies = [toy_story, avatar, school_of_rock, interstellar, the_hunger_games,\n\t\t midnight_in_paris]\nfresh_tomatoes.open_movies_page(movies)"
},
{
"alpha_fraction": 0.7570093274116516,
"alphanum_fraction": 0.7757009267807007,
"avg_line_length": 20.399999618530273,
"blob_id": "c151945d22c42e4c09484be0f92a36c9bbb64348",
"content_id": "4197ec044f07c1dc22ff7ca0ef54b98a852678a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 5,
"path": "/README.md",
"repo_name": "adolfoportilla/Full-Stack-Nanodegree",
"src_encoding": "UTF-8",
"text": "# Full-Stack-Nanodegree\n\n## Project 1 Instructions\n\n1. To run project, type python entertainment_center.py\n"
},
{
"alpha_fraction": 0.7170731425285339,
"alphanum_fraction": 0.7170731425285339,
"avg_line_length": 24.625,
"blob_id": "ebcc9f88d7afe40c5e6d9a1a19cc5a2248b57962",
"content_id": "716ce262a3f4c3400f1e6b1ef845b6a13ed257d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 410,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 16,
"path": "/p1/media.py",
"repo_name": "adolfoportilla/Full-Stack-Nanodegree",
"src_encoding": "UTF-8",
"text": "import webbrowser\n\n\n# Movie CLass\nclass Movie():\n \"\"\"Movie class to add movies\"\"\"\n # Class constructor\n\tdef __init__(self, movie_title, movie_storyline, poster_image, trailer):\n\t\tself.title = movie_title\n\t\tself.storyline = movie_storyline\n\t\tself.poster_image_url = poster_image\n\t\tself.trailer_youtube_url = trailer\n\n\t# Helper method\n\tdef show_trailer(self):\n\t\twebbrowser.open(self.trailer_youtube_url)\n"
}
] | 3 |
KeslerPhyz/2018Training | https://github.com/KeslerPhyz/2018Training | 1988a8ac5cb56cd4caae200691cc3874e652246d | 1f65c05f73bb04e7a2fde529149d06d450802cf4 | bd136381f646a0f1e30d4936bb8cca52632731b7 | refs/heads/master | 2020-04-09T03:02:00.476246 | 2018-11-29T03:12:10 | 2018-11-29T03:12:10 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4962686598300934,
"alphanum_fraction": 0.4962686598300934,
"avg_line_length": 13.44444465637207,
"blob_id": "eef4eab76358dd6a05c4749d0ee9056141f8349e",
"content_id": "e079f29c57dc119137ad721e85b76d74227af34e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 268,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 18,
"path": "/robot/robot/robotMap.py",
"repo_name": "KeslerPhyz/2018Training",
"src_encoding": "UTF-8",
"text": "import wpilib\n\n\n\n\n\nclass RobotMap():\n def __init__(self):\n self.canMap = CanMap()\n self.controllerMap = ControllerMap()\n \n \nclass canMap():\n def __init__(self):\n \n \nclass controllerMap():\n def __init__(self):\n "
},
{
"alpha_fraction": 0.595723032951355,
"alphanum_fraction": 0.6048879623413086,
"avg_line_length": 18.65999984741211,
"blob_id": "87049781c98c17660da874e688a73447a36d86d7",
"content_id": "f7bc628f1f15c3614956dc6b31d7b5b62af561da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 982,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 50,
"path": "/robot/robot/robot.py",
"repo_name": "KeslerPhyz/2018Training",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n'''\n This is a demo program showing how to use Mecanum control with the\n RobotDrive class.\n'''\n\nimport wpilib\nimport commandbased\nimport ctre\n\n#main robot\n\nclass MyRobot(commandbased.CommandBasedRobot):\n def robotInit(self):\n self.motors = {}\n self.motors['leftMotor'] = ctre.WPI_TalonSRX(0)\n self.motors['rightMotor'] = ctre.WPI_TalonSRX(1)\n \n def testInit(self):\n print(\"Test Mode\")\n while self.isTest():\n self.motors['leftMotor'].set(-50)\n self.motors['rightMotor'].set(100)\n print(\"Done\")\n \n #Made by Matthew McFarland, the Great Wizard of \n def teleopPeriodic(self):\n self.drive.tankDrive(0)\n \n\n\n\n\n\n\n\n\n#code to run the robot\n\n#import sys \ndef exit(retval):\n pass\n# sys.exit(retval)\n\nif __name__ == '__main__':\n try:\n print(wpilib._impl.main.exit)\n except:\n wpilib._impl.main.exit = exit\n wpilib.run(MyRobot,physics_enabled=True)"
}
] | 2 |